MPTCP will make use of tcp_send_mss() and tcp_push() when sending
data to specific TCP subflows.
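
As a rough sketch of the intended use (not part of this patch;
mptcp_subflow_push() and the locking shown are assumptions, only
tcp_send_mss() and tcp_push() come from this change), an MPTCP sender
could drive a subflow socket roughly like this:

  /* Hypothetical helper, for illustration only. */
  static void mptcp_subflow_push(struct sock *ssk, int flags)
  {
  	int size_goal, mss_now;

  	lock_sock(ssk);
  	/* Ask TCP for the subflow's current MSS and preferred chunk size. */
  	mss_now = tcp_send_mss(ssk, &size_goal, flags);

  	/* ... queue up to size_goal bytes on ssk's write queue ... */

  	/* Let TCP transmit what has been queued on this subflow. */
  	tcp_push(ssk, flags, mss_now, tcp_sk(ssk)->nonagle, size_goal);
  	release_sock(ssk);
  }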

tcp_request_sock_ipv4_ops will be referenced during TCP subflow
creation.
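
As an illustration of that reference (again not part of this patch;
subflow_request_sock_ipv4_ops and subflow_init_req_ops() are
hypothetical names), a subflow implementation could copy the
now-visible ops table and override individual callbacks:

  static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops;

  /* Hypothetical init: start from TCP's IPv4 request_sock ops and
   * replace only the callbacks MPTCP needs to intercept.
   */
  static void subflow_init_req_ops(void)
  {
  	subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops;
  	/* e.g. subflow_request_sock_ipv4_ops.route_req = ...; */
  }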

Signed-off-by: Mat Martineau <mathew.j.martin...@linux.intel.com>
Signed-off-by: Peter Krystad <peter.krys...@linux.intel.com>
---
 include/net/tcp.h   | 5 +++++
 net/ipv4/tcp.c      | 6 +++---
 net/ipv4/tcp_ipv4.c | 2 +-
 3 files changed, 9 insertions(+), 4 deletions(-)

diff --git a/include/net/tcp.h b/include/net/tcp.h
index 414fe1749c0f..7f254765074f 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -330,6 +330,9 @@ int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
                        size_t size, int flags);
 ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
                 size_t size, int flags);
+int tcp_send_mss(struct sock *sk, int *size_goal, int flags);
+void tcp_push(struct sock *sk, int flags, int mss_now, int nonagle,
+             int size_goal);
 void tcp_release_cb(struct sock *sk);
 void tcp_wfree(struct sk_buff *skb);
 void tcp_write_timer_handler(struct sock *sk);
@@ -1983,6 +1986,8 @@ struct tcp_request_sock_ops {
                           enum tcp_synack_type synack_type);
 };
 
+extern const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops;
+
 #ifdef CONFIG_SYN_COOKIES
 static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
                                          const struct sock *sk, struct sk_buff *skb,
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index f98a1882e537..f273ff308b87 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -690,8 +690,8 @@ static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
               refcount_read(&sk->sk_wmem_alloc) > skb->truesize;
 }
 
-static void tcp_push(struct sock *sk, int flags, int mss_now,
-                    int nonagle, int size_goal)
+void tcp_push(struct sock *sk, int flags, int mss_now,
+             int nonagle, int size_goal)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
@@ -925,7 +925,7 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
        return max(size_goal, mss_now);
 }
 
-static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
+int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
 {
        int mss_now;
 
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 5cb0e7f065ea..ea926e4c13f1 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1369,7 +1369,7 @@ struct request_sock_ops tcp_request_sock_ops __read_mostly = {
        .syn_ack_timeout =      tcp_syn_ack_timeout,
 };
 
-static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
+const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
        .mss_clamp      =       TCP_MSS_DEFAULT,
 #ifdef CONFIG_TCP_MD5SIG
        .req_md5_lookup =       tcp_v4_md5_lookup,
-- 
2.23.0
