Commit 35b2c321 authored by Mat Martineau, committed by David S. Miller

tcp: Export TCP functions and ops struct

MPTCP will make use of tcp_send_mss() and tcp_push() when sending
data to specific TCP subflows.

tcp_request_sock_ipvX_ops and ipvX_specific will be referenced
during TCP subflow creation.
Co-developed-by: Peter Krystad <peter.krystad@linux.intel.com>
Signed-off-by: Peter Krystad <peter.krystad@linux.intel.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 85712484
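
For readers following the MPTCP work, here is a minimal, hypothetical sketch of how a subflow send path might use the two helpers once they are exported. Only tcp_send_mss(), tcp_push(), tcp_sk() and their signatures come from the patch below; the function name and surrounding logic are invented for illustration.

/* Hypothetical caller, not part of this patch: illustrates the intended
 * call sequence on a subflow socket (caller holds the socket lock).
 */
#include <net/tcp.h>

static void mptcp_subflow_push(struct sock *ssk, int msg_flags)
{
	struct tcp_sock *tp = tcp_sk(ssk);
	int size_goal = 0;
	int mss_now;

	/* Current MSS and preferred skb size goal for this subflow. */
	mss_now = tcp_send_mss(ssk, &size_goal, msg_flags);

	/* ... data would be queued on the subflow write queue here ... */

	/* Kick transmission, honouring Nagle and the size goal. */
	tcp_push(ssk, msg_flags, mss_now, tp->nonagle, size_goal);
}
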
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -330,6 +330,9 @@ int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
 			size_t size, int flags);
 ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
 			 size_t size, int flags);
+int tcp_send_mss(struct sock *sk, int *size_goal, int flags);
+void tcp_push(struct sock *sk, int flags, int mss_now, int nonagle,
+	      int size_goal);
 void tcp_release_cb(struct sock *sk);
 void tcp_wfree(struct sk_buff *skb);
 void tcp_write_timer_handler(struct sock *sk);
@@ -2011,6 +2014,11 @@ struct tcp_request_sock_ops {
 			   enum tcp_synack_type synack_type);
 };
 
+extern const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops;
+#if IS_ENABLED(CONFIG_IPV6)
+extern const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops;
+#endif
+
 #ifdef CONFIG_SYN_COOKIES
 static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
 					 const struct sock *sk, struct sk_buff *skb,
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -690,8 +690,8 @@ static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
 	       refcount_read(&sk->sk_wmem_alloc) > skb->truesize;
 }
 
-static void tcp_push(struct sock *sk, int flags, int mss_now,
-		     int nonagle, int size_goal)
+void tcp_push(struct sock *sk, int flags, int mss_now,
+	      int nonagle, int size_goal)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
@@ -925,7 +925,7 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
 	return max(size_goal, mss_now);
 }
 
-static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
+int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
 {
 	int mss_now;
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1426,7 +1426,7 @@ struct request_sock_ops tcp_request_sock_ops __read_mostly = {
 	.syn_ack_timeout =	tcp_syn_ack_timeout,
 };
 
-static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
+const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
 	.mss_clamp	=	TCP_MSS_DEFAULT,
 #ifdef CONFIG_TCP_MD5SIG
 	.req_md5_lookup	=	tcp_v4_md5_lookup,
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -75,7 +75,7 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
 
 static const struct inet_connection_sock_af_ops ipv6_mapped;
-static const struct inet_connection_sock_af_ops ipv6_specific;
+const struct inet_connection_sock_af_ops ipv6_specific;
 #ifdef CONFIG_TCP_MD5SIG
 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
@@ -819,7 +819,7 @@ struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
 	.syn_ack_timeout =	tcp_syn_ack_timeout,
 };
 
-static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
+const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
 	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
 				sizeof(struct ipv6hdr),
 #ifdef CONFIG_TCP_MD5SIG
@@ -1794,7 +1794,7 @@ static struct timewait_sock_ops tcp6_timewait_sock_ops = {
 	.twsk_destructor = tcp_twsk_destructor,
 };
 
-static const struct inet_connection_sock_af_ops ipv6_specific = {
+const struct inet_connection_sock_af_ops ipv6_specific = {
 	.queue_xmit	   = inet6_csk_xmit,
 	.send_check	   = tcp_v6_send_check,
 	.rebuild_header	   = inet6_sk_rebuild_header,
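
Similarly, a hypothetical sketch of how subflow creation could reuse the ops structs made visible above: copy TCP's stock request-sock ops and override individual callbacks. The subflow_* names are invented for this sketch; only tcp_request_sock_ipv4_ops and tcp_request_sock_ipv6_ops (declared in <net/tcp.h> by this patch) are real symbols.

/* Hypothetical subflow setup, not part of this patch. */
#include <linux/init.h>
#include <net/tcp.h>

static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops;
#if IS_ENABLED(CONFIG_IPV6)
static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops;
#endif

static void __init subflow_init_request_sock_ops(void)
{
	/* Start from TCP's defaults (mss_clamp, MD5 hooks, ...), */
	subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops;
	/* ...then override individual callbacks for subflow handling here. */

#if IS_ENABLED(CONFIG_IPV6)
	subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
#endif
}

The now non-static ipv6_specific could be copied the same way when a subflow needs to adjust the IPv6 connection-level af_ops.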