Commit 7ea851d1 authored by Florian Westphal, committed by Jakub Kicinski

tcp: merge 'init_req' and 'route_req' functions

The Multipath TCP standard (RFC 8684) says that an MPTCP host should send
a TCP reset if the token in an MP_JOIN request is unknown.

At this time we don't do this: the 3whs completes and the 'new subflow'
is reset only afterwards.  There are two ways to allow MPTCP to send the
reset.

1. Override the 'send_synack' callback and emit the reset from there.
   The drawback is that the request socket gets inserted into the
   listener's queue just to be removed again right away.

2. Send the reset from the 'route_req' function instead.
   This avoids the 'add & remove request socket' dance, but route_req
   lacks the skb that is required to send the TCP reset.

Instead of adding the skb to that function for MPTCP's sake alone,
Paolo suggested merging the init_req and route_req functions.

This removes one indirect call from the SYN processing path and provides
the skb to the merged function at the same time.

'send reset on unknown mptcp join token' is added in the next patch.
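For illustration only, here is a rough sketch of how the merged callback could be used once that follow-up lands. This is not the actual next patch: subflow_token_is_known() is an invented placeholder, and the reset is sent through the generic request-sock hook tcp_request_sock_ops.send_reset(); the real change may look different.

/* Hypothetical sketch -- not the follow-up patch itself.
 * The point: route_req now has the skb, so an MPTCP implementation can
 * send a TCP reset and return NULL before the request socket is queued.
 */
static struct dst_entry *subflow_v4_route_req(const struct sock *sk,
					      struct sk_buff *skb,
					      struct flowi *fl,
					      struct request_sock *req)
{
	struct dst_entry *dst;

	tcp_rsk(req)->is_mptcp = 1;

	/* Regular TCP part: init the request sock and route the reply. */
	dst = tcp_request_sock_ipv4_ops.route_req(sk, skb, fl, req);
	if (!dst)
		return NULL;

	if (!subflow_token_is_known(req)) {	/* invented placeholder check */
		dst_release(dst);
		/* RFC 8684: unknown MP_JOIN token -> reset the subflow. */
		tcp_request_sock_ops.send_reset(sk, skb);
		return NULL;
	}

	subflow_init_req(req, sk, skb);
	return dst;
}

The key point is that route_req runs before the request socket is inserted anywhere, so rejecting the join costs neither an insertion into nor a removal from the listener queue.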
Suggested-by: Paolo Abeni <pabeni@redhat.com>
Cc: Eric Dumazet <edumazet@google.com>
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 41dd9596
@@ -2007,15 +2007,14 @@ struct tcp_request_sock_ops {
 						  const struct sock *sk,
 						  const struct sk_buff *skb);
 #endif
-	void (*init_req)(struct request_sock *req,
-			 const struct sock *sk_listener,
-			 struct sk_buff *skb);
 #ifdef CONFIG_SYN_COOKIES
 	__u32 (*cookie_init_seq)(const struct sk_buff *skb,
 				 __u16 *mss);
 #endif
-	struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl,
-				       const struct request_sock *req);
+	struct dst_entry *(*route_req)(const struct sock *sk,
+				       struct sk_buff *skb,
+				       struct flowi *fl,
+				       struct request_sock *req);
 	u32 (*init_seq)(const struct sk_buff *skb);
 	u32 (*init_ts_off)(const struct net *net, const struct sk_buff *skb);
 	int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
@@ -6799,18 +6799,13 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
 	/* Note: tcp_v6_init_req() might override ir_iif for link locals */
 	inet_rsk(req)->ir_iif = inet_request_bound_dev_if(sk, skb);
 
-	af_ops->init_req(req, sk, skb);
-
-	if (security_inet_conn_request(sk, skb, req))
+	dst = af_ops->route_req(sk, skb, &fl, req);
+	if (!dst)
 		goto drop_and_free;
 
 	if (tmp_opt.tstamp_ok)
 		tcp_rsk(req)->ts_off = af_ops->init_ts_off(net, skb);
 
-	dst = af_ops->route_req(sk, &fl, req);
-	if (!dst)
-		goto drop_and_free;
-
 	if (!want_cookie && !isn) {
 		/* Kill the following clause, if you dislike this way. */
 		if (!net->ipv4.sysctl_tcp_syncookies &&
@@ -1444,9 +1444,15 @@ static void tcp_v4_init_req(struct request_sock *req,
 }
 
 static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
+					  struct sk_buff *skb,
 					  struct flowi *fl,
-					  const struct request_sock *req)
+					  struct request_sock *req)
 {
+	tcp_v4_init_req(req, sk, skb);
+
+	if (security_inet_conn_request(sk, skb, req))
+		return NULL;
+
 	return inet_csk_route_req(sk, &fl->u.ip4, req);
 }
 
@@ -1466,7 +1472,6 @@ const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
 	.req_md5_lookup = tcp_v4_md5_lookup,
 	.calc_md5_hash = tcp_v4_md5_hash_skb,
 #endif
-	.init_req = tcp_v4_init_req,
 #ifdef CONFIG_SYN_COOKIES
 	.cookie_init_seq = cookie_v4_init_sequence,
 #endif
@@ -828,9 +828,15 @@ static void tcp_v6_init_req(struct request_sock *req,
 }
 
 static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
+					  struct sk_buff *skb,
 					  struct flowi *fl,
-					  const struct request_sock *req)
+					  struct request_sock *req)
 {
+	tcp_v6_init_req(req, sk, skb);
+
+	if (security_inet_conn_request(sk, skb, req))
+		return NULL;
+
 	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
 }
 
@@ -851,7 +857,6 @@ const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
 	.req_md5_lookup = tcp_v6_md5_lookup,
 	.calc_md5_hash = tcp_v6_md5_hash_skb,
 #endif
-	.init_req = tcp_v6_init_req,
 #ifdef CONFIG_SYN_COOKIES
 	.cookie_init_seq = cookie_v6_init_sequence,
 #endif
@@ -228,27 +228,39 @@ int mptcp_subflow_init_cookie_req(struct request_sock *req,
 }
 EXPORT_SYMBOL_GPL(mptcp_subflow_init_cookie_req);
 
-static void subflow_v4_init_req(struct request_sock *req,
-				const struct sock *sk_listener,
-				struct sk_buff *skb)
+static struct dst_entry *subflow_v4_route_req(const struct sock *sk,
+					      struct sk_buff *skb,
+					      struct flowi *fl,
+					      struct request_sock *req)
 {
+	struct dst_entry *dst;
+
 	tcp_rsk(req)->is_mptcp = 1;
 
-	tcp_request_sock_ipv4_ops.init_req(req, sk_listener, skb);
+	dst = tcp_request_sock_ipv4_ops.route_req(sk, skb, fl, req);
+	if (!dst)
+		return NULL;
 
-	subflow_init_req(req, sk_listener, skb);
+	subflow_init_req(req, sk, skb);
+	return dst;
 }
 
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
-static void subflow_v6_init_req(struct request_sock *req,
-				const struct sock *sk_listener,
-				struct sk_buff *skb)
+static struct dst_entry *subflow_v6_route_req(const struct sock *sk,
+					      struct sk_buff *skb,
+					      struct flowi *fl,
+					      struct request_sock *req)
 {
+	struct dst_entry *dst;
+
 	tcp_rsk(req)->is_mptcp = 1;
 
-	tcp_request_sock_ipv6_ops.init_req(req, sk_listener, skb);
+	dst = tcp_request_sock_ipv6_ops.route_req(sk, skb, fl, req);
+	if (!dst)
+		return NULL;
 
-	subflow_init_req(req, sk_listener, skb);
+	subflow_init_req(req, sk, skb);
+	return dst;
 }
 #endif
 
@@ -1388,7 +1400,7 @@ void __init mptcp_subflow_init(void)
 		panic("MPTCP: failed to init subflow request sock ops\n");
 
 	subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops;
-	subflow_request_sock_ipv4_ops.init_req = subflow_v4_init_req;
+	subflow_request_sock_ipv4_ops.route_req = subflow_v4_route_req;
 
 	subflow_specific = ipv4_specific;
 	subflow_specific.conn_request = subflow_v4_conn_request;
@@ -1397,7 +1409,7 @@ void __init mptcp_subflow_init(void)
 
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
 	subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
-	subflow_request_sock_ipv6_ops.init_req = subflow_v6_init_req;
+	subflow_request_sock_ipv6_ops.route_req = subflow_v6_route_req;
 
 	subflow_v6_specific = ipv6_specific;
 	subflow_v6_specific.conn_request = subflow_v6_conn_request;