Commit 16c0cd07 authored by David S. Miller

Merge branch 'net-preserve-sock-reference-when-scrubbing-the-skb'

Flavio Leitner says:

====================
net: preserve sock reference when scrubbing the skb.

The sock reference is lost when scrubbing the packet and that breaks
TSQ (TCP Small Queues) and XPS (Transmit Packet Steering) causing
performance impacts of about 50% in a single TCP stream when crossing
network namespaces.

XPS breaks because the queue mapping stored in the socket is not
available, so another random queue might be selected when the stack
needs to transmit something like a TCP ACK, or TCP Retransmissions.
That causes packet re-ordering and/or performance issues.

TSQ breaks because it orphans the packet while it is still in the
host, so packets are queued contributing to the buffer bloat problem.

Preserving the sock reference fixes both issues. The socket is
orphaned anyway in the receiving path before any relevant action,
but the transmit side needs some extra checking, included in the
first patch.

The first patch will update netfilter to check that the socket
netns is local before using it.

The second patch removes the skb_orphan() call from skb_scrub_packet()
and improves the documentation.

ChangeLog:
- split into two (Eric)
- addressed Paolo's offline feedback to swap the checks in xt_socket.c
  to preserve original behavior.
- improved ip-sysctl.txt (reported by Cong)
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 003504a2 9c4c3252
...@@ -733,11 +733,11 @@ tcp_limit_output_bytes - INTEGER ...@@ -733,11 +733,11 @@ tcp_limit_output_bytes - INTEGER
Controls TCP Small Queue limit per tcp socket. Controls TCP Small Queue limit per tcp socket.
TCP bulk sender tends to increase packets in flight until it TCP bulk sender tends to increase packets in flight until it
gets losses notifications. With SNDBUF autotuning, this can gets losses notifications. With SNDBUF autotuning, this can
result in a large amount of packets queued in qdisc/device result in a large amount of packets queued on the local machine
on the local machine, hurting latency of other flows, for (e.g.: qdiscs, CPU backlog, or device) hurting latency of other
typical pfifo_fast qdiscs. flows, for typical pfifo_fast qdiscs. tcp_limit_output_bytes
tcp_limit_output_bytes limits the number of bytes on qdisc limits the number of bytes on qdisc or device to reduce artificial
or device to reduce artificial RTT/cwnd and reduce bufferbloat. RTT/cwnd and reduce bufferbloat.
Default: 262144 Default: 262144
tcp_challenge_ack_limit - INTEGER tcp_challenge_ack_limit - INTEGER
......
...@@ -106,7 +106,8 @@ int nf_log_dump_udp_header(struct nf_log_buf *m, const struct sk_buff *skb, ...@@ -106,7 +106,8 @@ int nf_log_dump_udp_header(struct nf_log_buf *m, const struct sk_buff *skb,
int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb, int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb,
u8 proto, int fragment, unsigned int offset, u8 proto, int fragment, unsigned int offset,
unsigned int logflags); unsigned int logflags);
void nf_log_dump_sk_uid_gid(struct nf_log_buf *m, struct sock *sk); void nf_log_dump_sk_uid_gid(struct net *net, struct nf_log_buf *m,
struct sock *sk);
void nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf, void nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf,
unsigned int hooknum, const struct sk_buff *skb, unsigned int hooknum, const struct sk_buff *skb,
const struct net_device *in, const struct net_device *in,
......
...@@ -4911,7 +4911,6 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet) ...@@ -4911,7 +4911,6 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
return; return;
ipvs_reset(skb); ipvs_reset(skb);
skb_orphan(skb);
skb->mark = 0; skb->mark = 0;
} }
EXPORT_SYMBOL_GPL(skb_scrub_packet); EXPORT_SYMBOL_GPL(skb_scrub_packet);
......
...@@ -35,7 +35,7 @@ static const struct nf_loginfo default_loginfo = { ...@@ -35,7 +35,7 @@ static const struct nf_loginfo default_loginfo = {
}; };
/* One level of recursion won't kill us */ /* One level of recursion won't kill us */
static void dump_ipv4_packet(struct nf_log_buf *m, static void dump_ipv4_packet(struct net *net, struct nf_log_buf *m,
const struct nf_loginfo *info, const struct nf_loginfo *info,
const struct sk_buff *skb, unsigned int iphoff) const struct sk_buff *skb, unsigned int iphoff)
{ {
...@@ -183,7 +183,7 @@ static void dump_ipv4_packet(struct nf_log_buf *m, ...@@ -183,7 +183,7 @@ static void dump_ipv4_packet(struct nf_log_buf *m,
/* Max length: 3+maxlen */ /* Max length: 3+maxlen */
if (!iphoff) { /* Only recurse once. */ if (!iphoff) { /* Only recurse once. */
nf_log_buf_add(m, "["); nf_log_buf_add(m, "[");
dump_ipv4_packet(m, info, skb, dump_ipv4_packet(net, m, info, skb,
iphoff + ih->ihl*4+sizeof(_icmph)); iphoff + ih->ihl*4+sizeof(_icmph));
nf_log_buf_add(m, "] "); nf_log_buf_add(m, "] ");
} }
...@@ -251,7 +251,7 @@ static void dump_ipv4_packet(struct nf_log_buf *m, ...@@ -251,7 +251,7 @@ static void dump_ipv4_packet(struct nf_log_buf *m,
/* Max length: 15 "UID=4294967295 " */ /* Max length: 15 "UID=4294967295 " */
if ((logflags & NF_LOG_UID) && !iphoff) if ((logflags & NF_LOG_UID) && !iphoff)
nf_log_dump_sk_uid_gid(m, skb->sk); nf_log_dump_sk_uid_gid(net, m, skb->sk);
/* Max length: 16 "MARK=0xFFFFFFFF " */ /* Max length: 16 "MARK=0xFFFFFFFF " */
if (!iphoff && skb->mark) if (!iphoff && skb->mark)
...@@ -333,7 +333,7 @@ static void nf_log_ip_packet(struct net *net, u_int8_t pf, ...@@ -333,7 +333,7 @@ static void nf_log_ip_packet(struct net *net, u_int8_t pf,
if (in != NULL) if (in != NULL)
dump_ipv4_mac_header(m, loginfo, skb); dump_ipv4_mac_header(m, loginfo, skb);
dump_ipv4_packet(m, loginfo, skb, 0); dump_ipv4_packet(net, m, loginfo, skb, 0);
nf_log_buf_close(m); nf_log_buf_close(m);
} }
......
...@@ -36,7 +36,7 @@ static const struct nf_loginfo default_loginfo = { ...@@ -36,7 +36,7 @@ static const struct nf_loginfo default_loginfo = {
}; };
/* One level of recursion won't kill us */ /* One level of recursion won't kill us */
static void dump_ipv6_packet(struct nf_log_buf *m, static void dump_ipv6_packet(struct net *net, struct nf_log_buf *m,
const struct nf_loginfo *info, const struct nf_loginfo *info,
const struct sk_buff *skb, unsigned int ip6hoff, const struct sk_buff *skb, unsigned int ip6hoff,
int recurse) int recurse)
...@@ -258,7 +258,7 @@ static void dump_ipv6_packet(struct nf_log_buf *m, ...@@ -258,7 +258,7 @@ static void dump_ipv6_packet(struct nf_log_buf *m,
/* Max length: 3+maxlen */ /* Max length: 3+maxlen */
if (recurse) { if (recurse) {
nf_log_buf_add(m, "["); nf_log_buf_add(m, "[");
dump_ipv6_packet(m, info, skb, dump_ipv6_packet(net, m, info, skb,
ptr + sizeof(_icmp6h), 0); ptr + sizeof(_icmp6h), 0);
nf_log_buf_add(m, "] "); nf_log_buf_add(m, "] ");
} }
...@@ -278,7 +278,7 @@ static void dump_ipv6_packet(struct nf_log_buf *m, ...@@ -278,7 +278,7 @@ static void dump_ipv6_packet(struct nf_log_buf *m,
/* Max length: 15 "UID=4294967295 " */ /* Max length: 15 "UID=4294967295 " */
if ((logflags & NF_LOG_UID) && recurse) if ((logflags & NF_LOG_UID) && recurse)
nf_log_dump_sk_uid_gid(m, skb->sk); nf_log_dump_sk_uid_gid(net, m, skb->sk);
/* Max length: 16 "MARK=0xFFFFFFFF " */ /* Max length: 16 "MARK=0xFFFFFFFF " */
if (recurse && skb->mark) if (recurse && skb->mark)
...@@ -365,7 +365,7 @@ static void nf_log_ip6_packet(struct net *net, u_int8_t pf, ...@@ -365,7 +365,7 @@ static void nf_log_ip6_packet(struct net *net, u_int8_t pf,
if (in != NULL) if (in != NULL)
dump_ipv6_mac_header(m, loginfo, skb); dump_ipv6_mac_header(m, loginfo, skb);
dump_ipv6_packet(m, loginfo, skb, skb_network_offset(skb), 1); dump_ipv6_packet(net, m, loginfo, skb, skb_network_offset(skb), 1);
nf_log_buf_close(m); nf_log_buf_close(m);
} }
......
...@@ -32,7 +32,7 @@ int nf_conntrack_broadcast_help(struct sk_buff *skb, ...@@ -32,7 +32,7 @@ int nf_conntrack_broadcast_help(struct sk_buff *skb,
__be32 mask = 0; __be32 mask = 0;
/* we're only interested in locally generated packets */ /* we're only interested in locally generated packets */
if (skb->sk == NULL) if (skb->sk == NULL || !net_eq(nf_ct_net(ct), sock_net(skb->sk)))
goto out; goto out;
if (rt == NULL || !(rt->rt_flags & RTCF_BROADCAST)) if (rt == NULL || !(rt->rt_flags & RTCF_BROADCAST))
goto out; goto out;
......
...@@ -132,9 +132,10 @@ int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb, ...@@ -132,9 +132,10 @@ int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb,
} }
EXPORT_SYMBOL_GPL(nf_log_dump_tcp_header); EXPORT_SYMBOL_GPL(nf_log_dump_tcp_header);
void nf_log_dump_sk_uid_gid(struct nf_log_buf *m, struct sock *sk) void nf_log_dump_sk_uid_gid(struct net *net, struct nf_log_buf *m,
struct sock *sk)
{ {
if (!sk || !sk_fullsock(sk)) if (!sk || !sk_fullsock(sk) || !net_eq(net, sock_net(sk)))
return; return;
read_lock_bh(&sk->sk_callback_lock); read_lock_bh(&sk->sk_callback_lock);
......
...@@ -108,6 +108,7 @@ int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family) ...@@ -108,6 +108,7 @@ int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family)
struct flowi fl; struct flowi fl;
unsigned int hh_len; unsigned int hh_len;
struct dst_entry *dst; struct dst_entry *dst;
struct sock *sk = skb->sk;
int err; int err;
err = xfrm_decode_session(skb, &fl, family); err = xfrm_decode_session(skb, &fl, family);
...@@ -119,7 +120,10 @@ int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family) ...@@ -119,7 +120,10 @@ int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family)
dst = ((struct xfrm_dst *)dst)->route; dst = ((struct xfrm_dst *)dst)->route;
dst_hold(dst); dst_hold(dst);
dst = xfrm_lookup(net, dst, &fl, skb->sk, 0); if (sk && !net_eq(net, sock_net(sk)))
sk = NULL;
dst = xfrm_lookup(net, dst, &fl, sk, 0);
if (IS_ERR(dst)) if (IS_ERR(dst))
return PTR_ERR(dst); return PTR_ERR(dst);
......
...@@ -107,7 +107,8 @@ static void nft_meta_get_eval(const struct nft_expr *expr, ...@@ -107,7 +107,8 @@ static void nft_meta_get_eval(const struct nft_expr *expr,
break; break;
case NFT_META_SKUID: case NFT_META_SKUID:
sk = skb_to_full_sk(skb); sk = skb_to_full_sk(skb);
if (!sk || !sk_fullsock(sk)) if (!sk || !sk_fullsock(sk) ||
!net_eq(nft_net(pkt), sock_net(sk)))
goto err; goto err;
read_lock_bh(&sk->sk_callback_lock); read_lock_bh(&sk->sk_callback_lock);
...@@ -123,7 +124,8 @@ static void nft_meta_get_eval(const struct nft_expr *expr, ...@@ -123,7 +124,8 @@ static void nft_meta_get_eval(const struct nft_expr *expr,
break; break;
case NFT_META_SKGID: case NFT_META_SKGID:
sk = skb_to_full_sk(skb); sk = skb_to_full_sk(skb);
if (!sk || !sk_fullsock(sk)) if (!sk || !sk_fullsock(sk) ||
!net_eq(nft_net(pkt), sock_net(sk)))
goto err; goto err;
read_lock_bh(&sk->sk_callback_lock); read_lock_bh(&sk->sk_callback_lock);
...@@ -214,7 +216,8 @@ static void nft_meta_get_eval(const struct nft_expr *expr, ...@@ -214,7 +216,8 @@ static void nft_meta_get_eval(const struct nft_expr *expr,
#ifdef CONFIG_CGROUP_NET_CLASSID #ifdef CONFIG_CGROUP_NET_CLASSID
case NFT_META_CGROUP: case NFT_META_CGROUP:
sk = skb_to_full_sk(skb); sk = skb_to_full_sk(skb);
if (!sk || !sk_fullsock(sk)) if (!sk || !sk_fullsock(sk) ||
!net_eq(nft_net(pkt), sock_net(sk)))
goto err; goto err;
*dest = sock_cgroup_classid(&sk->sk_cgrp_data); *dest = sock_cgroup_classid(&sk->sk_cgrp_data);
break; break;
......
...@@ -23,6 +23,9 @@ static void nft_socket_eval(const struct nft_expr *expr, ...@@ -23,6 +23,9 @@ static void nft_socket_eval(const struct nft_expr *expr,
struct sock *sk = skb->sk; struct sock *sk = skb->sk;
u32 *dest = &regs->data[priv->dreg]; u32 *dest = &regs->data[priv->dreg];
if (sk && !net_eq(nft_net(pkt), sock_net(sk)))
sk = NULL;
if (!sk) if (!sk)
switch(nft_pf(pkt)) { switch(nft_pf(pkt)) {
case NFPROTO_IPV4: case NFPROTO_IPV4:
...@@ -39,7 +42,7 @@ static void nft_socket_eval(const struct nft_expr *expr, ...@@ -39,7 +42,7 @@ static void nft_socket_eval(const struct nft_expr *expr,
return; return;
} }
if(!sk) { if (!sk) {
nft_reg_store8(dest, 0); nft_reg_store8(dest, 0);
return; return;
} }
......
...@@ -72,8 +72,9 @@ static bool ...@@ -72,8 +72,9 @@ static bool
cgroup_mt_v0(const struct sk_buff *skb, struct xt_action_param *par) cgroup_mt_v0(const struct sk_buff *skb, struct xt_action_param *par)
{ {
const struct xt_cgroup_info_v0 *info = par->matchinfo; const struct xt_cgroup_info_v0 *info = par->matchinfo;
struct sock *sk = skb->sk;
if (skb->sk == NULL || !sk_fullsock(skb->sk)) if (!sk || !sk_fullsock(sk) || !net_eq(xt_net(par), sock_net(sk)))
return false; return false;
return (info->id == sock_cgroup_classid(&skb->sk->sk_cgrp_data)) ^ return (info->id == sock_cgroup_classid(&skb->sk->sk_cgrp_data)) ^
...@@ -85,8 +86,9 @@ static bool cgroup_mt_v1(const struct sk_buff *skb, struct xt_action_param *par) ...@@ -85,8 +86,9 @@ static bool cgroup_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
const struct xt_cgroup_info_v1 *info = par->matchinfo; const struct xt_cgroup_info_v1 *info = par->matchinfo;
struct sock_cgroup_data *skcd = &skb->sk->sk_cgrp_data; struct sock_cgroup_data *skcd = &skb->sk->sk_cgrp_data;
struct cgroup *ancestor = info->priv; struct cgroup *ancestor = info->priv;
struct sock *sk = skb->sk;
if (!skb->sk || !sk_fullsock(skb->sk)) if (!sk || !sk_fullsock(sk) || !net_eq(xt_net(par), sock_net(sk)))
return false; return false;
if (ancestor) if (ancestor)
......
...@@ -67,7 +67,7 @@ owner_mt(const struct sk_buff *skb, struct xt_action_param *par) ...@@ -67,7 +67,7 @@ owner_mt(const struct sk_buff *skb, struct xt_action_param *par)
struct sock *sk = skb_to_full_sk(skb); struct sock *sk = skb_to_full_sk(skb);
struct net *net = xt_net(par); struct net *net = xt_net(par);
if (sk == NULL || sk->sk_socket == NULL) if (!sk || !sk->sk_socket || !net_eq(net, sock_net(sk)))
return (info->match ^ info->invert) == 0; return (info->match ^ info->invert) == 0;
else if (info->match & info->invert & XT_OWNER_SOCKET) else if (info->match & info->invert & XT_OWNER_SOCKET)
/* /*
......
...@@ -265,7 +265,8 @@ recent_mt(const struct sk_buff *skb, struct xt_action_param *par) ...@@ -265,7 +265,8 @@ recent_mt(const struct sk_buff *skb, struct xt_action_param *par)
} }
/* use TTL as seen before forwarding */ /* use TTL as seen before forwarding */
if (xt_out(par) != NULL && skb->sk == NULL) if (xt_out(par) != NULL &&
(!skb->sk || !net_eq(net, sock_net(skb->sk))))
ttl++; ttl++;
spin_lock_bh(&recent_lock); spin_lock_bh(&recent_lock);
......
...@@ -56,8 +56,12 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par, ...@@ -56,8 +56,12 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
struct sk_buff *pskb = (struct sk_buff *)skb; struct sk_buff *pskb = (struct sk_buff *)skb;
struct sock *sk = skb->sk; struct sock *sk = skb->sk;
if (!net_eq(xt_net(par), sock_net(sk)))
sk = NULL;
if (!sk) if (!sk)
sk = nf_sk_lookup_slow_v4(xt_net(par), skb, xt_in(par)); sk = nf_sk_lookup_slow_v4(xt_net(par), skb, xt_in(par));
if (sk) { if (sk) {
bool wildcard; bool wildcard;
bool transparent = true; bool transparent = true;
...@@ -113,8 +117,12 @@ socket_mt6_v1_v2_v3(const struct sk_buff *skb, struct xt_action_param *par) ...@@ -113,8 +117,12 @@ socket_mt6_v1_v2_v3(const struct sk_buff *skb, struct xt_action_param *par)
struct sk_buff *pskb = (struct sk_buff *)skb; struct sk_buff *pskb = (struct sk_buff *)skb;
struct sock *sk = skb->sk; struct sock *sk = skb->sk;
if (!net_eq(xt_net(par), sock_net(sk)))
sk = NULL;
if (!sk) if (!sk)
sk = nf_sk_lookup_slow_v6(xt_net(par), skb, xt_in(par)); sk = nf_sk_lookup_slow_v6(xt_net(par), skb, xt_in(par));
if (sk) { if (sk) {
bool wildcard; bool wildcard;
bool transparent = true; bool transparent = true;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment