Commit 16c0cd07 authored by David S. Miller

Merge branch 'net-preserve-sock-reference-when-scrubbing-the-skb'

Flavio Leitner says:

====================
net: preserve sock reference when scrubbing the skb.

The sock reference is lost when scrubbing the packet, and that breaks
TSQ (TCP Small Queues) and XPS (Transmit Packet Steering), causing a
performance impact of about 50% on a single TCP stream crossing
network namespaces.

XPS breaks because the queue mapping stored in the socket is no longer
available, so a different, effectively random queue might be selected
whenever the stack needs to transmit something on its own, like a TCP
ACK or a TCP retransmission. That causes packet reordering and/or
performance issues.
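
For context, transmit queue selection looks roughly like the sketch
below. This is a loose paraphrase, not verbatim kernel code;
pick_tx_queue() is a hypothetical stand-in for the logic in
__netdev_pick_tx(). Without skb->sk there is no cached mapping to
reuse, so every transmit falls back to hashing:

	/* Hypothetical sketch of transmit queue selection. */
	static u16 pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
	{
		struct sock *sk = skb->sk;
		int queue = sk ? sk_tx_queue_get(sk) : -1;

		if (queue < 0) {
			queue = skb_tx_hash(dev, skb);	/* re-hash every time */
			if (sk && sk_fullsock(sk))
				sk_tx_queue_set(sk, queue);	/* cache in the socket */
		}
		return (u16)queue;
	}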

TSQ breaks because scrubbing orphans the packet while it is still on
the host, so packets keep being queued, contributing to the
bufferbloat problem.
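
That happens because skb_orphan() runs the skb destructor early; for
TCP the destructor is tcp_wfree(), which releases the sk_wmem_alloc
budget that TSQ uses to cap what is in flight below the stack. A
simplified sketch of the helper (the real one also handles the
no-destructor case):

	static inline void skb_orphan(struct sk_buff *skb)
	{
		if (skb->destructor) {
			skb->destructor(skb);	/* tcp_wfree(): credits the TSQ budget */
			skb->destructor = NULL;
			skb->sk = NULL;		/* socket reference is gone */
		}
	}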

Preserving the sock reference fixes both issues. The socket is
orphaned anyway in the receive path before any relevant action, but
the transmit side needs the extra checks added in the first patch.

The first patch updates netfilter to check that the socket's netns is
local before using it.
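
The check follows one pattern throughout, as it appears in the
transmit-side hunks below:

	struct sock *sk = skb->sk;

	/* only trust the socket if it belongs to the netns we run in */
	if (sk && !net_eq(net, sock_net(sk)))
		sk = NULL;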

The second patch removes the skb_orphan() call from skb_scrub_packet()
and improves the documentation.
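
With that change, the cross-netns tail of skb_scrub_packet() reduces
to the following (a sketch based on the hunk below; the unconditional
field resets earlier in the function are elided):

	void skb_scrub_packet(struct sk_buff *skb, bool xnet)
	{
		/* ... unconditional resets (dst, secpath, conntrack, ...) ... */
		if (!xnet)
			return;

		ipvs_reset(skb);
		/* skb_orphan() removed: skb->sk now survives the crossing */
		skb->mark = 0;
	}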

ChangeLog:
- split into two (Eric)
- addressed Paolo's offline feedback to swap the checks in xt_socket.c
  to preserve original behavior.
- improved ip-sysctl.txt (reported by Cong)
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 003504a2 9c4c3252
@@ -733,11 +733,11 @@ tcp_limit_output_bytes - INTEGER
 	Controls TCP Small Queue limit per tcp socket.
 	TCP bulk sender tends to increase packets in flight until it
 	gets losses notifications. With SNDBUF autotuning, this can
-	result in a large amount of packets queued in qdisc/device
-	on the local machine, hurting latency of other flows, for
-	typical pfifo_fast qdiscs.
-	tcp_limit_output_bytes limits the number of bytes on qdisc
-	or device to reduce artificial RTT/cwnd and reduce bufferbloat.
+	result in a large amount of packets queued on the local machine
+	(e.g.: qdiscs, CPU backlog, or device) hurting latency of other
+	flows, for typical pfifo_fast qdiscs. tcp_limit_output_bytes
+	limits the number of bytes on qdisc or device to reduce artificial
+	RTT/cwnd and reduce bufferbloat.
 	Default: 262144

 tcp_challenge_ack_limit - INTEGER
@@ -106,7 +106,8 @@ int nf_log_dump_udp_header(struct nf_log_buf *m, const struct sk_buff *skb,
 int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb,
 			   u8 proto, int fragment, unsigned int offset,
 			   unsigned int logflags);
-void nf_log_dump_sk_uid_gid(struct nf_log_buf *m, struct sock *sk);
+void nf_log_dump_sk_uid_gid(struct net *net, struct nf_log_buf *m,
+			    struct sock *sk);
 void nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf,
 			       unsigned int hooknum, const struct sk_buff *skb,
 			       const struct net_device *in,
@@ -4911,7 +4911,6 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
 		return;

 	ipvs_reset(skb);
-	skb_orphan(skb);
 	skb->mark = 0;
 }
 EXPORT_SYMBOL_GPL(skb_scrub_packet);
@@ -35,7 +35,7 @@ static const struct nf_loginfo default_loginfo = {
 };

 /* One level of recursion won't kill us */
-static void dump_ipv4_packet(struct nf_log_buf *m,
+static void dump_ipv4_packet(struct net *net, struct nf_log_buf *m,
 			     const struct nf_loginfo *info,
 			     const struct sk_buff *skb, unsigned int iphoff)
 {
@@ -183,7 +183,7 @@ static void dump_ipv4_packet(struct nf_log_buf *m,
 		/* Max length: 3+maxlen */
 		if (!iphoff) { /* Only recurse once. */
 			nf_log_buf_add(m, "[");
-			dump_ipv4_packet(m, info, skb,
+			dump_ipv4_packet(net, m, info, skb,
 				    iphoff + ih->ihl*4+sizeof(_icmph));
 			nf_log_buf_add(m, "] ");
 		}
@@ -251,7 +251,7 @@ static void dump_ipv4_packet(struct nf_log_buf *m,

 	/* Max length: 15 "UID=4294967295 " */
 	if ((logflags & NF_LOG_UID) && !iphoff)
-		nf_log_dump_sk_uid_gid(m, skb->sk);
+		nf_log_dump_sk_uid_gid(net, m, skb->sk);

 	/* Max length: 16 "MARK=0xFFFFFFFF " */
 	if (!iphoff && skb->mark)
@@ -333,7 +333,7 @@ static void nf_log_ip_packet(struct net *net, u_int8_t pf,
 	if (in != NULL)
 		dump_ipv4_mac_header(m, loginfo, skb);

-	dump_ipv4_packet(m, loginfo, skb, 0);
+	dump_ipv4_packet(net, m, loginfo, skb, 0);

 	nf_log_buf_close(m);
 }
@@ -36,7 +36,7 @@ static const struct nf_loginfo default_loginfo = {
 };

 /* One level of recursion won't kill us */
-static void dump_ipv6_packet(struct nf_log_buf *m,
+static void dump_ipv6_packet(struct net *net, struct nf_log_buf *m,
 			     const struct nf_loginfo *info,
 			     const struct sk_buff *skb, unsigned int ip6hoff,
 			     int recurse)
@@ -258,7 +258,7 @@ static void dump_ipv6_packet(struct nf_log_buf *m,
 			/* Max length: 3+maxlen */
 			if (recurse) {
 				nf_log_buf_add(m, "[");
-				dump_ipv6_packet(m, info, skb,
+				dump_ipv6_packet(net, m, info, skb,
 					    ptr + sizeof(_icmp6h), 0);
 				nf_log_buf_add(m, "] ");
 			}
@@ -278,7 +278,7 @@ static void dump_ipv6_packet(struct nf_log_buf *m,

 	/* Max length: 15 "UID=4294967295 " */
 	if ((logflags & NF_LOG_UID) && recurse)
-		nf_log_dump_sk_uid_gid(m, skb->sk);
+		nf_log_dump_sk_uid_gid(net, m, skb->sk);

 	/* Max length: 16 "MARK=0xFFFFFFFF " */
 	if (recurse && skb->mark)
@@ -365,7 +365,7 @@ static void nf_log_ip6_packet(struct net *net, u_int8_t pf,
 	if (in != NULL)
 		dump_ipv6_mac_header(m, loginfo, skb);

-	dump_ipv6_packet(m, loginfo, skb, skb_network_offset(skb), 1);
+	dump_ipv6_packet(net, m, loginfo, skb, skb_network_offset(skb), 1);

 	nf_log_buf_close(m);
 }
@@ -32,7 +32,7 @@ int nf_conntrack_broadcast_help(struct sk_buff *skb,
 	__be32 mask = 0;

 	/* we're only interested in locally generated packets */
-	if (skb->sk == NULL)
+	if (skb->sk == NULL || !net_eq(nf_ct_net(ct), sock_net(skb->sk)))
 		goto out;
 	if (rt == NULL || !(rt->rt_flags & RTCF_BROADCAST))
 		goto out;
@@ -132,9 +132,10 @@ int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb,
 }
 EXPORT_SYMBOL_GPL(nf_log_dump_tcp_header);

-void nf_log_dump_sk_uid_gid(struct nf_log_buf *m, struct sock *sk)
+void nf_log_dump_sk_uid_gid(struct net *net, struct nf_log_buf *m,
+			    struct sock *sk)
 {
-	if (!sk || !sk_fullsock(sk))
+	if (!sk || !sk_fullsock(sk) || !net_eq(net, sock_net(sk)))
 		return;

 	read_lock_bh(&sk->sk_callback_lock);
@@ -108,6 +108,7 @@ int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family)
 	struct flowi fl;
 	unsigned int hh_len;
 	struct dst_entry *dst;
+	struct sock *sk = skb->sk;
 	int err;

 	err = xfrm_decode_session(skb, &fl, family);
@@ -119,7 +120,10 @@ int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family)
 		dst = ((struct xfrm_dst *)dst)->route;
 	dst_hold(dst);

-	dst = xfrm_lookup(net, dst, &fl, skb->sk, 0);
+	if (sk && !net_eq(net, sock_net(sk)))
+		sk = NULL;
+
+	dst = xfrm_lookup(net, dst, &fl, sk, 0);
 	if (IS_ERR(dst))
 		return PTR_ERR(dst);

@@ -107,7 +107,8 @@ static void nft_meta_get_eval(const struct nft_expr *expr,
 		break;
 	case NFT_META_SKUID:
 		sk = skb_to_full_sk(skb);
-		if (!sk || !sk_fullsock(sk))
+		if (!sk || !sk_fullsock(sk) ||
+		    !net_eq(nft_net(pkt), sock_net(sk)))
 			goto err;

 		read_lock_bh(&sk->sk_callback_lock);
@@ -123,7 +124,8 @@ static void nft_meta_get_eval(const struct nft_expr *expr,
 		break;
 	case NFT_META_SKGID:
 		sk = skb_to_full_sk(skb);
-		if (!sk || !sk_fullsock(sk))
+		if (!sk || !sk_fullsock(sk) ||
+		    !net_eq(nft_net(pkt), sock_net(sk)))
 			goto err;

 		read_lock_bh(&sk->sk_callback_lock);
@@ -214,7 +216,8 @@ static void nft_meta_get_eval(const struct nft_expr *expr,
 #ifdef CONFIG_CGROUP_NET_CLASSID
 	case NFT_META_CGROUP:
 		sk = skb_to_full_sk(skb);
-		if (!sk || !sk_fullsock(sk))
+		if (!sk || !sk_fullsock(sk) ||
+		    !net_eq(nft_net(pkt), sock_net(sk)))
 			goto err;
 		*dest = sock_cgroup_classid(&sk->sk_cgrp_data);
 		break;
@@ -23,6 +23,9 @@ static void nft_socket_eval(const struct nft_expr *expr,
 	struct sock *sk = skb->sk;
 	u32 *dest = &regs->data[priv->dreg];

+	if (sk && !net_eq(nft_net(pkt), sock_net(sk)))
+		sk = NULL;
+
 	if (!sk)
 		switch(nft_pf(pkt)) {
 		case NFPROTO_IPV4:
@@ -39,7 +42,7 @@ static void nft_socket_eval(const struct nft_expr *expr,
 			return;
 	}

-	if(!sk) {
+	if (!sk) {
 		nft_reg_store8(dest, 0);
 		return;
 	}
@@ -72,8 +72,9 @@ static bool
 cgroup_mt_v0(const struct sk_buff *skb, struct xt_action_param *par)
 {
 	const struct xt_cgroup_info_v0 *info = par->matchinfo;
+	struct sock *sk = skb->sk;

-	if (skb->sk == NULL || !sk_fullsock(skb->sk))
+	if (!sk || !sk_fullsock(sk) || !net_eq(xt_net(par), sock_net(sk)))
 		return false;

 	return (info->id == sock_cgroup_classid(&skb->sk->sk_cgrp_data)) ^
@@ -85,8 +86,9 @@ static bool cgroup_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
 	const struct xt_cgroup_info_v1 *info = par->matchinfo;
 	struct sock_cgroup_data *skcd = &skb->sk->sk_cgrp_data;
 	struct cgroup *ancestor = info->priv;
+	struct sock *sk = skb->sk;

-	if (!skb->sk || !sk_fullsock(skb->sk))
+	if (!sk || !sk_fullsock(sk) || !net_eq(xt_net(par), sock_net(sk)))
 		return false;

 	if (ancestor)
@@ -67,7 +67,7 @@ owner_mt(const struct sk_buff *skb, struct xt_action_param *par)
 	struct sock *sk = skb_to_full_sk(skb);
 	struct net *net = xt_net(par);

-	if (sk == NULL || sk->sk_socket == NULL)
+	if (!sk || !sk->sk_socket || !net_eq(net, sock_net(sk)))
 		return (info->match ^ info->invert) == 0;
 	else if (info->match & info->invert & XT_OWNER_SOCKET)
 		/*
@@ -265,7 +265,8 @@ recent_mt(const struct sk_buff *skb, struct xt_action_param *par)
 	}

 	/* use TTL as seen before forwarding */
-	if (xt_out(par) != NULL && skb->sk == NULL)
+	if (xt_out(par) != NULL &&
+	    (!skb->sk || !net_eq(net, sock_net(skb->sk))))
 		ttl++;

 	spin_lock_bh(&recent_lock);
@@ -56,8 +56,12 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
 	struct sk_buff *pskb = (struct sk_buff *)skb;
 	struct sock *sk = skb->sk;

+	if (!net_eq(xt_net(par), sock_net(sk)))
+		sk = NULL;
+
 	if (!sk)
 		sk = nf_sk_lookup_slow_v4(xt_net(par), skb, xt_in(par));
+
 	if (sk) {
 		bool wildcard;
 		bool transparent = true;
@@ -113,8 +117,12 @@ socket_mt6_v1_v2_v3(const struct sk_buff *skb, struct xt_action_param *par)
 	struct sk_buff *pskb = (struct sk_buff *)skb;
 	struct sock *sk = skb->sk;

+	if (!net_eq(xt_net(par), sock_net(sk)))
+		sk = NULL;
+
 	if (!sk)
 		sk = nf_sk_lookup_slow_v6(xt_net(par), skb, xt_in(par));
+
 	if (sk) {
 		bool wildcard;
 		bool transparent = true;