Commit 16c3c913 authored by David S. Miller

Merge branch 'inet-factorize-sk_wmem_alloc-updates'

Eric Dumazet says:

====================
inet: factorize sk_wmem_alloc updates

While testing my inet defrag changes, I found that senders
could spend ~20% of cpu cycles in skb_set_owner_w() updating
sk->sk_wmem_alloc for every fragment they cook, competing
with TX completion of prior skbs possibly happening on other cpus.

One solution to this problem is to use alloc_skb() instead
of sock_wmalloc() and manually perform a single sk_wmem_alloc change.

This greatly increases speed for applications sending big UDP datagrams.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents c0725502 1f4c6eb2
......@@ -876,6 +876,7 @@ static int __ip_append_data(struct sock *sk,
unsigned int maxfraglen, fragheaderlen, maxnonfragsize;
int csummode = CHECKSUM_NONE;
struct rtable *rt = (struct rtable *)cork->dst;
unsigned int wmem_alloc_delta = 0;
u32 tskey = 0;
skb = skb_peek_tail(queue);
......@@ -971,11 +972,10 @@ static int __ip_append_data(struct sock *sk,
(flags & MSG_DONTWAIT), &err);
} else {
skb = NULL;
if (refcount_read(&sk->sk_wmem_alloc) <=
if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <=
2 * sk->sk_sndbuf)
skb = sock_wmalloc(sk,
alloclen + hh_len + 15, 1,
sk->sk_allocation);
skb = alloc_skb(alloclen + hh_len + 15,
sk->sk_allocation);
if (unlikely(!skb))
err = -ENOBUFS;
}
......@@ -1033,6 +1033,11 @@ static int __ip_append_data(struct sock *sk,
/*
* Put the packet on the pending queue.
*/
if (!skb->destructor) {
skb->destructor = sock_wfree;
skb->sk = sk;
wmem_alloc_delta += skb->truesize;
}
__skb_queue_tail(queue, skb);
continue;
}
......@@ -1079,12 +1084,13 @@ static int __ip_append_data(struct sock *sk,
skb->len += copy;
skb->data_len += copy;
skb->truesize += copy;
refcount_add(copy, &sk->sk_wmem_alloc);
wmem_alloc_delta += copy;
}
offset += copy;
length -= copy;
}
refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
return 0;
error_efault:
......@@ -1092,6 +1098,7 @@ static int __ip_append_data(struct sock *sk,
error:
cork->length -= length;
IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
return err;
}
......
......@@ -1259,6 +1259,7 @@ static int __ip6_append_data(struct sock *sk,
struct ipv6_txoptions *opt = v6_cork->opt;
int csummode = CHECKSUM_NONE;
unsigned int maxnonfragsize, headersize;
unsigned int wmem_alloc_delta = 0;
skb = skb_peek_tail(queue);
if (!skb) {
......@@ -1411,11 +1412,10 @@ static int __ip6_append_data(struct sock *sk,
(flags & MSG_DONTWAIT), &err);
} else {
skb = NULL;
if (refcount_read(&sk->sk_wmem_alloc) <=
if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <=
2 * sk->sk_sndbuf)
skb = sock_wmalloc(sk,
alloclen + hh_len, 1,
sk->sk_allocation);
skb = alloc_skb(alloclen + hh_len,
sk->sk_allocation);
if (unlikely(!skb))
err = -ENOBUFS;
}
......@@ -1474,6 +1474,11 @@ static int __ip6_append_data(struct sock *sk,
/*
* Put the packet on the pending queue
*/
if (!skb->destructor) {
skb->destructor = sock_wfree;
skb->sk = sk;
wmem_alloc_delta += skb->truesize;
}
__skb_queue_tail(queue, skb);
continue;
}
......@@ -1520,12 +1525,13 @@ static int __ip6_append_data(struct sock *sk,
skb->len += copy;
skb->data_len += copy;
skb->truesize += copy;
refcount_add(copy, &sk->sk_wmem_alloc);
wmem_alloc_delta += copy;
}
offset += copy;
length -= copy;
}
refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
return 0;
error_efault:
......@@ -1533,6 +1539,7 @@ static int __ip6_append_data(struct sock *sk,
error:
cork->length -= length;
IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
return err;
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment