Commit d6a4a104 authored by Eric Dumazet's avatar Eric Dumazet Committed by David S. Miller

tcp: GSO should be TSQ friendly

I noticed that TSQ (TCP Small Queues) was less effective when TSO is
turned off and GSO is on. If BQL is not enabled, TSQ then has no
effect.

It turns out the GSO engine frees the original gso_skb at the time the
fragments are generated and queued to the NIC.

We should instead call the tcp_wfree() destructor for the last fragment,
to keep the flow control as intended in TSQ. This effectively limits
the number of queued packets on qdisc + NIC layers.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Tom Herbert <therbert@google.com>
Cc: Yuchung Cheng <ycheng@google.com>
Cc: Nandita Dukkipati <nanditad@google.com>
Cc: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d14a489a
...@@ -370,6 +370,7 @@ extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, ...@@ -370,6 +370,7 @@ extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
extern int tcp_sendpage(struct sock *sk, struct page *page, int offset, extern int tcp_sendpage(struct sock *sk, struct page *page, int offset,
size_t size, int flags); size_t size, int flags);
extern void tcp_release_cb(struct sock *sk); extern void tcp_release_cb(struct sock *sk);
extern void tcp_wfree(struct sk_buff *skb);
extern void tcp_write_timer_handler(struct sock *sk); extern void tcp_write_timer_handler(struct sock *sk);
extern void tcp_delack_timer_handler(struct sock *sk); extern void tcp_delack_timer_handler(struct sock *sk);
extern int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg); extern int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
......
...@@ -2885,6 +2885,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, ...@@ -2885,6 +2885,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
__be32 delta; __be32 delta;
unsigned int oldlen; unsigned int oldlen;
unsigned int mss; unsigned int mss;
struct sk_buff *gso_skb = skb;
if (!pskb_may_pull(skb, sizeof(*th))) if (!pskb_may_pull(skb, sizeof(*th)))
goto out; goto out;
...@@ -2953,6 +2954,17 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, ...@@ -2953,6 +2954,17 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
th->cwr = 0; th->cwr = 0;
} while (skb->next); } while (skb->next);
/* Following permits TCP Small Queues to work well with GSO :
* The callback to TCP stack will be called at the time last frag
* is freed at TX completion, and not right now when gso_skb
* is freed by GSO engine
*/
if (gso_skb->destructor == tcp_wfree) {
swap(gso_skb->sk, skb->sk);
swap(gso_skb->destructor, skb->destructor);
swap(gso_skb->truesize, skb->truesize);
}
delta = htonl(oldlen + (skb->tail - skb->transport_header) + delta = htonl(oldlen + (skb->tail - skb->transport_header) +
skb->data_len); skb->data_len);
th->check = ~csum_fold((__force __wsum)((__force u32)th->check + th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
......
...@@ -787,7 +787,7 @@ void __init tcp_tasklet_init(void) ...@@ -787,7 +787,7 @@ void __init tcp_tasklet_init(void)
* We cant xmit new skbs from this context, as we might already * We cant xmit new skbs from this context, as we might already
* hold qdisc lock. * hold qdisc lock.
*/ */
static void tcp_wfree(struct sk_buff *skb) void tcp_wfree(struct sk_buff *skb)
{ {
struct sock *sk = skb->sk; struct sock *sk = skb->sk;
struct tcp_sock *tp = tcp_sk(sk); struct tcp_sock *tp = tcp_sk(sk);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment