Commit 15fad714 authored by Jesper Dangaard Brouer's avatar Jesper Dangaard Brouer Committed by David S. Miller

net: bulk free SKBs that were delay free'ed due to IRQ context

The network stack defers SKBs free, in-case free happens in IRQ or
when IRQs are disabled. This happens in __dev_kfree_skb_irq() that
writes SKBs that were free'ed during IRQ to the softirq completion
queue (softnet_data.completion_queue).

These SKBs are naturally delayed, and cleaned up during NET_TX_SOFTIRQ
in function net_tx_action().  Take advantage of this and use the skb
defer and flush API, as we are already in softirq context.

For modern drivers this rarely happens. Although most drivers do call
dev_kfree_skb_any(), which detects the situation and calls
__dev_kfree_skb_irq() when needed.  This is because netpoll can call
from IRQ context.
Signed-off-by: Alexander Duyck <alexander.h.duyck@redhat.com>
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 795bb1c0
...@@ -2407,6 +2407,7 @@ static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi, ...@@ -2407,6 +2407,7 @@ static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
void napi_consume_skb(struct sk_buff *skb, int budget); void napi_consume_skb(struct sk_buff *skb, int budget);
void __kfree_skb_flush(void); void __kfree_skb_flush(void);
void __kfree_skb_defer(struct sk_buff *skb);
/** /**
* __dev_alloc_pages - allocate page for network Rx * __dev_alloc_pages - allocate page for network Rx
......
...@@ -3829,8 +3829,14 @@ static void net_tx_action(struct softirq_action *h) ...@@ -3829,8 +3829,14 @@ static void net_tx_action(struct softirq_action *h)
trace_consume_skb(skb); trace_consume_skb(skb);
else else
trace_kfree_skb(skb, net_tx_action); trace_kfree_skb(skb, net_tx_action);
__kfree_skb(skb);
if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
__kfree_skb(skb);
else
__kfree_skb_defer(skb);
} }
__kfree_skb_flush();
} }
if (sd->output_queue) { if (sd->output_queue) {
......
...@@ -767,7 +767,7 @@ void __kfree_skb_flush(void) ...@@ -767,7 +767,7 @@ void __kfree_skb_flush(void)
} }
} }
static void __kfree_skb_defer(struct sk_buff *skb) static inline void _kfree_skb_defer(struct sk_buff *skb)
{ {
struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
...@@ -789,6 +789,10 @@ static void __kfree_skb_defer(struct sk_buff *skb) ...@@ -789,6 +789,10 @@ static void __kfree_skb_defer(struct sk_buff *skb)
nc->skb_count = 0; nc->skb_count = 0;
} }
} }
/* Out-of-line, exported entry point for deferred skb freeing: delegates
 * to the inline _kfree_skb_defer(), which (per the hunk above) stashes
 * the skb in the per-CPU napi_alloc_cache so it can later be released
 * in bulk by __kfree_skb_flush(). Added so callers outside this file
 * (net_tx_action) can use the defer/flush API. */
void __kfree_skb_defer(struct sk_buff *skb)
{
_kfree_skb_defer(skb);
}
void napi_consume_skb(struct sk_buff *skb, int budget) void napi_consume_skb(struct sk_buff *skb, int budget)
{ {
...@@ -814,7 +818,7 @@ void napi_consume_skb(struct sk_buff *skb, int budget) ...@@ -814,7 +818,7 @@ void napi_consume_skb(struct sk_buff *skb, int budget)
return; return;
} }
__kfree_skb_defer(skb); _kfree_skb_defer(skb);
} }
EXPORT_SYMBOL(napi_consume_skb); EXPORT_SYMBOL(napi_consume_skb);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment