Commit a1aaee7f authored by Eric Dumazet, committed by David S. Miller

net: make napi_threaded_poll() aware of sd->defer_list

If we call skb_defer_free_flush() from napi_threaded_poll(),
we can avoid raising an IPI from skb_attempt_defer_free()
when the list becomes too big.

This allows napi_threaded_poll() to rely less on softirqs,
and lowers the latency caused by an overly long defer list.
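
For context, a condensed sketch of the deferred-free mechanism this commit interacts with. This is not the verbatim kernel source: locking and memory-ordering details are trimmed, and exact field handling varies by kernel version, but the function and field names (skb_attempt_defer_free(), skb_defer_free_flush(), sd->defer_list, sd->defer_count, sd->defer_csd, sysctl_skb_defer_max) match net/core of this era.

	/* Condensed sketch: an skb freed on a CPU other than skb->alloc_cpu
	 * is queued on the allocating CPU's per-CPU defer_list instead of
	 * being freed immediately, so its memory is recycled cache-hot.
	 */
	void skb_attempt_defer_free(struct sk_buff *skb)
	{
		int cpu = skb->alloc_cpu;
		struct softnet_data *sd;
		unsigned long flags;
		bool kick;

		if (!cpu_online(cpu) || cpu == raw_smp_processor_id()) {
			__kfree_skb(skb);	/* nothing to gain by deferring */
			return;
		}

		sd = &per_cpu(softnet_data, cpu);
		spin_lock_irqsave(&sd->defer_lock, flags);
		/* Kick the remote CPU once the queue reaches half capacity. */
		kick = sd->defer_count == (READ_ONCE(sysctl_skb_defer_max) >> 1);
		sd->defer_count++;
		skb->next = sd->defer_list;
		sd->defer_list = skb;
		spin_unlock_irqrestore(&sd->defer_lock, flags);

		/* The IPI schedules NET_RX_SOFTIRQ on the remote CPU so that
		 * net_rx_action() eventually calls skb_defer_free_flush().
		 * This is the IPI the commit message wants to avoid raising.
		 */
		if (unlikely(kick))
			smp_call_function_single_async(cpu, &sd->defer_csd);
	}

	/* Runs on the owning CPU and frees everything queued above. */
	static void skb_defer_free_flush(struct softnet_data *sd)
	{
		struct sk_buff *skb, *next;
		unsigned long flags;

		if (!READ_ONCE(sd->defer_list))
			return;

		spin_lock_irqsave(&sd->defer_lock, flags);
		skb = sd->defer_list;
		sd->defer_list = NULL;
		sd->defer_count = 0;
		spin_unlock_irqrestore(&sd->defer_lock, flags);

		while (skb) {
			next = skb->next;
			napi_consume_skb(skb, 1);
			skb = next;
		}
	}

Before this commit, a NAPI instance running in threaded mode never drained its own defer_list, so a remote CPU filling it had to fall back on the IPI/softirq path above.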
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e6f50edf
net/core/dev.c
@@ -6622,6 +6622,7 @@ static void skb_defer_free_flush(struct softnet_data *sd)
 static int napi_threaded_poll(void *data)
 {
 	struct napi_struct *napi = data;
+	struct softnet_data *sd;
 	void *have;
 
 	while (!napi_thread_wait(napi)) {
@@ -6629,11 +6630,13 @@ static int napi_threaded_poll(void *data)
 		for (;;) {
 			bool repoll = false;
 
 			local_bh_disable();
+			sd = this_cpu_ptr(&softnet_data);
 			have = netpoll_poll_lock(napi);
 			__napi_poll(napi, &repoll);
 			netpoll_poll_unlock(have);
+			skb_defer_free_flush(sd);
 			local_bh_enable();
 
 			if (!repoll)
...
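
A note on the placement, as I read it: skb_defer_free_flush(sd) sits inside the local_bh_disable()/local_bh_enable() pair, which pins the thread to its CPU, so the softnet_data pointer obtained with this_cpu_ptr() stays valid for the whole poll-and-flush sequence, and the flush runs in the same BH-disabled context it gets when net_rx_action() calls it from the softirq path.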