Commit 0a7606c1 authored by David S. Miller

[NET]: Fix race between poll_napi() and net_rx_action()

netpoll_poll_lock() synchronizes the ->poll() invocation
code paths, but once we have the lock we have to make
sure that NAPI_STATE_SCHED is still set.  Otherwise we
get:

	cpu 0			cpu 1

	net_rx_action()		poll_napi()
	netpoll_poll_lock()	... spin on ->poll_lock
	->poll()
	  netif_rx_complete
	netpoll_poll_unlock()	acquire ->poll_lock()
				->poll()
				 netif_rx_complete()
				 CRASH

Based upon a bug report from Tina Yang.
Signed-off-by: David S. Miller <davem@davemloft.net>
parent b0a713e9
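The idiom the patch relies on can be modeled outside the kernel: every poller takes the per-NAPI lock and re-tests the scheduled bit before calling ->poll(), so only one of the racing callers ever does. Below is a minimal userspace sketch of that idiom, not kernel code; the names (fake_napi, fake_poll, poller) are made up for illustration, and pthreads stand in for the kernel locking primitives. Build with: cc -pthread sketch.c

/* Two threads (standing in for net_rx_action() and poll_napi())
 * contend for the same "poll lock".  Only the one that still sees
 * the scheduled flag set under the lock invokes poll(), which then
 * clears the flag -- so poll() runs exactly once.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_napi {
	pthread_mutex_t poll_lock;
	bool sched;		/* stands in for NAPI_STATE_SCHED    */
	int polls;		/* how many times poll() actually ran */
};

static void fake_poll(struct fake_napi *n)
{
	n->polls++;
	n->sched = false;	/* analogous to netif_rx_complete()  */
}

/* Both racing paths follow the same rule: take the lock,
 * re-test the scheduled flag, and only then poll.
 */
static void *poller(void *arg)
{
	struct fake_napi *n = arg;

	pthread_mutex_lock(&n->poll_lock);
	if (n->sched)		/* the re-test this commit adds      */
		fake_poll(n);
	pthread_mutex_unlock(&n->poll_lock);
	return NULL;
}

int main(void)
{
	struct fake_napi n = {
		.poll_lock = PTHREAD_MUTEX_INITIALIZER,
		.sched = true,
	};
	pthread_t a, b;

	pthread_create(&a, NULL, poller, &n);
	pthread_create(&b, NULL, poller, &n);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* Without the re-test both threads could call poll();
	 * with it, exactly one does. */
	printf("poll() ran %d time(s)\n", n.polls);
	return 0;
}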
net/core/dev.c
@@ -2172,7 +2172,15 @@ static void net_rx_action(struct softirq_action *h)
 
 		weight = n->weight;
 
-		work = n->poll(n, weight);
+		/* This NAPI_STATE_SCHED test is for avoiding a race
+		 * with netpoll's poll_napi().  Only the entity which
+		 * obtains the lock and sees NAPI_STATE_SCHED set will
+		 * actually make the ->poll() call.  Therefore we avoid
+		 * accidently calling ->poll() when NAPI is not scheduled.
+		 */
+		work = 0;
+		if (test_bit(NAPI_STATE_SCHED, &n->state))
+			work = n->poll(n, weight);
 
 		WARN_ON_ONCE(work > weight);
net/core/netpoll.c
@@ -116,6 +116,29 @@ static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
  * network adapter, forcing superfluous retries and possibly timeouts.
  * Thus, we set our budget to greater than 1.
  */
+static int poll_one_napi(struct netpoll_info *npinfo,
+			 struct napi_struct *napi, int budget)
+{
+	int work;
+
+	/* net_rx_action's ->poll() invocations and our's are
+	 * synchronized by this test which is only made while
+	 * holding the napi->poll_lock.
+	 */
+	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
+		return budget;
+
+	npinfo->rx_flags |= NETPOLL_RX_DROP;
+	atomic_inc(&trapped);
+
+	work = napi->poll(napi, budget);
+
+	atomic_dec(&trapped);
+	npinfo->rx_flags &= ~NETPOLL_RX_DROP;
+
+	return budget - work;
+}
+
 static void poll_napi(struct netpoll *np)
 {
 	struct netpoll_info *npinfo = np->dev->npinfo;
@@ -123,17 +146,13 @@ static void poll_napi(struct netpoll *np)
 	int budget = 16;
 
 	list_for_each_entry(napi, &np->dev->napi_list, dev_list) {
-		if (test_bit(NAPI_STATE_SCHED, &napi->state) &&
-		    napi->poll_owner != smp_processor_id() &&
+		if (napi->poll_owner != smp_processor_id() &&
 		    spin_trylock(&napi->poll_lock)) {
-			npinfo->rx_flags |= NETPOLL_RX_DROP;
-			atomic_inc(&trapped);
-
-			napi->poll(napi, budget);
-
-			atomic_dec(&trapped);
-			npinfo->rx_flags &= ~NETPOLL_RX_DROP;
+			budget = poll_one_napi(npinfo, napi, budget);
 			spin_unlock(&napi->poll_lock);
+
+			if (!budget)
+				break;
 		}
 	}
 }