Commit f8e8f97c authored by Eric Dumazet's avatar Eric Dumazet Committed by David S. Miller

net: fix a race in gro_cell_poll()

Dmitry Kravkov reported packet drops for GRE packets since GRO support
was added.

There is a race in gro_cell_poll() because we call napi_complete()
without any synchronization with a concurrent gro_cells_receive()

Once the bug was triggered, we queued packets but did not schedule a NAPI
poll.

We can fix this issue using the spinlock protecting the napi_skbs queue,
as we have to hold it to perform the skb dequeue anyway.

As we open-code skb_dequeue(), we no longer need to mask IRQS, as both
producer and consumer run under BH context.

Bug added in commit c9e6bc64 (net: add gro_cells infrastructure)
Reported-by: Dmitry Kravkov <dmitry@broadcom.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Tested-by: Dmitry Kravkov <dmitry@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d46d132c
...@@ -17,7 +17,6 @@ struct gro_cells { ...@@ -17,7 +17,6 @@ struct gro_cells {
static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb) static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
{ {
unsigned long flags;
struct gro_cell *cell = gcells->cells; struct gro_cell *cell = gcells->cells;
struct net_device *dev = skb->dev; struct net_device *dev = skb->dev;
...@@ -35,32 +34,37 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s ...@@ -35,32 +34,37 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
return; return;
} }
spin_lock_irqsave(&cell->napi_skbs.lock, flags); /* We run in BH context */
spin_lock(&cell->napi_skbs.lock);
__skb_queue_tail(&cell->napi_skbs, skb); __skb_queue_tail(&cell->napi_skbs, skb);
if (skb_queue_len(&cell->napi_skbs) == 1) if (skb_queue_len(&cell->napi_skbs) == 1)
napi_schedule(&cell->napi); napi_schedule(&cell->napi);
spin_unlock_irqrestore(&cell->napi_skbs.lock, flags); spin_unlock(&cell->napi_skbs.lock);
} }
/* called unser BH context */
static inline int gro_cell_poll(struct napi_struct *napi, int budget) static inline int gro_cell_poll(struct napi_struct *napi, int budget)
{ {
struct gro_cell *cell = container_of(napi, struct gro_cell, napi); struct gro_cell *cell = container_of(napi, struct gro_cell, napi);
struct sk_buff *skb; struct sk_buff *skb;
int work_done = 0; int work_done = 0;
spin_lock(&cell->napi_skbs.lock);
while (work_done < budget) { while (work_done < budget) {
skb = skb_dequeue(&cell->napi_skbs); skb = __skb_dequeue(&cell->napi_skbs);
if (!skb) if (!skb)
break; break;
spin_unlock(&cell->napi_skbs.lock);
napi_gro_receive(napi, skb); napi_gro_receive(napi, skb);
work_done++; work_done++;
spin_lock(&cell->napi_skbs.lock);
} }
if (work_done < budget) if (work_done < budget)
napi_complete(napi); napi_complete(napi);
spin_unlock(&cell->napi_skbs.lock);
return work_done; return work_done;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment