Commit 27717f8b authored by Sriram Yagnaraman, committed by Tony Nguyen

igb: Always call igb_xdp_ring_update_tail() under Tx lock

Always call igb_xdp_ring_update_tail() under __netif_tx_lock; add a comment
and a lockdep assert to document that. This is needed to share the same TX
ring between XDP, XSK and slow paths. Furthermore, the current XDP
implementation is racy on tail updates.
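
As a minimal sketch of the pattern this enforces (not verbatim driver code;
"tx_ring" here stands for whichever XDP TX ring the caller resolved), every
tail bump now looks like the hunks below in igb_xdp_xmit() and
igb_clean_rx_irq():

	struct netdev_queue *nq = txring_txq(tx_ring);

	__netif_tx_lock(nq, smp_processor_id());
	igb_xdp_ring_update_tail(tx_ring);	/* asserts _xmit_lock is held */
	__netif_tx_unlock(nq);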

Fixes: 9cbc948b ("igb: add XDP support")
Signed-off-by: Sriram Yagnaraman <sriram.yagnaraman@est.tech>
[Kurt: Add lockdep assert and fixes tag]
Signed-off-by: Kurt Kanzenbach <kurt@linutronix.de>
Acked-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Tested-by: George Kuruvinakunnel <george.kuruvinakunnel@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
parent d2940002
@@ -33,6 +33,7 @@
 #include <linux/bpf_trace.h>
 #include <linux/pm_runtime.h>
 #include <linux/etherdevice.h>
+#include <linux/lockdep.h>
 #ifdef CONFIG_IGB_DCA
 #include <linux/dca.h>
 #endif
@@ -2914,8 +2915,11 @@ static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp)
 	}
 }
 
+/* This function assumes __netif_tx_lock is held by the caller. */
 static void igb_xdp_ring_update_tail(struct igb_ring *ring)
 {
+	lockdep_assert_held(&txring_txq(ring)->_xmit_lock);
+
 	/* Force memory writes to complete before letting h/w know there
 	 * are new descriptors to fetch.
 	 */
@@ -3000,11 +3004,11 @@ static int igb_xdp_xmit(struct net_device *dev, int n,
 		nxmit++;
 	}
 
-	__netif_tx_unlock(nq);
-
 	if (unlikely(flags & XDP_XMIT_FLUSH))
 		igb_xdp_ring_update_tail(tx_ring);
 
+	__netif_tx_unlock(nq);
+
 	return nxmit;
 }
@@ -8864,12 +8868,14 @@ static void igb_put_rx_buffer(struct igb_ring *rx_ring,
 static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
 {
+	unsigned int total_bytes = 0, total_packets = 0;
 	struct igb_adapter *adapter = q_vector->adapter;
 	struct igb_ring *rx_ring = q_vector->rx.ring;
-	struct sk_buff *skb = rx_ring->skb;
-	unsigned int total_bytes = 0, total_packets = 0;
 	u16 cleaned_count = igb_desc_unused(rx_ring);
+	struct sk_buff *skb = rx_ring->skb;
+	int cpu = smp_processor_id();
 	unsigned int xdp_xmit = 0;
+	struct netdev_queue *nq;
 	struct xdp_buff xdp;
 	u32 frame_sz = 0;
 	int rx_buf_pgcnt;
@@ -8997,7 +9003,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
 	if (xdp_xmit & IGB_XDP_TX) {
 		struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
 
+		nq = txring_txq(tx_ring);
+		__netif_tx_lock(nq, cpu);
 		igb_xdp_ring_update_tail(tx_ring);
+		__netif_tx_unlock(nq);
 	}
 
 	u64_stats_update_begin(&rx_ring->rx_syncp);