Commit efecfd5f authored by Tony Nguyen, committed by Jeff Kirsher

ixgbevf: Delay tail write for XDP packets

The current XDP implementation writes the tail register on every XDP_TX;
change the driver to write the tail only after packet processing is complete.

Based on
commit 7379f97a ("ixgbe: delay tail write to every 'n' packets")
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 21092e9c
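
For context, the batching pattern this commit adopts can be sketched as follows. This is a minimal, self-contained illustration, not the driver's actual code: the xdp_tx_ring structure, the xmit_xdp_packet() and flush_xdp_tail() helpers, and the user-space barrier stand-in are all hypothetical names invented for this sketch.

/* Minimal sketch of the delayed tail write pattern. All names here are
 * illustrative stand-ins, not the real ixgbevf structures or helpers.
 */
#include <stdbool.h>

#ifndef wmb
#define wmb() __sync_synchronize() /* user-space stand-in for the kernel's wmb() */
#endif

struct xdp_tx_ring {
	unsigned int next_to_use;    /* next free descriptor slot (sw index) */
	volatile unsigned int *tail; /* stand-in for the MMIO tail register */
};

/* Per-packet transmit path: queue the descriptor and advance the
 * software index only; the tail register is NOT written here.
 */
static void xmit_xdp_packet(struct xdp_tx_ring *ring)
{
	/* ... fill the descriptor at ring->next_to_use ... */
	ring->next_to_use++;
}

/* Called once per receive poll, after all packets are processed: a
 * single barrier and one tail write cover the whole batch, instead of
 * one MMIO write per XDP_TX packet.
 */
static void flush_xdp_tail(struct xdp_tx_ring *ring, bool xdp_xmit)
{
	if (!xdp_xmit)
		return;

	/* make descriptor writes visible to the device before the doorbell */
	wmb();
	*ring->tail = ring->next_to_use;
}

In the patch itself, the xdp_xmit flag plays this role: ixgbevf_clean_rx_irq() sets it when a frame is queued for XDP_TX and issues a single wmb() plus ixgbevf_write_tail() after the receive loop. Inside ixgbevf_xmit_xdp_ring() the full wmb() can then be relaxed to smp_wmb(), which suffices to order the descriptor writes against the CPU-side cleanup path; the full barrier is only needed once per batch, right before the doorbell write.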
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c

@@ -1016,14 +1016,8 @@ static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring,
 			       cpu_to_le32((len << IXGBE_ADVTXD_PAYLEN_SHIFT) |
 					   IXGBE_ADVTXD_CC);
 
-	/* Force memory writes to complete before letting h/w know there
-	 * are new descriptors to fetch.  (Only applicable for weak-ordered
-	 * memory model archs, such as IA-64).
-	 *
-	 * We also need this memory barrier to make certain all of the
-	 * status bits have been updated before next_to_watch is written.
-	 */
-	wmb();
+	/* Avoid any potential race with cleanup */
+	smp_wmb();
 
 	/* set next_to_watch value indicating a packet is present */
 	i++;
@@ -1033,8 +1027,6 @@ static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring,
 	tx_buffer->next_to_watch = tx_desc;
 	ring->next_to_use = i;
 
-	/* notify HW of packet */
-	ixgbevf_write_tail(ring, i);
 	return IXGBEVF_XDP_TX;
 }
 
@@ -1101,6 +1093,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 	struct ixgbevf_adapter *adapter = q_vector->adapter;
 	u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
 	struct sk_buff *skb = rx_ring->skb;
+	bool xdp_xmit = false;
 	struct xdp_buff xdp;
 
 	xdp.rxq = &rx_ring->xdp_rxq;
@@ -1142,11 +1135,13 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 		}
 
 		if (IS_ERR(skb)) {
-			if (PTR_ERR(skb) == -IXGBEVF_XDP_TX)
+			if (PTR_ERR(skb) == -IXGBEVF_XDP_TX) {
+				xdp_xmit = true;
 				ixgbevf_rx_buffer_flip(rx_ring, rx_buffer,
 						       size);
-			else
+			} else {
 				rx_buffer->pagecnt_bias++;
+			}
 			total_rx_packets++;
 			total_rx_bytes += size;
 		} else if (skb) {
@@ -1208,6 +1203,17 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 	/* place incomplete frames back on ring for completion */
 	rx_ring->skb = skb;
 
+	if (xdp_xmit) {
+		struct ixgbevf_ring *xdp_ring =
+			adapter->xdp_ring[rx_ring->queue_index];
+
+		/* Force memory writes to complete before letting h/w
+		 * know there are new descriptors to fetch.
+		 */
+		wmb();
+		ixgbevf_write_tail(xdp_ring, xdp_ring->next_to_use);
+	}
+
 	u64_stats_update_begin(&rx_ring->syncp);
 	rx_ring->stats.packets += total_rx_packets;
 	rx_ring->stats.bytes += total_rx_bytes;