Commit 59e92bfe authored by Maciej Fijalkowski, committed by Daniel Borkmann

ice: xsk: Borrow xdp_tx_active logic from i40e

One of the things that commit 5574ff7b ("i40e: optimize AF_XDP Tx
completion path") introduced was the @xdp_tx_active field. The same
logic can be carried over from i40e to the ice driver and yields a
performance improvement there as well.

If the descriptor that @next_dd points to has been sent by HW (its DD
bit is set), then we are sure that at least a quarter of the ring is
ready to be cleaned. If @xdp_tx_active is 0, meaning that the related
xdp_ring is not used for XDP_{TX,REDIRECT} workloads, then we already
know how many XSK entries should be placed on the completion queue,
IOW the walk through the ring can be skipped.
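
In code terms, the bookkeeping boils down to three touched spots (a
condensed view of the hunks below, all names as in the diff):

	/* ice_xmit_xdp_ring(): a new XDP_TX/XDP_REDIRECT frame is in flight */
	xdp_ring->xdp_tx_active++;

	/* ice_clean_xdp_tx_buf(): the frame has completed and is freed */
	xdp_ring->xdp_tx_active--;

	/* ice_clean_xdp_irq_zc(): nothing from XDP_{TX,REDIRECT} in flight,
	 * so all tx_thresh descriptors are XSK Tx entries and the whole
	 * batch can be reported to the completion queue at once
	 */
	if (likely(!xdp_ring->xdp_tx_active)) {
		xsk_frames = tx_thresh;
		goto skip;
	}
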
Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: Alexander Lobakin <alexandr.lobakin@intel.com>
Acked-by: Magnus Karlsson <magnus.karlsson@intel.com>
Link: https://lore.kernel.org/bpf/20220125160446.78976-9-maciej.fijalkowski@intel.com
parent 126cdfe1
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -333,6 +333,7 @@ struct ice_tx_ring {
 	spinlock_t tx_lock;
 	u32 txq_teid;			/* Added Tx queue TEID */
 	/* CL4 - 4th cacheline starts here */
+	u16 xdp_tx_active;
 #define ICE_TX_FLAGS_RING_XDP	BIT(0)
 	u8 flags;
 	u8 dcb_tc;			/* Traffic class of ring */
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -302,6 +302,7 @@ int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring)
 	tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP, 0,
 						      size, 0);
 
+	xdp_ring->xdp_tx_active++;
 	i++;
 	if (i == xdp_ring->count) {
 		i = 0;
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -687,6 +687,7 @@ static void
 ice_clean_xdp_tx_buf(struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf)
 {
 	xdp_return_frame((struct xdp_frame *)tx_buf->raw_buf);
+	xdp_ring->xdp_tx_active--;
 	dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
 			 dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
 	dma_unmap_len_set(tx_buf, len, 0);
@@ -703,9 +704,8 @@ static u16 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring, int napi_budget)
 {
 	u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
 	int budget = napi_budget / tx_thresh;
-	u16 ntc = xdp_ring->next_to_clean;
 	u16 next_dd = xdp_ring->next_dd;
-	u16 cleared_dds = 0;
+	u16 ntc, cleared_dds = 0;
 
 	do {
 		struct ice_tx_desc *next_dd_desc;
@@ -721,6 +721,12 @@ static u16 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring, int napi_budget)
 		cleared_dds++;
 		xsk_frames = 0;
+		if (likely(!xdp_ring->xdp_tx_active)) {
+			xsk_frames = tx_thresh;
+			goto skip;
+		}
+
+		ntc = xdp_ring->next_to_clean;
 
 		for (i = 0; i < tx_thresh; i++) {
 			tx_buf = &xdp_ring->tx_buf[ntc];
@@ -736,6 +742,10 @@ static u16 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring, int napi_budget)
 			if (ntc >= xdp_ring->count)
 				ntc = 0;
 		}
+skip:
+		xdp_ring->next_to_clean += tx_thresh;
+		if (xdp_ring->next_to_clean >= desc_cnt)
+			xdp_ring->next_to_clean -= desc_cnt;
 		if (xsk_frames)
 			xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
 		next_dd_desc->cmd_type_offset_bsz = 0;
@@ -744,7 +754,6 @@ static u16 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring, int napi_budget)
 		next_dd = tx_thresh - 1;
 	} while (budget--);
 
-	xdp_ring->next_to_clean = ntc;
 	xdp_ring->next_dd = next_dd;
 
 	return cleared_dds * tx_thresh;
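
For illustration, the effect of the fast path can be modeled outside
the driver. The following stand-alone C sketch is hypothetical (the
ring layout, names and sizes are invented for the example, and DMA and
descriptor handling are omitted), but it exercises the same idea: skip
the per-buffer walk whenever no XDP frames are in flight.

	#include <stdbool.h>
	#include <stdio.h>

	#define RING_SIZE 64
	#define QUARTER   (RING_SIZE / 4)	/* stand-in for tx_thresh */

	struct model_ring {
		bool is_xdp_frame[RING_SIZE];	/* true: XDP_TX/REDIRECT, false: XSK Tx */
		unsigned int next_to_clean;
		unsigned int xdp_tx_active;	/* in-flight XDP frames */
	};

	/* Clean one quarter of the ring; return how many XSK frames completed. */
	static unsigned int clean_quarter(struct model_ring *r)
	{
		unsigned int xsk_frames = 0, i;

		if (r->xdp_tx_active == 0) {
			/* fast path: every slot is XSK Tx, no walk needed */
			xsk_frames = QUARTER;
		} else {
			for (i = 0; i < QUARTER; i++) {
				unsigned int idx = (r->next_to_clean + i) % RING_SIZE;

				if (r->is_xdp_frame[idx]) {
					r->is_xdp_frame[idx] = false;
					r->xdp_tx_active--;	/* mirrors ice_clean_xdp_tx_buf() */
				} else {
					xsk_frames++;
				}
			}
		}
		r->next_to_clean = (r->next_to_clean + QUARTER) % RING_SIZE;
		return xsk_frames;
	}

	int main(void)
	{
		struct model_ring r = { .next_to_clean = 0, .xdp_tx_active = 0 };

		/* pure XSK traffic: whole quarter completed in one step */
		printf("completed %u XSK frames\n", clean_quarter(&r));

		/* mixed traffic: two XDP frames force the per-slot walk */
		r.is_xdp_frame[QUARTER + 1] = true;
		r.is_xdp_frame[QUARTER + 2] = true;
		r.xdp_tx_active = 2;	/* mirrors ice_xmit_xdp_ring() */
		printf("completed %u XSK frames\n", clean_quarter(&r));
		return 0;
	}

With these inputs the first call reports 16 frames (the fast path) and
the second 14 (16 minus the two in-flight XDP frames).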