Commit 4c2bacbe authored by Alexander Lobakin, committed by David S. Miller

qede: refactor XDP Tx processing

The current XDP Tx logic is suboptimal and can't be reused for the
XDP_REDIRECT path.
Make qede_xdp_{tx_int,xmit}() more generic and efficient overall to
allow for future expansion.

Misc: use unlikely() hints where appropriate and replace "Fall through"
comments with the fallthrough pseudo-keyword.
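
For reference, both idioms are shown below in a minimal, self-contained
userspace sketch (GCC/Clang; illustration only, not code from this commit):
unlikely() wraps the __builtin_expect() branch hint, and fallthrough expands
to __attribute__((__fallthrough__)), which lets -Wimplicit-fallthrough verify
that every fall-through between case labels is intentional.

/* Illustration only: userspace mirror of the kernel idioms.
 * Build with: gcc -Wimplicit-fallthrough -o demo demo.c
 */
#include <stdio.h>

#define unlikely(x)	__builtin_expect(!!(x), 0)
#define fallthrough	__attribute__((__fallthrough__))

static int classify(int act)
{
	switch (act) {
	default:
		fprintf(stderr, "invalid action %d\n", act);
		fallthrough;	/* explicit: keep falling into the drop cases */
	case 0:	/* ABORTED */
	case 1:	/* DROP */
		return -1;
	case 2:	/* TX */
		return 0;
	}
}

int main(void)
{
	if (unlikely(classify(3) < 0))	/* hint: error path is rare */
		puts("dropped");
	return 0;
}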
Signed-off-by: Alexander Lobakin <alobakin@marvell.com>
Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
Signed-off-by: Michal Kalderon <michal.kalderon@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent f285ad57
@@ -455,6 +455,7 @@ struct qede_fastpath {
 	u8 id;
 
 	u8 xdp_xmit;
+#define QEDE_XDP_TX	BIT(0)
 
 	struct napi_struct napi;
 	struct qed_sb_info *sb_info;
@@ -302,48 +302,37 @@ static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
 	wmb();
 }
 
-static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp,
-			 struct sw_rx_data *metadata, u16 padding, u16 length)
+static int qede_xdp_xmit(struct qede_tx_queue *txq, dma_addr_t dma, u16 pad,
+			 u16 len, struct page *page)
 {
-	struct qede_tx_queue *txq = fp->xdp_tx;
-	struct eth_tx_1st_bd *first_bd;
-	u16 idx = txq->sw_tx_prod;
+	struct eth_tx_1st_bd *bd;
+	struct sw_tx_xdp *xdp;
 	u16 val;
 
-	if (!qed_chain_get_elem_left(&txq->tx_pbl)) {
+	if (unlikely(qed_chain_get_elem_used(&txq->tx_pbl) >=
+		     txq->num_tx_buffers)) {
 		txq->stopped_cnt++;
 		return -ENOMEM;
 	}
 
-	first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
+	bd = qed_chain_produce(&txq->tx_pbl);
+	bd->data.nbds = 1;
+	bd->data.bd_flags.bitfields = BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT);
 
-	memset(first_bd, 0, sizeof(*first_bd));
-	first_bd->data.bd_flags.bitfields =
-	    BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT);
-
-	val = (length & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
+	val = (len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
 	       ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
 
-	first_bd->data.bitfields |= cpu_to_le16(val);
-	first_bd->data.nbds = 1;
+	bd->data.bitfields = cpu_to_le16(val);
 
 	/* We can safely ignore the offset, as it's 0 for XDP */
-	BD_SET_UNMAP_ADDR_LEN(first_bd, metadata->mapping + padding, length);
+	BD_SET_UNMAP_ADDR_LEN(bd, dma + pad, len);
 
-	/* Synchronize the buffer back to device, as program [probably]
-	 * has changed it.
-	 */
-	dma_sync_single_for_device(&edev->pdev->dev,
-				   metadata->mapping + padding,
-				   length, PCI_DMA_TODEVICE);
-
-	txq->sw_tx_ring.xdp[idx].page = metadata->data;
-	txq->sw_tx_ring.xdp[idx].mapping = metadata->mapping;
+	xdp = txq->sw_tx_ring.xdp + txq->sw_tx_prod;
+	xdp->mapping = dma;
+	xdp->page = page;
 
 	txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;
 
-	/* Mark the fastpath for future XDP doorbell */
-	fp->xdp_xmit = 1;
-
 	return 0;
 }
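
With the Rx-metadata dependency gone, the helper only needs a DMA address, a
pad offset, a length, and the backing page, so a future XDP_REDIRECT path can
reuse it. A rough, hypothetical sketch of such a caller follows (not part of
this commit; the function name, queue selection, and completion handling are
all placeholders, and real redirect support would also have to free
xdp_frames rather than pages on Tx completion):

/* Hypothetical .ndo_xdp_xmit() built on the refactored helper.
 * Sketch only: error unwinding and the doorbell kick are elided.
 */
static int qede_xdp_transmit_sketch(struct net_device *dev, int n_frames,
				    struct xdp_frame **frames, u32 flags)
{
	struct qede_dev *edev = netdev_priv(dev);
	/* placeholder queue selection: one XDP Tx queue per CPU */
	struct qede_tx_queue *txq = edev->fp_array[smp_processor_id()].xdp_tx;
	int i, nxmit = 0;

	for (i = 0; i < n_frames; i++) {
		struct xdp_frame *xdpf = frames[i];
		dma_addr_t dma;

		dma = dma_map_single(&edev->pdev->dev, xdpf->data,
				     xdpf->len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(&edev->pdev->dev, dma)))
			break;

		/* pad == 0: the payload already starts at xdpf->data */
		if (unlikely(qede_xdp_xmit(txq, dma, 0, xdpf->len,
					   virt_to_page(xdpf->data))))
			break;

		nxmit++;
	}

	return nxmit;
}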
@@ -362,20 +351,21 @@ int qede_txq_has_work(struct qede_tx_queue *txq)
 
 static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
 {
-	u16 hw_bd_cons, idx;
+	struct sw_tx_xdp *xdp_info, *xdp_arr = txq->sw_tx_ring.xdp;
+	struct device *dev = &edev->pdev->dev;
+	u16 hw_bd_cons;
 
 	hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
 	barrier();
 
 	while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
-		qed_chain_consume(&txq->tx_pbl);
-		idx = txq->sw_tx_cons;
+		xdp_info = xdp_arr + txq->sw_tx_cons;
 
-		dma_unmap_page(&edev->pdev->dev,
-			       txq->sw_tx_ring.xdp[idx].mapping,
-			       PAGE_SIZE, DMA_BIDIRECTIONAL);
-		__free_page(txq->sw_tx_ring.xdp[idx].page);
+		dma_unmap_page(dev, xdp_info->mapping, PAGE_SIZE,
+			       DMA_BIDIRECTIONAL);
+		__free_page(xdp_info->page);
+
+		qed_chain_consume(&txq->tx_pbl);
 
 		txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
 		txq->xmit_pkts++;
 	}
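
The fullness test in qede_xdp_xmit() above now compares chain occupancy
against num_tx_buffers instead of asking how many elements are left, which
pairs naturally with this consumer loop: every produced element is eventually
consumed here. A minimal userspace model of that producer/consumer accounting
(illustrative names only; the real qed_chain API tracks this internally):

/* Free-running indices; occupancy is the producer/consumer distance. */
struct xdp_ring {
	unsigned int prod, cons;	/* only ever incremented */
	unsigned int size;		/* == num_tx_buffers */
};

static inline unsigned int ring_used(const struct xdp_ring *r)
{
	return r->prod - r->cons;	/* unsigned wrap keeps this correct */
}

static inline int ring_full(const struct xdp_ring *r)
{
	/* mirrors: qed_chain_get_elem_used(...) >= txq->num_tx_buffers */
	return ring_used(r) >= r->size;
}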
@@ -1064,32 +1054,39 @@ static bool qede_rx_xdp(struct qede_dev *edev,
 	switch (act) {
 	case XDP_TX:
 		/* We need the replacement buffer before transmit. */
-		if (qede_alloc_rx_buffer(rxq, true)) {
+		if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
 			qede_recycle_rx_bd_ring(rxq, 1);
+
 			trace_xdp_exception(edev->ndev, prog, act);
-			return false;
+			break;
 		}
 
 		/* Now if there's a transmission problem, we'd still have to
 		 * throw current buffer, as replacement was already allocated.
 		 */
-		if (qede_xdp_xmit(edev, fp, bd, *data_offset, *len)) {
-			dma_unmap_page(rxq->dev, bd->mapping,
-				       PAGE_SIZE, DMA_BIDIRECTIONAL);
+		if (unlikely(qede_xdp_xmit(fp->xdp_tx, bd->mapping,
+					   *data_offset, *len, bd->data))) {
+			dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE,
+				       rxq->data_direction);
 			__free_page(bd->data);
+
 			trace_xdp_exception(edev->ndev, prog, act);
+		} else {
+			dma_sync_single_for_device(rxq->dev,
+						   bd->mapping + *data_offset,
+						   *len, rxq->data_direction);
+
+			fp->xdp_xmit |= QEDE_XDP_TX;
 		}
 
 		/* Regardless, we've consumed an Rx BD */
 		qede_rx_bd_ring_consume(rxq);
-		return false;
+		break;
 	default:
 		bpf_warn_invalid_xdp_action(act);
-		/* Fall through */
+		fallthrough;
 	case XDP_ABORTED:
 		trace_xdp_exception(edev->ndev, prog, act);
-		/* Fall through */
+		fallthrough;
 	case XDP_DROP:
 		qede_recycle_rx_bd_ring(rxq, cqe->bd_num);
 	}
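
Note the relocated sync: the old helper unconditionally synced the frame back
to the device inside qede_xdp_xmit(); now only the success branch does, and
only for the bytes the program could have touched. The general streaming-DMA
pattern being followed, sketched with the generic DMA API (not qede code;
dev, page, offset, and len are stand-ins):

/* Sketch of the map/modify/sync lifecycle for a bidirectional page. */
dma_addr_t map = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);

/* ... CPU (here: the BPF program) rewrites the packet in the page ... */

/* Hand ownership of just the dirtied range back to the device: */
dma_sync_single_for_device(dev, map + offset, len, DMA_BIDIRECTIONAL);
/* The NIC may now safely DMA-read [map + offset, map + offset + len). */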
@@ -1353,6 +1350,9 @@ int qede_poll(struct napi_struct *napi, int budget)
 			       napi);
 	struct qede_dev *edev = fp->edev;
 	int rx_work_done = 0;
+	u16 xdp_prod;
+
+	fp->xdp_xmit = 0;
 
 	if (likely(fp->type & QEDE_FASTPATH_TX)) {
 		int cos;
@@ -1380,10 +1380,9 @@ int qede_poll(struct napi_struct *napi, int budget)
 		}
 	}
 
-	if (fp->xdp_xmit) {
-		u16 xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl);
+	if (fp->xdp_xmit & QEDE_XDP_TX) {
+		xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl);
 
-		fp->xdp_xmit = 0;
 		fp->xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod);
 		qede_update_tx_producer(fp->xdp_tx);
 	}
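
Taken together, the two qede_poll() hunks batch doorbells per NAPI cycle: the
flag word is cleared on entry, Rx processing may OR in QEDE_XDP_TX, and at
most one XDP Tx doorbell is rung on exit. Schematically (condensed from the
code above; not a verbatim excerpt):

/* One NAPI poll, condensed (names as in the driver) */
fp->xdp_xmit = 0;			/* no doorbells owed yet */

rx_work_done = qede_rx_int(fp, budget);	/* XDP_TX frames set QEDE_XDP_TX */

if (fp->xdp_xmit & QEDE_XDP_TX) {	/* single doorbell for the batch */
	u16 xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl);

	fp->xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod);
	qede_update_tx_producer(fp->xdp_tx);
}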