Commit f1a25589 authored by Shay Agroskin, committed by Jakub Kicinski

net: ena: introduce ndo_xdp_xmit() function for XDP_REDIRECT

This patch implements the ndo_xdp_xmit() net_device function which is
called when a packet is redirected to this driver using an
XDP_REDIRECT directive.

The function receives an array of xdp frames that it needs to xmit.
The TX queues that are used to xmit these frames are the XDP
queues used by the XDP_TX flow. Therefore a lock is added to synchronize
both flows (XDP_TX and XDP_REDIRECT).
Signed-off-by: Shay Agroskin <shayagr@amazon.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent f8b91f25
...@@ -281,20 +281,18 @@ static int ena_xdp_tx_map_frame(struct ena_ring *xdp_ring, ...@@ -281,20 +281,18 @@ static int ena_xdp_tx_map_frame(struct ena_ring *xdp_ring,
return -EINVAL; return -EINVAL;
} }
static int ena_xdp_xmit_frame(struct net_device *dev, static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
struct net_device *dev,
struct xdp_frame *xdpf, struct xdp_frame *xdpf,
int qid) int flags)
{ {
struct ena_adapter *adapter = netdev_priv(dev);
struct ena_com_tx_ctx ena_tx_ctx = {}; struct ena_com_tx_ctx ena_tx_ctx = {};
struct ena_tx_buffer *tx_info; struct ena_tx_buffer *tx_info;
struct ena_ring *xdp_ring;
u16 next_to_use, req_id; u16 next_to_use, req_id;
int rc;
void *push_hdr; void *push_hdr;
u32 push_len; u32 push_len;
int rc;
xdp_ring = &adapter->tx_ring[qid];
next_to_use = xdp_ring->next_to_use; next_to_use = xdp_ring->next_to_use;
req_id = xdp_ring->free_ids[next_to_use]; req_id = xdp_ring->free_ids[next_to_use];
tx_info = &xdp_ring->tx_buffer_info[req_id]; tx_info = &xdp_ring->tx_buffer_info[req_id];
...@@ -321,25 +319,76 @@ static int ena_xdp_xmit_frame(struct net_device *dev, ...@@ -321,25 +319,76 @@ static int ena_xdp_xmit_frame(struct net_device *dev,
/* trigger the dma engine. ena_com_write_sq_doorbell() /* trigger the dma engine. ena_com_write_sq_doorbell()
* has a mb * has a mb
*/ */
ena_com_write_sq_doorbell(xdp_ring->ena_com_io_sq); if (flags & XDP_XMIT_FLUSH) {
ena_increase_stat(&xdp_ring->tx_stats.doorbells, 1, &xdp_ring->syncp); ena_com_write_sq_doorbell(xdp_ring->ena_com_io_sq);
ena_increase_stat(&xdp_ring->tx_stats.doorbells, 1,
&xdp_ring->syncp);
}
return NETDEV_TX_OK; return rc;
error_unmap_dma: error_unmap_dma:
ena_unmap_tx_buff(xdp_ring, tx_info); ena_unmap_tx_buff(xdp_ring, tx_info);
tx_info->xdpf = NULL; tx_info->xdpf = NULL;
error_drop_packet: error_drop_packet:
xdp_return_frame(xdpf); xdp_return_frame(xdpf);
return NETDEV_TX_OK; return rc;
}
/* ndo_xdp_xmit callback: transmit a batch of XDP frames redirected to this
 * device (XDP_REDIRECT). Frames go out on the XDP TX queue mapped to the
 * current CPU; that queue is shared with the XDP_TX path, so it is guarded
 * by the ring's xdp_tx_lock.
 *
 * Returns the number of frames actually handed to the hardware (n minus
 * the frames ena_xdp_xmit_frame() dropped), or a negative errno when the
 * flags are invalid, the device is down, or no XDP program is attached.
 */
static int ena_xdp_xmit(struct net_device *dev, int n,
			struct xdp_frame **frames, u32 flags)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	struct ena_ring *ring;
	int dropped = 0;
	int queue_id;
	int idx;

	/* Reject any flag bits this driver does not understand */
	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
		return -ENETDOWN;

	/* We assume that all rings have the same XDP program */
	if (!READ_ONCE(adapter->rx_ring->xdp_bpf_prog))
		return -ENXIO;

	/* Pick the XDP TX queue associated with the executing CPU */
	queue_id = adapter->xdp_first_ring +
		   smp_processor_id() % adapter->xdp_num_queues;
	ring = &adapter->tx_ring[queue_id];

	/* Other CPUs (and the XDP_TX path) may transmit through this queue */
	spin_lock(&ring->xdp_tx_lock);

	for (idx = 0; idx < n; idx++) {
		/* On failure ena_xdp_xmit_frame() frees the descriptor
		 * itself; we only account the drop here.
		 */
		if (ena_xdp_xmit_frame(ring, dev, frames[idx], 0))
			dropped++;
	}

	/* Ring the doorbell once for the whole batch if asked to flush */
	if (flags & XDP_XMIT_FLUSH) {
		ena_com_write_sq_doorbell(ring->ena_com_io_sq);
		ena_increase_stat(&ring->tx_stats.doorbells, 1,
				  &ring->syncp);
	}

	spin_unlock(&ring->xdp_tx_lock);

	/* Number of frames successfully queued for transmission */
	return n - dropped;
}
static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp) static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
{ {
struct bpf_prog *xdp_prog; struct bpf_prog *xdp_prog;
struct ena_ring *xdp_ring;
u32 verdict = XDP_PASS; u32 verdict = XDP_PASS;
struct xdp_frame *xdpf; struct xdp_frame *xdpf;
u64 *xdp_stat; u64 *xdp_stat;
int qid;
rcu_read_lock(); rcu_read_lock();
xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog); xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);
...@@ -358,8 +407,16 @@ static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp) ...@@ -358,8 +407,16 @@ static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
break; break;
} }
ena_xdp_xmit_frame(rx_ring->netdev, xdpf, /* Find xmit queue */
rx_ring->qid + rx_ring->adapter->num_io_queues); qid = rx_ring->qid + rx_ring->adapter->num_io_queues;
xdp_ring = &rx_ring->adapter->tx_ring[qid];
/* The XDP queues are shared between XDP_TX and XDP_REDIRECT */
spin_lock(&xdp_ring->xdp_tx_lock);
ena_xdp_xmit_frame(xdp_ring, rx_ring->netdev, xdpf, XDP_XMIT_FLUSH);
spin_unlock(&xdp_ring->xdp_tx_lock);
xdp_stat = &rx_ring->rx_stats.xdp_tx; xdp_stat = &rx_ring->rx_stats.xdp_tx;
break; break;
case XDP_REDIRECT: case XDP_REDIRECT:
...@@ -652,6 +709,7 @@ static void ena_init_io_rings(struct ena_adapter *adapter, ...@@ -652,6 +709,7 @@ static void ena_init_io_rings(struct ena_adapter *adapter,
txr->smoothed_interval = txr->smoothed_interval =
ena_com_get_nonadaptive_moderation_interval_tx(ena_dev); ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);
txr->disable_meta_caching = adapter->disable_meta_caching; txr->disable_meta_caching = adapter->disable_meta_caching;
spin_lock_init(&txr->xdp_tx_lock);
/* Don't init RX queues for xdp queues */ /* Don't init RX queues for xdp queues */
if (!ENA_IS_XDP_INDEX(adapter, i)) { if (!ENA_IS_XDP_INDEX(adapter, i)) {
...@@ -3244,6 +3302,7 @@ static const struct net_device_ops ena_netdev_ops = { ...@@ -3244,6 +3302,7 @@ static const struct net_device_ops ena_netdev_ops = {
.ndo_set_mac_address = NULL, .ndo_set_mac_address = NULL,
.ndo_validate_addr = eth_validate_addr, .ndo_validate_addr = eth_validate_addr,
.ndo_bpf = ena_xdp, .ndo_bpf = ena_xdp,
.ndo_xdp_xmit = ena_xdp_xmit,
}; };
static int ena_device_validate_params(struct ena_adapter *adapter, static int ena_device_validate_params(struct ena_adapter *adapter,
......
...@@ -258,6 +258,7 @@ struct ena_ring { ...@@ -258,6 +258,7 @@ struct ena_ring {
struct ena_com_io_sq *ena_com_io_sq; struct ena_com_io_sq *ena_com_io_sq;
struct bpf_prog *xdp_bpf_prog; struct bpf_prog *xdp_bpf_prog;
struct xdp_rxq_info xdp_rxq; struct xdp_rxq_info xdp_rxq;
spinlock_t xdp_tx_lock; /* synchronize XDP TX/Redirect traffic */
u16 next_to_use; u16 next_to_use;
u16 next_to_clean; u16 next_to_clean;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment