Commit dfe44c1f authored by Charles McLachlan, committed by David S. Miller

sfc: handle XDP_TX outcomes of XDP eBPF programs

Provide an ndo_xdp_xmit function that uses the XDP tx queue for this
CPU to send the packet.
Signed-off-by: Charles McLachlan <cmclachlan@solarflare.com>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 3990a8ff
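
For context, the XDP_TX verdict handled in the rx path below is produced by an eBPF program attached to the device's XDP hook. A minimal sketch of such a program follows; it is illustrative only and not part of this commit, and assumes the usual libbpf section and licence boilerplate:

/* Minimal illustration: every frame received on the XDP hook is bounced
 * straight back out of the same interface, exercising the XDP_TX path
 * that this commit adds to the sfc driver.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_tx_all(struct xdp_md *ctx)
{
        return XDP_TX;
}

char _license[] SEC("license") = "GPL";
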
@@ -228,6 +228,8 @@ static void efx_start_all(struct efx_nic *efx);
 static void efx_stop_all(struct efx_nic *efx);
 static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog);
 static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp);
+static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
+                        u32 flags);
 
 #define EFX_ASSERT_RESET_SERIALISED(efx)        \
         do {                                    \
@@ -2633,6 +2635,7 @@ static const struct net_device_ops efx_netdev_ops = {
 #endif
         .ndo_udp_tunnel_add     = efx_udp_tunnel_add,
         .ndo_udp_tunnel_del     = efx_udp_tunnel_del,
+        .ndo_xdp_xmit           = efx_xdp_xmit,
         .ndo_bpf                = efx_xdp
 };
@@ -2680,6 +2683,17 @@ static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp)
         }
 }
 
+static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
+                        u32 flags)
+{
+        struct efx_nic *efx = netdev_priv(dev);
+
+        if (!netif_running(dev))
+                return -EINVAL;
+
+        return efx_xdp_tx_buffers(efx, n, xdpfs, flags & XDP_XMIT_FLUSH);
+}
+
 static void efx_update_name(struct efx_nic *efx)
 {
         strcpy(efx->name, efx->net_dev->name);
@@ -322,4 +322,7 @@ static inline bool efx_rwsem_assert_write_locked(struct rw_semaphore *sem)
         return true;
 }
 
+int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
+                       bool flush);
+
 #endif /* EFX_EFX_H */
@@ -653,6 +653,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
         u8 rx_prefix[EFX_MAX_RX_PREFIX_SIZE];
         struct efx_rx_queue *rx_queue;
         struct bpf_prog *xdp_prog;
+        struct xdp_frame *xdpf;
         struct xdp_buff xdp;
         u32 xdp_act;
         s16 offset;
@@ -713,7 +714,16 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
                 break;
 
         case XDP_TX:
-                return -EOPNOTSUPP;
+                /* Buffer ownership passes to tx on success. */
+                xdpf = convert_to_xdp_frame(&xdp);
+                err = efx_xdp_tx_buffers(efx, 1, &xdpf, true);
+                if (unlikely(err != 1)) {
+                        efx_free_rx_buffers(rx_queue, rx_buf, 1);
+                        if (net_ratelimit())
+                                netif_err(efx, rx_err, efx->net_dev,
+                                          "XDP TX failed (%d)\n", err);
+                }
+                break;
 
         case XDP_REDIRECT:
                 err = xdp_do_redirect(efx->net_dev, &xdp, xdp_prog);
@@ -599,6 +599,94 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
         return NETDEV_TX_OK;
 }
 
+static void efx_xdp_return_frames(int n, struct xdp_frame **xdpfs)
+{
+        int i;
+
+        for (i = 0; i < n; i++)
+                xdp_return_frame_rx_napi(xdpfs[i]);
+}
+
+/* Transmit a packet from an XDP buffer
+ *
+ * Returns number of packets sent on success, error code otherwise.
+ * Runs in NAPI context, either in our poll (for XDP TX) or a different NIC
+ * (for XDP redirect).
+ */
+int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
+                       bool flush)
+{
+        struct efx_tx_buffer *tx_buffer;
+        struct efx_tx_queue *tx_queue;
+        struct xdp_frame *xdpf;
+        dma_addr_t dma_addr;
+        unsigned int len;
+        int space;
+        int cpu;
+        int i;
+
+        cpu = raw_smp_processor_id();
+
+        if (!efx->xdp_tx_queue_count ||
+            unlikely(cpu >= efx->xdp_tx_queue_count))
+                return -EINVAL;
+
+        tx_queue = efx->xdp_tx_queues[cpu];
+        if (unlikely(!tx_queue))
+                return -EINVAL;
+
+        if (unlikely(n && !xdpfs))
+                return -EINVAL;
+
+        if (!n)
+                return 0;
+
+        /* Check for available space. We should never need multiple
+         * descriptors per frame.
+         */
+        space = efx->txq_entries +
+                tx_queue->read_count - tx_queue->insert_count;
+
+        for (i = 0; i < n; i++) {
+                xdpf = xdpfs[i];
+
+                if (i >= space)
+                        break;
+
+                /* We'll want a descriptor for this tx. */
+                prefetchw(__efx_tx_queue_get_insert_buffer(tx_queue));
+
+                len = xdpf->len;
+
+                /* Map for DMA. */
+                dma_addr = dma_map_single(&efx->pci_dev->dev,
+                                          xdpf->data, len,
+                                          DMA_TO_DEVICE);
+                if (dma_mapping_error(&efx->pci_dev->dev, dma_addr))
+                        break;
+
+                /* Create descriptor and set up for unmapping DMA. */
+                tx_buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);
+                tx_buffer->xdpf = xdpf;
+                tx_buffer->flags = EFX_TX_BUF_XDP |
+                                   EFX_TX_BUF_MAP_SINGLE;
+                tx_buffer->dma_offset = 0;
+                tx_buffer->unmap_len = len;
+                tx_queue->tx_packets++;
+        }
+
+        /* Pass mapped frames to hardware. */
+        if (flush && i > 0)
+                efx_nic_push_buffers(tx_queue);
+
+        if (i == 0)
+                return -EIO;
+
+        efx_xdp_return_frames(n - i, xdpfs + i);
+
+        return i;
+}
+
 /* Remove packets from the TX queue
  *
  * This removes packets from the TX queue, up to and including the
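
The new efx_xdp_xmit callback is also reached when an XDP program on another device redirects frames to the sfc interface via the core ndo_xdp_xmit path. A hedged sketch of such a redirect program follows; it is illustrative only and not part of this commit, and SFC_IFINDEX is a placeholder for the sfc netdev's ifindex on the target system:

/* Hypothetical illustration: frames received on some other NIC's XDP hook
 * are redirected to the sfc device, so the core ends up calling the
 * driver's ndo_xdp_xmit (efx_xdp_xmit) to transmit them.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define SFC_IFINDEX 4   /* placeholder: ifindex of the sfc netdev */

SEC("xdp")
int xdp_redirect_to_sfc(struct xdp_md *ctx)
{
        return bpf_redirect(SFC_IFINDEX, 0);
}

char _license[] SEC("license") = "GPL";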