Commit 3fc23339 authored by Gerhard Engleder, committed by Jakub Kicinski

tsnep: Add XDP socket zero-copy RX support

Add support for XSK zero-copy to the RX path. The XSK pool can be set up
at runtime. If the netdev is running, then the queue must be disabled and
re-enabled during reconfiguration. This can be done easily with the
functions introduced in previous commits.
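
As a rough sketch of that flow (hedged: tsnep_queue_close()/tsnep_queue_open()
are assumed names for the queue disable/enable helpers from the previous
commits; the real tsnep_enable_xsk() lives in the collapsed part of this diff):

/* Hedged sketch only, not the actual implementation. */
int tsnep_enable_xsk(struct tsnep_queue *queue, struct xsk_buff_pool *pool)
{
	bool running = netif_running(queue->adapter->netdev);

	/* everything that can fail has been allocated before this point,
	 * so the queue is guaranteed to come back up (see next paragraph)
	 */
	if (running)
		tsnep_queue_close(queue);

	queue->rx->xsk_pool = pool;

	if (running)
		tsnep_queue_open(queue);

	return 0;
}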

A more important property is that, if the netdev is running, the setup of
the XSK pool shall not stop the netdev in case of errors. A broken netdev
after a failed XSK pool setup is bad behavior. Therefore, the allocation
and setup of resources during XSK pool setup is done only before any queue
is disabled. Additionally, the freeing and later re-allocation of resources
is eliminated in some cases: page pool entries are kept for later use, and
two memory models are registered in parallel. As a result, the XSK pool
setup cannot fail during queue reconfiguration.
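
The "two memory models" point corresponds to the xdp_rxq/xdp_rxq_zc pair
added to struct tsnep_rx below: both are registered up front, so switching
between them later cannot fail. A minimal sketch using the core XDP API
(the function itself and its error handling are illustrative assumptions,
not part of this patch):

/* Illustrative sketch: register both memory models in parallel. */
static int tsnep_rx_reg_xdp_info(struct tsnep_rx *rx,
				 struct net_device *netdev,
				 unsigned int napi_id)
{
	int retval;

	/* regular path: page pool backed */
	retval = xdp_rxq_info_reg(&rx->xdp_rxq, netdev, rx->queue_index,
				  napi_id);
	if (retval)
		return retval;
	retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq, MEM_TYPE_PAGE_POOL,
					    rx->page_pool);
	if (retval)
		return retval;

	/* zero-copy path: XSK buffer pool backed */
	retval = xdp_rxq_info_reg(&rx->xdp_rxq_zc, netdev, rx->queue_index,
				  napi_id);
	if (retval)
		return retval;
	return xdp_rxq_info_reg_mem_model(&rx->xdp_rxq_zc,
					  MEM_TYPE_XSK_BUFF_POOL, NULL);
}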

In contrast to other drivers, XSK pool setup and XDP BPF program setup
are separate actions. XSK pool setup can be done without any XDP BPF
program. The XDP BPF program can be added, removed or changed without
any reconfiguration of the XSK pool.
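
In practice this means program setup reduces to an atomic pointer swap with
no queue or pool reconfiguration. Only the tail of tsnep_xdp_setup_prog()
is visible in the diff below; the body here is a hedged reconstruction of
what such a swap typically looks like:

/* Sketch: swapping the XDP program does not touch the XSK pool or the
 * queues; the RX path picks up the new program on the next NAPI poll.
 */
int tsnep_xdp_setup_prog(struct tsnep_adapter *adapter, struct bpf_prog *prog,
			 struct netlink_ext_ack *extack)
{
	struct bpf_prog *old_prog;

	old_prog = xchg(&adapter->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	return 0;
}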

Test results with A53 1.2GHz:

xdpsock rxdrop copy mode, 64 byte frames:
                   pps            pkts           1.00
rx                 856,054        10,625,775
Two CPUs, both at 100% utilization.

xdpsock rxdrop zero-copy mode, 64 byte frames:
                   pps            pkts           1.00
rx                 889,388        4,615,284
Two CPUs at 100% and 20% utilization.

Packet rate increases and CPU utilization is reduced.

The 100% CPU load seems to be the base load. This load is consumed by
ksoftirqd just for dropping the generated packets, even without xdpsock
running.

Using the batch API reduced CPU utilization slightly, but the measurements
are not stable enough to provide meaningful numbers.
Signed-off-by: Gerhard Engleder <gerhard@engleder-embedded.com>
Reviewed-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent c2d64697
@@ -101,7 +101,10 @@ struct tsnep_rx_entry {
 	u32 properties;
-	struct page *page;
+	union {
+		struct page *page;
+		struct xdp_buff *xdp;
+	};
 	size_t len;
 	dma_addr_t dma;
 };
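
The union is safe because a queue operates under exactly one memory model
at a time: with an XSK pool attached, every ring entry carries an xdp_buff
from that pool; otherwise it carries a page pool page. A hypothetical
accessor (the helper name is an assumption) illustrating the invariant:

/* Hypothetical helper: which union member is valid follows from
 * whether an XSK pool is attached to the RX queue.
 */
static dma_addr_t tsnep_rx_entry_get_dma(struct tsnep_rx *rx,
					 struct tsnep_rx_entry *entry)
{
	if (rx->xsk_pool)
		return xsk_buff_xdp_get_dma(entry->xdp);

	return page_pool_get_dma_addr(entry->page);
}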
@@ -121,6 +124,9 @@ struct tsnep_rx {
 	u32 owner_counter;
 	int increment_owner_counter;
 	struct page_pool *page_pool;
+	struct page **page_buffer;
+	struct xsk_buff_pool *xsk_pool;
+	struct xdp_buff **xdp_batch;
 	u32 packets;
 	u32 bytes;
@@ -129,6 +135,7 @@
 	u32 alloc_failed;
 	struct xdp_rxq_info xdp_rxq;
+	struct xdp_rxq_info xdp_rxq_zc;
 };
 
 struct tsnep_queue {
@@ -214,6 +221,8 @@ int tsnep_rxnfc_del_rule(struct tsnep_adapter *adapter,
 int tsnep_xdp_setup_prog(struct tsnep_adapter *adapter, struct bpf_prog *prog,
 			 struct netlink_ext_ack *extack);
+int tsnep_xdp_setup_pool(struct tsnep_adapter *adapter,
+			 struct xsk_buff_pool *pool, u16 queue_id);
 
 #if IS_ENABLED(CONFIG_TSNEP_SELFTESTS)
 int tsnep_ethtool_get_test_count(void);
@@ -242,5 +251,7 @@ static inline void tsnep_ethtool_self_test(struct net_device *dev,
 void tsnep_get_system_time(struct tsnep_adapter *adapter, u64 *time);
 int tsnep_set_irq_coalesce(struct tsnep_queue *queue, u32 usecs);
 u32 tsnep_get_irq_coalesce(struct tsnep_queue *queue);
+int tsnep_enable_xsk(struct tsnep_queue *queue, struct xsk_buff_pool *pool);
+void tsnep_disable_xsk(struct tsnep_queue *queue);
 
 #endif /* _TSNEP_H */
[One file's diff is collapsed and not shown here.]
@@ -17,3 +17,69 @@ int tsnep_xdp_setup_prog(struct tsnep_adapter *adapter, struct bpf_prog *prog,
 	return 0;
 }
+
+static int tsnep_xdp_enable_pool(struct tsnep_adapter *adapter,
+				 struct xsk_buff_pool *pool, u16 queue_id)
+{
+	struct tsnep_queue *queue;
+	int retval;
+
+	if (queue_id >= adapter->num_rx_queues ||
+	    queue_id >= adapter->num_tx_queues)
+		return -EINVAL;
+
+	queue = &adapter->queue[queue_id];
+	if (queue->rx->queue_index != queue_id ||
+	    queue->tx->queue_index != queue_id) {
+		netdev_err(adapter->netdev,
+			   "XSK support only for TX/RX queue pairs\n");
+
+		return -EOPNOTSUPP;
+	}
+
+	retval = xsk_pool_dma_map(pool, adapter->dmadev,
+				  DMA_ATTR_SKIP_CPU_SYNC);
+	if (retval) {
+		netdev_err(adapter->netdev, "failed to map XSK pool\n");
+
+		return retval;
+	}
+
+	retval = tsnep_enable_xsk(queue, pool);
+	if (retval) {
+		xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC);
+
+		return retval;
+	}
+
+	return 0;
+}
+
+static int tsnep_xdp_disable_pool(struct tsnep_adapter *adapter, u16 queue_id)
+{
+	struct xsk_buff_pool *pool;
+	struct tsnep_queue *queue;
+
+	if (queue_id >= adapter->num_rx_queues ||
+	    queue_id >= adapter->num_tx_queues)
+		return -EINVAL;
+
+	pool = xsk_get_pool_from_qid(adapter->netdev, queue_id);
+	if (!pool)
+		return -EINVAL;
+
+	queue = &adapter->queue[queue_id];
+	tsnep_disable_xsk(queue);
+
+	xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC);
+
+	return 0;
+}
+
+int tsnep_xdp_setup_pool(struct tsnep_adapter *adapter,
+			 struct xsk_buff_pool *pool, u16 queue_id)
+{
+	return pool ? tsnep_xdp_enable_pool(adapter, pool, queue_id) :
+		      tsnep_xdp_disable_pool(adapter, queue_id);
+}
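
tsnep_xdp_setup_pool() is reached from the stack through the driver's
ndo_bpf handler, which lives in the collapsed part of this diff. A sketch
of that dispatch (the handler name is an assumption), showing that
XDP_SETUP_PROG and XDP_SETUP_XSK_POOL are handled as independent commands:

/* Sketch of the ndo_bpf dispatch, matching the separation of prog and
 * pool setup described in the commit message.
 */
static int tsnep_netdev_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	struct tsnep_adapter *adapter = netdev_priv(dev);

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return tsnep_xdp_setup_prog(adapter, bpf->prog, bpf->extack);
	case XDP_SETUP_XSK_POOL:
		return tsnep_xdp_setup_pool(adapter, bpf->xsk.pool,
					    bpf->xsk.queue_id);
	default:
		return -EOPNOTSUPP;
	}
}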