Commit 92b13cab authored by David S. Miller

Merge branch '1GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
1GbE Intel Wired LAN Driver Updates 2021-05-20

This series contains updates to the igc driver only.

Andre Guedes says:

This series adds the AF_XDP zero-copy feature to the igc driver.

The initial patches do some code refactoring, preparing the code base to
land the AF_XDP zero-copy feature and avoiding code duplication. The
last patches of the series are the ones implementing the feature.

The last patch, which actually implements AF_XDP zero-copy support, was
originally too lengthy, so for the sake of code review I broke it up
into two patches: one adding support for the RX functionality and the
other adding TX support.
---
v2:
Patch 8/9 - "igc: Enable RX via AF_XDP zero-copy"
 * In the XDP_PASS flow, copy the metadata into the skb as well.
 * When a HW timestamp is added by the NIC, copy it into a local
   variable and then update xdp_buff->data_meta so that the metadata
   length is 0 when the XDP program is called.
 * In igc_xdp_enable_pool(), call xsk_pool_dma_unmap() on
   failure.

Known issues:
 When an XDP application is running in Tx-only mode with zero-copy
 enabled, it is not expected to add frames to the fill queue. I have
 noticed the following two issues in this scenario:
 - If the XDP_USE_NEED_WAKEUP flag is not set by the application,
   igc_poll() goes into an infinite loop: buffer allocation fails,
   which results in igc_clean_rx_irq_zc() indicating that not all work
   is done and that NAPI should keep polling. This does not occur if
   the XDP_USE_NEED_WAKEUP flag is set.
 - Since no buffers are allocated by userspace for the fill queue,
   there is no memory for the NIC to copy received data into. If a
   received packet is destined for the hardware queue where the XDP
   application is running, no packets are received, even on the other
   queues.
 Both these issues can be mitigated by adding a few frames to the fill
 queue (a userspace sketch of this follows the commit message below).
 The second issue can also be mitigated by making sure no packets are
 being received on the hardware queue where the application's Rx is
 running.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 93826306 9acf59a7
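The fill-queue mitigation mentioned in "Known issues" above is a
userspace change. Below is a minimal sketch of it against libbpf's
<bpf/xsk.h> helpers as they existed around this series; the ring handle
name, frame size, and frame count are illustrative assumptions, not
values taken from this merge.

/* Sketch: stock the fill ring with a few UMEM frames so the NIC has
 * memory to receive into, avoiding the Rx stall and the igc_poll()
 * busy loop described above. Assumes an initialized fill ring 'fq'
 * over a UMEM whose frames are FRAME_SIZE bytes each.
 */
#include <bpf/xsk.h>

#define FRAME_SIZE	XSK_UMEM__DEFAULT_FRAME_SIZE
#define NUM_FILL_FRAMES	4	/* "a few frames", per the message above */

static int stock_fill_ring(struct xsk_ring_prod *fq)
{
	__u32 idx;
	int i;

	/* Reserve slots on the fill ring; the kernel copies received
	 * packets into these frames.
	 */
	if (xsk_ring_prod__reserve(fq, NUM_FILL_FRAMES, &idx) !=
	    NUM_FILL_FRAMES)
		return -1;

	for (i = 0; i < NUM_FILL_FRAMES; i++)
		*xsk_ring_prod__fill_addr(fq, idx++) = i * FRAME_SIZE;

	xsk_ring_prod__submit(fq, NUM_FILL_FRAMES);
	return 0;
}

When XDP_USE_NEED_WAKEUP is in use, the application would additionally
kick the kernel (e.g. recvfrom() on the XSK fd for the fill ring, or
sendto() for the Tx ring) whenever xsk_ring_prod__needs_wakeup()
reports that a wakeup is needed.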
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
@@ -118,6 +118,7 @@ struct igc_ring {
 	};
 
 	struct xdp_rxq_info xdp_rxq;
+	struct xsk_buff_pool *xsk_pool;
 } ____cacheline_internodealigned_in_smp;
 
 /* Board specific private data structure */
@@ -255,6 +256,11 @@ bool igc_has_link(struct igc_adapter *adapter);
 void igc_reset(struct igc_adapter *adapter);
 int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx);
 void igc_update_stats(struct igc_adapter *adapter);
+void igc_disable_rx_ring(struct igc_ring *ring);
+void igc_enable_rx_ring(struct igc_ring *ring);
+void igc_disable_tx_ring(struct igc_ring *ring);
+void igc_enable_tx_ring(struct igc_ring *ring);
+int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
 
 /* igc_dump declarations */
 void igc_rings_dump(struct igc_adapter *adapter);
@@ -390,8 +396,6 @@ enum igc_tx_flags {
 	/* olinfo flags */
 	IGC_TX_FLAGS_IPV4	= 0x10,
 	IGC_TX_FLAGS_CSUM	= 0x20,
-
-	IGC_TX_FLAGS_XDP	= 0x100,
 };
 
 enum igc_boards {
@@ -408,12 +412,19 @@ enum igc_boards {
 #define TXD_USE_COUNT(S)	DIV_ROUND_UP((S), IGC_MAX_DATA_PER_TXD)
 #define DESC_NEEDED	(MAX_SKB_FRAGS + 4)
 
+enum igc_tx_buffer_type {
+	IGC_TX_BUFFER_TYPE_SKB,
+	IGC_TX_BUFFER_TYPE_XDP,
+	IGC_TX_BUFFER_TYPE_XSK,
+};
+
 /* wrapper around a pointer to a socket buffer,
  * so a DMA handle can be stored along with the buffer
  */
 struct igc_tx_buffer {
 	union igc_adv_tx_desc *next_to_watch;
 	unsigned long time_stamp;
+	enum igc_tx_buffer_type type;
 	union {
 		struct sk_buff *skb;
 		struct xdp_frame *xdpf;
@@ -428,6 +439,8 @@ struct igc_tx_buffer {
 };
 
 struct igc_rx_buffer {
+	union {
+		struct {
 			dma_addr_t dma;
 			struct page *page;
 #if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
@@ -436,6 +449,9 @@ struct igc_rx_buffer {
 			__u16 page_offset;
 #endif
 			__u16 pagecnt_bias;
+		};
+		struct xdp_buff *xdp;
+	};
 };
 
 struct igc_q_vector {
@@ -521,7 +537,8 @@ enum igc_ring_flags_t {
 	IGC_RING_FLAG_RX_SCTP_CSUM,
 	IGC_RING_FLAG_RX_LB_VLAN_BSWAP,
 	IGC_RING_FLAG_TX_CTX_IDX,
-	IGC_RING_FLAG_TX_DETECT_HANG
+	IGC_RING_FLAG_TX_DETECT_HANG,
+	IGC_RING_FLAG_AF_XDP_ZC,
 };
 
 #define ring_uses_large_buffer(ring) \
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -78,9 +78,11 @@ union igc_adv_rx_desc {
 /* Additional Transmit Descriptor Control definitions */
 #define IGC_TXDCTL_QUEUE_ENABLE	0x02000000 /* Ena specific Tx Queue */
+#define IGC_TXDCTL_SWFLUSH	0x04000000 /* Transmit Software Flush */
 
 /* Additional Receive Descriptor Control definitions */
 #define IGC_RXDCTL_QUEUE_ENABLE	0x02000000 /* Ena specific Rx Queue */
+#define IGC_RXDCTL_SWFLUSH	0x04000000 /* Receive Software Flush */
 
 /* SRRCTL bit definitions */
 #define IGC_SRRCTL_BSIZEPKT_SHIFT	10 /* Shift _right_ */
This diff is collapsed.
diff --git a/drivers/net/ethernet/intel/igc/igc_xdp.c b/drivers/net/ethernet/intel/igc/igc_xdp.c
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright (c) 2020, Intel Corporation. */
 
+#include <net/xdp_sock_drv.h>
+
 #include "igc.h"
 #include "igc_xdp.h"
@@ -32,29 +34,112 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
 	return 0;
 }
 
-int igc_xdp_register_rxq_info(struct igc_ring *ring)
+static int igc_xdp_enable_pool(struct igc_adapter *adapter,
+			       struct xsk_buff_pool *pool, u16 queue_id)
 {
-	struct net_device *dev = ring->netdev;
+	struct net_device *ndev = adapter->netdev;
+	struct device *dev = &adapter->pdev->dev;
+	struct igc_ring *rx_ring, *tx_ring;
+	struct napi_struct *napi;
+	bool needs_reset;
+	u32 frame_size;
 	int err;
 
-	err = xdp_rxq_info_reg(&ring->xdp_rxq, dev, ring->queue_index, 0);
+	if (queue_id >= adapter->num_rx_queues ||
+	    queue_id >= adapter->num_tx_queues)
+		return -EINVAL;
+
+	frame_size = xsk_pool_get_rx_frame_size(pool);
+	if (frame_size < ETH_FRAME_LEN + VLAN_HLEN * 2) {
+		/* When XDP is enabled, the driver doesn't support frames that
+		 * span over multiple buffers. To avoid that, we check if xsk
+		 * frame size is big enough to fit the max ethernet frame size
+		 * + vlan double tagging.
+		 */
+		return -EOPNOTSUPP;
+	}
+
+	err = xsk_pool_dma_map(pool, dev, IGC_RX_DMA_ATTR);
 	if (err) {
-		netdev_err(dev, "Failed to register xdp rxq info\n");
+		netdev_err(ndev, "Failed to map xsk pool\n");
 		return err;
 	}
 
-	err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, MEM_TYPE_PAGE_SHARED,
-					 NULL);
-	if (err) {
-		netdev_err(dev, "Failed to register xdp rxq mem model\n");
-		xdp_rxq_info_unreg(&ring->xdp_rxq);
-		return err;
+	needs_reset = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter);
+
+	rx_ring = adapter->rx_ring[queue_id];
+	tx_ring = adapter->tx_ring[queue_id];
+	/* Rx and Tx rings share the same napi context. */
+	napi = &rx_ring->q_vector->napi;
+
+	if (needs_reset) {
+		igc_disable_rx_ring(rx_ring);
+		igc_disable_tx_ring(tx_ring);
+		napi_disable(napi);
+	}
+
+	set_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags);
+	set_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags);
+
+	if (needs_reset) {
+		napi_enable(napi);
+		igc_enable_rx_ring(rx_ring);
+		igc_enable_tx_ring(tx_ring);
+
+		err = igc_xsk_wakeup(ndev, queue_id, XDP_WAKEUP_RX);
+		if (err) {
+			xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR);
+			return err;
+		}
 	}
 
 	return 0;
 }
 
-void igc_xdp_unregister_rxq_info(struct igc_ring *ring)
+static int igc_xdp_disable_pool(struct igc_adapter *adapter, u16 queue_id)
+{
+	struct igc_ring *rx_ring, *tx_ring;
+	struct xsk_buff_pool *pool;
+	struct napi_struct *napi;
+	bool needs_reset;
+
+	if (queue_id >= adapter->num_rx_queues ||
+	    queue_id >= adapter->num_tx_queues)
+		return -EINVAL;
+
+	pool = xsk_get_pool_from_qid(adapter->netdev, queue_id);
+	if (!pool)
+		return -EINVAL;
+
+	needs_reset = netif_running(adapter->netdev) && igc_xdp_is_enabled(adapter);
+
+	rx_ring = adapter->rx_ring[queue_id];
+	tx_ring = adapter->tx_ring[queue_id];
+	/* Rx and Tx rings share the same napi context. */
+	napi = &rx_ring->q_vector->napi;
+
+	if (needs_reset) {
+		igc_disable_rx_ring(rx_ring);
+		igc_disable_tx_ring(tx_ring);
+		napi_disable(napi);
+	}
+
+	xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR);
+	clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &rx_ring->flags);
+	clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &tx_ring->flags);
+
+	if (needs_reset) {
+		napi_enable(napi);
+		igc_enable_rx_ring(rx_ring);
+		igc_enable_tx_ring(tx_ring);
+	}
+
+	return 0;
+}
+
+int igc_xdp_setup_pool(struct igc_adapter *adapter, struct xsk_buff_pool *pool,
+		       u16 queue_id)
 {
-	xdp_rxq_info_unreg(&ring->xdp_rxq);
+	return pool ? igc_xdp_enable_pool(adapter, pool, queue_id) :
+		      igc_xdp_disable_pool(adapter, queue_id);
 }
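The igc_main.c portion of this diff is collapsed above; it is where the
new pool setup path gets wired into the driver's ndo_bpf hook. Below is
a minimal sketch of that dispatch, assuming the standard netdev_bpf
interface rather than quoting the collapsed hunk:

/* Sketch (not the verbatim collapsed hunk): dispatching XDP program
 * and XSK pool setup requests from the netdev's ndo_bpf callback to
 * the helpers added in igc_xdp.c.
 */
static int igc_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	struct igc_adapter *adapter = netdev_priv(dev);

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return igc_xdp_set_prog(adapter, bpf->prog, bpf->extack);
	case XDP_SETUP_XSK_POOL:
		/* A NULL pool disables zero-copy on that queue. */
		return igc_xdp_setup_pool(adapter, bpf->xsk.pool,
					  bpf->xsk.queue_id);
	default:
		return -EINVAL;
	}
}

This matches the pool/NULL convention visible in igc_xdp_setup_pool()
above.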
diff --git a/drivers/net/ethernet/intel/igc/igc_xdp.h b/drivers/net/ethernet/intel/igc/igc_xdp.h
@@ -6,8 +6,12 @@
 int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
 		     struct netlink_ext_ack *extack);
+int igc_xdp_setup_pool(struct igc_adapter *adapter, struct xsk_buff_pool *pool,
+		       u16 queue_id);
 
-int igc_xdp_register_rxq_info(struct igc_ring *ring);
-void igc_xdp_unregister_rxq_info(struct igc_ring *ring);
+static inline bool igc_xdp_is_enabled(struct igc_adapter *adapter)
+{
+	return !!adapter->xdp_prog;
+}
 
 #endif /* _IGC_XDP_H_ */
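For completeness, here is a hedged sketch of how a userspace
application would request AF_XDP zero-copy on a given igc queue, using
libbpf's xsk helpers of that era; the interface name, queue id, and
ring sizes are illustrative assumptions, not taken from this merge:

/* Sketch: bind an AF_XDP socket in zero-copy mode to queue 0 of an
 * igc interface. UMEM creation, ring setup, and error handling are
 * elided; "eth0" and queue 0 are placeholders.
 */
#include <bpf/xsk.h>
#include <linux/if_xdp.h>

static struct xsk_socket *create_zc_socket(struct xsk_umem *umem,
					   struct xsk_ring_cons *rx,
					   struct xsk_ring_prod *tx)
{
	struct xsk_socket_config cfg = {
		.rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
		.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
		/* Request zero-copy plus the need-wakeup optimization
		 * discussed in the "Known issues" section above.
		 */
		.bind_flags = XDP_ZEROCOPY | XDP_USE_NEED_WAKEUP,
	};
	struct xsk_socket *xsk = NULL;

	if (xsk_socket__create(&xsk, "eth0", 0, umem, rx, tx, &cfg))
		return NULL;

	return xsk;
}

In the kernel, this bind is what ultimately reaches the driver's
XDP_SETUP_XSK_POOL path and invokes igc_xdp_setup_pool() with a
non-NULL pool.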