Commit 47251a36 authored by David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue

Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2021-03-12

This series contains updates to ice, i40e, ixgbe and igb drivers.

Magnus adjusts the return value of the xsk buffer allocation function for
ice. This fixes the reporting of napi work done and matches the behavior
of the other Intel NIC drivers' xsk allocation routines.

For ice, i40e, and ixgbe, Maciej moves the storing of the rx_offset value
to after the build_skb flag is set, since that flag affects the offset value.

Li RongQing resolves an issue in igb where an Rx buffer can be reused
prematurely with XDP redirect.
====================
parents 6afa455e 98dfb02a
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -3258,6 +3258,17 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
 	return 0;
 }
 
+/**
+ * i40e_rx_offset - Return expected offset into page to access data
+ * @rx_ring: Ring we are requesting offset of
+ *
+ * Returns the offset value for ring into the data buffer.
+ */
+static unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
+{
+	return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
+}
+
 /**
  * i40e_configure_rx_ring - Configure a receive ring context
  * @ring: The Rx ring to configure
@@ -3369,6 +3380,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
 	else
 		set_ring_build_skb_enabled(ring);
 
+	ring->rx_offset = i40e_rx_offset(ring);
+
 	/* cache tail for quicker writes, and clear the reg before use */
 	ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
 	writel(0, ring->tail);
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1569,17 +1569,6 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring)
 	}
 }
 
-/**
- * i40e_rx_offset - Return expected offset into page to access data
- * @rx_ring: Ring we are requesting offset of
- *
- * Returns the offset value for ring into the data buffer.
- */
-static unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
-{
-	return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
-}
-
 /**
  * i40e_setup_rx_descriptors - Allocate Rx descriptors
  * @rx_ring: Rx descriptor ring (for a specific queue) to setup
@@ -1608,7 +1597,6 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
 	rx_ring->next_to_alloc = 0;
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
-	rx_ring->rx_offset = i40e_rx_offset(rx_ring);
 
 	/* XDP RX-queue info only needed for RX rings exposed to XDP */
 	if (rx_ring->vsi->type == I40E_VSI_MAIN) {
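Side note for readers skimming the diffs: the rx_offset change is purely an ordering fix. The offset is derived from the build_skb flag, so storing it at descriptor-setup time, before ring configuration has decided that flag, records a stale value. The sketch below is a minimal userspace C illustration of that ordering; struct rx_ring, rx_offset() and SKB_PAD are simplified stand-ins, not the drivers' real definitions.

#include <stdbool.h>
#include <stdio.h>

#define SKB_PAD 64			/* stand-in for I40E_SKB_PAD/ICE_SKB_PAD */

struct rx_ring {			/* simplified stand-in for the driver ring */
	bool build_skb;
	unsigned int rx_offset;
};

/* mirrors the shape of i40e_rx_offset()/ixgbe_rx_offset() */
static unsigned int rx_offset(const struct rx_ring *ring)
{
	return ring->build_skb ? SKB_PAD : 0;
}

int main(void)
{
	struct rx_ring ring = { .build_skb = false, .rx_offset = 0 };

	/* old ordering: offset stored at descriptor setup, before the
	 * build_skb decision is made during ring configuration */
	ring.rx_offset = rx_offset(&ring);	/* stale: 0 */
	ring.build_skb = true;			/* decision comes too late */
	printf("old ordering: %u\n", ring.rx_offset);

	/* new ordering: decide build_skb first, then store the offset */
	ring.build_skb = true;
	ring.rx_offset = rx_offset(&ring);	/* correct: 64 */
	printf("new ordering: %u\n", ring.rx_offset);
	return 0;
}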
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -274,6 +274,22 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
 	tlan_ctx->legacy_int = ICE_TX_LEGACY;
 }
 
+/**
+ * ice_rx_offset - Return expected offset into page to access data
+ * @rx_ring: Ring we are requesting offset of
+ *
+ * Returns the offset value for ring into the data buffer.
+ */
+static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
+{
+	if (ice_ring_uses_build_skb(rx_ring))
+		return ICE_SKB_PAD;
+	else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
+		return XDP_PACKET_HEADROOM;
+
+	return 0;
+}
+
 /**
  * ice_setup_rx_ctx - Configure a receive ring context
  * @ring: The Rx ring to configure
@@ -413,11 +429,15 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
 	else
 		ice_set_ring_build_skb_ena(ring);
 
+	ring->rx_offset = ice_rx_offset(ring);
+
 	/* init queue specific tail register */
 	ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
 	writel(0, ring->tail);
 
 	if (ring->xsk_pool) {
+		bool ok;
+
 		if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) {
 			dev_warn(dev, "XSK buffer pool does not provide enough addresses to fill %d buffers on Rx ring %d\n",
 				 num_bufs, ring->q_index);
@@ -426,8 +446,8 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
 			return 0;
 		}
 
-		err = ice_alloc_rx_bufs_zc(ring, num_bufs);
-		if (err)
+		ok = ice_alloc_rx_bufs_zc(ring, num_bufs);
+		if (!ok)
 			dev_info(dev, "Failed to allocate some buffers on XSK buffer pool enabled Rx ring %d (pf_q %d)\n",
 				 ring->q_index, pf_q);
 
 		return 0;
...@@ -443,22 +443,6 @@ void ice_free_rx_ring(struct ice_ring *rx_ring) ...@@ -443,22 +443,6 @@ void ice_free_rx_ring(struct ice_ring *rx_ring)
} }
} }
/**
* ice_rx_offset - Return expected offset into page to access data
* @rx_ring: Ring we are requesting offset of
*
* Returns the offset value for ring into the data buffer.
*/
static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
{
if (ice_ring_uses_build_skb(rx_ring))
return ICE_SKB_PAD;
else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
return XDP_PACKET_HEADROOM;
return 0;
}
/** /**
* ice_setup_rx_ring - Allocate the Rx descriptors * ice_setup_rx_ring - Allocate the Rx descriptors
* @rx_ring: the Rx ring to set up * @rx_ring: the Rx ring to set up
...@@ -493,7 +477,6 @@ int ice_setup_rx_ring(struct ice_ring *rx_ring) ...@@ -493,7 +477,6 @@ int ice_setup_rx_ring(struct ice_ring *rx_ring)
rx_ring->next_to_use = 0; rx_ring->next_to_use = 0;
rx_ring->next_to_clean = 0; rx_ring->next_to_clean = 0;
rx_ring->rx_offset = ice_rx_offset(rx_ring);
if (ice_is_xdp_ena_vsi(rx_ring->vsi)) if (ice_is_xdp_ena_vsi(rx_ring->vsi))
WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog); WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
......
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -358,18 +358,18 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
  * This function allocates a number of Rx buffers from the fill ring
  * or the internal recycle mechanism and places them on the Rx ring.
  *
- * Returns false if all allocations were successful, true if any fail.
+ * Returns true if all allocations were successful, false if any fail.
  */
 bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
 {
 	union ice_32b_rx_flex_desc *rx_desc;
 	u16 ntu = rx_ring->next_to_use;
 	struct ice_rx_buf *rx_buf;
-	bool ret = false;
+	bool ok = true;
 	dma_addr_t dma;
 
 	if (!count)
-		return false;
+		return true;
 
 	rx_desc = ICE_RX_DESC(rx_ring, ntu);
 	rx_buf = &rx_ring->rx_buf[ntu];
@@ -377,7 +377,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
 	do {
 		rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
 		if (!rx_buf->xdp) {
-			ret = true;
+			ok = false;
 			break;
 		}
 
@@ -402,7 +402,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
 		ice_release_rx_desc(rx_ring, ntu);
 	}
 
-	return ret;
+	return ok;
 }
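The ice_alloc_rx_bufs_zc() change above inverts the function's boolean so that true means full success, matching the other Intel drivers. The point of the convention is on the caller side: the zero-copy napi poll folds allocation failure into a flag and, on failure, reports the full budget so napi keeps polling and retries the allocation. The skeleton below illustrates that pattern under stated assumptions; clean_rx_irq_zc() and the stub allocator are simplified stand-ins, not the ice driver's actual code.

#include <stdbool.h>
#include <stddef.h>

/* stand-in for xsk_buff_alloc(); NULL simulates an exhausted fill queue */
static void *xsk_buff_alloc_stub(void)
{
	return NULL;
}

/* shape of ice_alloc_rx_bufs_zc() after the patch: true only if every
 * requested buffer made it onto the ring */
static bool alloc_rx_bufs_zc(unsigned int count)
{
	bool ok = true;

	while (count--) {
		if (!xsk_buff_alloc_stub()) {
			ok = false;
			break;
		}
		/* ... write the buffer's DMA address into the descriptor ... */
	}
	return ok;
}

/* illustrative napi poll skeleton: accumulate failure, then report */
static int clean_rx_irq_zc(int budget)
{
	bool failure = false;
	int work_done = 0;

	/* ... clean received descriptors, incrementing work_done ... */

	failure |= !alloc_rx_bufs_zc(16);

	/* on failure report the full budget so napi keeps polling and the
	 * allocation is retried; otherwise report the real work done */
	return failure ? budget : work_done;
}

int main(void)
{
	return clean_rx_irq_zc(64) == 64 ? 0 : 1;
}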
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -8214,7 +8214,8 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
 	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
 }
 
-static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer)
+static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
+				  int rx_buf_pgcnt)
 {
 	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
 	struct page *page = rx_buffer->page;
@@ -8225,7 +8226,7 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer)
 
 #if (PAGE_SIZE < 8192)
 	/* if we are only owner of page we can reuse it */
-	if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
+	if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
 		return false;
 #else
 #define IGB_LAST_OFFSET \
@@ -8614,11 +8615,17 @@ static unsigned int igb_rx_offset(struct igb_ring *rx_ring)
 }
 
 static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
-					       const unsigned int size)
+					       const unsigned int size, int *rx_buf_pgcnt)
 {
 	struct igb_rx_buffer *rx_buffer;
 
 	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+	*rx_buf_pgcnt =
+#if (PAGE_SIZE < 8192)
+		page_count(rx_buffer->page);
+#else
+		0;
+#endif
 	prefetchw(rx_buffer->page);
 
 	/* we are reusing so sync this buffer for CPU use */
@@ -8634,9 +8641,9 @@ static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
 }
 
 static void igb_put_rx_buffer(struct igb_ring *rx_ring,
-			      struct igb_rx_buffer *rx_buffer)
+			      struct igb_rx_buffer *rx_buffer, int rx_buf_pgcnt)
 {
-	if (igb_can_reuse_rx_page(rx_buffer)) {
+	if (igb_can_reuse_rx_page(rx_buffer, rx_buf_pgcnt)) {
 		/* hand second half of page back to the ring */
 		igb_reuse_rx_page(rx_ring, rx_buffer);
 	} else {
@@ -8664,6 +8671,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
 	unsigned int xdp_xmit = 0;
 	struct xdp_buff xdp;
 	u32 frame_sz = 0;
+	int rx_buf_pgcnt;
 
 	/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
 #if (PAGE_SIZE < 8192)
@@ -8693,7 +8701,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
 	 */
 	dma_rmb();
 
-	rx_buffer = igb_get_rx_buffer(rx_ring, size);
+	rx_buffer = igb_get_rx_buffer(rx_ring, size, &rx_buf_pgcnt);
 
 	/* retrieve a buffer from the ring */
 	if (!skb) {
@@ -8736,7 +8744,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
 			break;
 		}
 
-		igb_put_rx_buffer(rx_ring, rx_buffer);
+		igb_put_rx_buffer(rx_ring, rx_buffer, rx_buf_pgcnt);
 		cleaned_count++;
 
 		/* fetch next buffer in frame if non-eop */
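The igb hunks above all serve one fix: with XDP redirect in play, the page backing an Rx buffer can be freed from another context while the driver is still processing the frame, so reading page_count() at put-time races with that free and can approve a premature reuse. The fix samples the count once, when the buffer is fetched, and compares that snapshot against pagecnt_bias. The sketch below restates the idea in plain C; the structs and stubs are simplified assumptions, not igb's real ones.

#include <stdbool.h>

struct rx_buffer {		/* simplified stand-in for struct igb_rx_buffer */
	int pagecnt_bias;	/* page references the driver still holds */
};

/* pretend page_count(): 3 = the driver's ref plus references held
 * elsewhere (e.g. a redirected fragment still in flight) */
static int page_count_stub(void)
{
	return 3;
}

/* snapshot the count BEFORE the frame can reach xdp_do_redirect() */
static void get_rx_buffer(struct rx_buffer *buf, int *rx_buf_pgcnt)
{
	(void)buf;
	*rx_buf_pgcnt = page_count_stub();
}

/* decide reuse from the early snapshot, never from a fresh, racy read */
static bool can_reuse_rx_page(const struct rx_buffer *buf, int rx_buf_pgcnt)
{
	/* same shape as igb's check: anyone besides the driver held the
	 * page at snapshot time -> do not reuse */
	return (rx_buf_pgcnt - buf->pagecnt_bias) <= 1;
}

int main(void)
{
	struct rx_buffer buf = { .pagecnt_bias = 1 };
	int pgcnt;

	get_rx_buffer(&buf, &pgcnt);
	/* ... XDP program runs here; a redirect may free the other page
	 * half asynchronously, changing the live count under us ... */
	return can_reuse_rx_page(&buf, pgcnt) ? 0 : 1; /* here: 1, no reuse */
}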
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -4118,6 +4118,8 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
 #endif
 	}
 
+	ring->rx_offset = ixgbe_rx_offset(ring);
+
 	if (ring->xsk_pool && hw->mac.type != ixgbe_mac_82599EB) {
 		u32 xsk_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
 
@@ -6578,7 +6580,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
-	rx_ring->rx_offset = ixgbe_rx_offset(rx_ring);
 
 	/* XDP RX-queue info */
 	if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,