Commit 1d02c039 authored by David S. Miller

Merge branch '10GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
10GbE Intel Wired LAN Driver Updates 2022-01-31

Alexander Lobakin says:

This extends [0] to the other Intel Ethernet drivers
(and is (re)based on its code).
The main aim is to keep XDP metadata not only in the build_skb()
case, but also when we do napi_alloc_skb() + memcpy().

All Intel drivers suffer from the same issues here:
 1. metadata gets lost on XDP_PASS in legacy-rx;
 2. excessive headroom is allocated on XSK Rx to skbs;
 3. metadata gets lost on XSK Rx to skbs.

These become especially relevant with the upcoming XDP Hints.
I couldn't address the first one for all Intel drivers because they
currently don't reserve any headroom in legacy-rx mode, even with XDP
enabled. This is hugely wrong, but fixing it requires quite a bit of
work and a separate series. Luckily, ice doesn't suffer from that.
igc already has issues 1 and 3 fixed in [0].

[0] https://lore.kernel.org/netdev/163700856423.565980.10162564921347693758.stgit@firesoul
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 9a90986e f322a620
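
For context, the "XDP metadata" preserved by the changes below is the area an XDP program reserves in front of the frame with bpf_xdp_adjust_meta(). A minimal, illustrative producer (not part of this series; the program name xdp_stamp_meta is made up) that stamps a 4-byte value into that area looks roughly like this:

/* Sketch of an XDP program that writes 4 bytes of metadata in front of
 * the packet; on XDP_PASS the value survives only if the driver copies
 * the frame starting at data_meta, as the patches below do.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_stamp_meta(struct xdp_md *ctx)
{
        __u32 *meta;
        void *data;

        /* grow the metadata area by 4 bytes */
        if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta)))
                return XDP_PASS;

        data = (void *)(long)ctx->data;
        meta = (void *)(long)ctx->data_meta;
        if ((void *)(meta + 1) > data)  /* bounds check for the verifier */
                return XDP_PASS;

        *meta = 0x2a;
        return XDP_PASS;
}

char _license[] SEC("license") = "GPL";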
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -241,21 +241,25 @@ bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
 static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
                                              struct xdp_buff *xdp)
 {
+       unsigned int totalsize = xdp->data_end - xdp->data_meta;
        unsigned int metasize = xdp->data - xdp->data_meta;
-       unsigned int datasize = xdp->data_end - xdp->data;
        struct sk_buff *skb;
 
+       net_prefetch(xdp->data_meta);
+
        /* allocate a skb to store the frags */
-       skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
-                              xdp->data_end - xdp->data_hard_start,
+       skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
                               GFP_ATOMIC | __GFP_NOWARN);
        if (unlikely(!skb))
                goto out;
 
-       skb_reserve(skb, xdp->data - xdp->data_hard_start);
-       memcpy(__skb_put(skb, datasize), xdp->data, datasize);
-       if (metasize)
+       memcpy(__skb_put(skb, totalsize), xdp->data_meta,
+              ALIGN(totalsize, sizeof(long)));
+
+       if (metasize) {
                skb_metadata_set(skb, metasize);
+               __skb_pull(skb, metasize);
+       }
 
 out:
        xsk_buff_free(xdp);
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -983,15 +983,17 @@ static struct sk_buff *
 ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
                   struct xdp_buff *xdp)
 {
+       unsigned int metasize = xdp->data - xdp->data_meta;
        unsigned int size = xdp->data_end - xdp->data;
        unsigned int headlen;
        struct sk_buff *skb;
 
        /* prefetch first cache line of first page */
-       net_prefetch(xdp->data);
+       net_prefetch(xdp->data_meta);
 
        /* allocate a skb to store the frags */
-       skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
+       skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
+                              ICE_RX_HDR_SIZE + metasize,
                               GFP_ATOMIC | __GFP_NOWARN);
        if (unlikely(!skb))
                return NULL;
@@ -1003,8 +1005,13 @@ ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
                headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);
 
        /* align pull length to size of long to optimize memcpy performance */
-       memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
-                                                        sizeof(long)));
+       memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta,
+              ALIGN(headlen + metasize, sizeof(long)));
+
+       if (metasize) {
+               skb_metadata_set(skb, metasize);
+               __skb_pull(skb, metasize);
+       }
 
        /* if we exhaust the linear part then add what is left as a frag */
        size -= headlen;
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -428,20 +428,24 @@ static void ice_bump_ntc(struct ice_rx_ring *rx_ring)
 static struct sk_buff *
 ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
 {
-       unsigned int datasize_hard = xdp->data_end - xdp->data_hard_start;
+       unsigned int totalsize = xdp->data_end - xdp->data_meta;
        unsigned int metasize = xdp->data - xdp->data_meta;
-       unsigned int datasize = xdp->data_end - xdp->data;
        struct sk_buff *skb;
 
-       skb = __napi_alloc_skb(&rx_ring->q_vector->napi, datasize_hard,
+       net_prefetch(xdp->data_meta);
+
+       skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
                               GFP_ATOMIC | __GFP_NOWARN);
        if (unlikely(!skb))
                return NULL;
 
-       skb_reserve(skb, xdp->data - xdp->data_hard_start);
-       memcpy(__skb_put(skb, datasize), xdp->data, datasize);
-       if (metasize)
+       memcpy(__skb_put(skb, totalsize), xdp->data_meta,
+              ALIGN(totalsize, sizeof(long)));
+
+       if (metasize) {
                skb_metadata_set(skb, metasize);
+               __skb_pull(skb, metasize);
+       }
 
        xsk_buff_free(xdp);
        return skb;
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -2446,19 +2446,20 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
 static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring,
                                             struct xdp_buff *xdp)
 {
+       unsigned int totalsize = xdp->data_end - xdp->data_meta;
        unsigned int metasize = xdp->data - xdp->data_meta;
-       unsigned int datasize = xdp->data_end - xdp->data;
-       unsigned int totalsize = metasize + datasize;
        struct sk_buff *skb;
 
-       skb = __napi_alloc_skb(&ring->q_vector->napi,
-                              xdp->data_end - xdp->data_hard_start,
+       net_prefetch(xdp->data_meta);
+
+       skb = __napi_alloc_skb(&ring->q_vector->napi, totalsize,
                               GFP_ATOMIC | __GFP_NOWARN);
        if (unlikely(!skb))
                return NULL;
 
-       skb_reserve(skb, xdp->data_meta - xdp->data_hard_start);
-       memcpy(__skb_put(skb, totalsize), xdp->data_meta, totalsize);
+       memcpy(__skb_put(skb, totalsize), xdp->data_meta,
+              ALIGN(totalsize, sizeof(long)));
+
        if (metasize) {
                skb_metadata_set(skb, metasize);
                __skb_pull(skb, metasize);
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -207,26 +207,28 @@ bool ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)
 }
 
 static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring,
-                                              struct ixgbe_rx_buffer *bi)
+                                              const struct xdp_buff *xdp)
 {
-       unsigned int metasize = bi->xdp->data - bi->xdp->data_meta;
-       unsigned int datasize = bi->xdp->data_end - bi->xdp->data;
+       unsigned int totalsize = xdp->data_end - xdp->data_meta;
+       unsigned int metasize = xdp->data - xdp->data_meta;
        struct sk_buff *skb;
 
+       net_prefetch(xdp->data_meta);
+
        /* allocate a skb to store the frags */
-       skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
-                              bi->xdp->data_end - bi->xdp->data_hard_start,
+       skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
                               GFP_ATOMIC | __GFP_NOWARN);
        if (unlikely(!skb))
                return NULL;
 
-       skb_reserve(skb, bi->xdp->data - bi->xdp->data_hard_start);
-       memcpy(__skb_put(skb, datasize), bi->xdp->data, datasize);
-       if (metasize)
+       memcpy(__skb_put(skb, totalsize), xdp->data_meta,
+              ALIGN(totalsize, sizeof(long)));
+
+       if (metasize) {
                skb_metadata_set(skb, metasize);
+               __skb_pull(skb, metasize);
+       }
 
-       xsk_buff_free(bi->xdp);
-       bi->xdp = NULL;
        return skb;
 }
 
@@ -317,12 +319,15 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
                }
 
                /* XDP_PASS path */
-               skb = ixgbe_construct_skb_zc(rx_ring, bi);
+               skb = ixgbe_construct_skb_zc(rx_ring, bi->xdp);
                if (!skb) {
                        rx_ring->rx_stats.alloc_rx_buff_failed++;
                        break;
                }
 
+               xsk_buff_free(bi->xdp);
+               bi->xdp = NULL;
+
                cleaned_count++;
                ixgbe_inc_ntc(rx_ring);
 
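
Once a driver preserves the metadata through these skb paths, a later consumer on the same interface can still read what the XDP program wrote, for example a tc/cls_bpf program via __sk_buff::data_meta. Again only an illustrative sketch (the program name tc_read_meta is made up, not code from this series):

/* Sketch of a tc ingress program that reads the 4-byte metadata value
 * stamped by the XDP producer above, provided the driver kept it.
 */
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int tc_read_meta(struct __sk_buff *skb)
{
        void *data = (void *)(long)skb->data;
        __u32 *meta = (void *)(long)skb->data_meta;

        /* data_meta == data means no (or too little) metadata survived */
        if ((void *)(meta + 1) > data)
                return TC_ACT_OK;

        bpf_printk("xdp metadata: 0x%x", *meta);
        return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";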