Commit 83014323 authored by Tirthendu Sarkar, committed by Alexei Starovoitov

i40e: handle multi-buffer packets that are shrunk by xdp prog

XDP programs can shrink packets by calling the bpf_xdp_adjust_tail()
helper function. For multi-buffer packets this may lead to reduction of
frag count stored in skb_shared_info area of the xdp_buff struct. This
results in issues with the current handling of XDP_PASS and XDP_DROP
cases.
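
For illustration (not part of this patch), a minimal frags-aware XDP
program that exercises this path could look as follows; the program
name and the 512-byte trim are arbitrary, and SEC("xdp.frags") makes
libbpf load the prog with BPF_F_XDP_HAS_FRAGS so that it may receive
multi-buffer packets:

  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  SEC("xdp.frags")
  int xdp_shrink_tail(struct xdp_md *ctx)
  {
          /* A negative delta shrinks the packet from the tail; frags
           * that become empty are freed by the helper, reducing
           * nr_frags in the xdp_buff's skb_shared_info.
           */
          if (bpf_xdp_adjust_tail(ctx, -512) < 0)
                  return XDP_ABORTED;
          return XDP_PASS;
  }

  char _license[] SEC("license") = "GPL";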

For XDP_PASS, the skb is currently built using the frag count of the
xdp_buff before it was processed by the XDP prog, which results in an
inconsistent skb when the frag count gets reduced by the XDP prog. To
fix this, get the correct frag count while building the skb instead of
using the pre-obtained frag count.
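
In code terms (the same pattern as in the i40e_construct_skb() and
i40e_build_skb() hunks below), the frag count is now sampled only
after the prog has run:

  u32 nr_frags = 0;

  if (unlikely(xdp_buff_has_frags(xdp))) {
          struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);

          nr_frags = sinfo->nr_frags;
  }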

For XDP_DROP, the current page recycling logic will not reuse the page
but instead adjusts the pagecnt_bias so that the page can be freed. This
again results in inconsistent behavior, as the page refcnt has already
been changed by the helper while freeing the frag(s) as part of
shrinking the packet. To fix this, only adjust pagecnt_bias for buffers
that are still part of the packet post-xdp prog run.
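
Excerpted from the i40e_process_rx_buffs() hunk below, with commentary
added:

  /* Frags freed by bpf_xdp_adjust_tail() already had their page
   * refcount dropped by the helper, so only buffers that are still
   * part of the packet (i = 0 .. nr_frags, i.e. head plus remaining
   * frags) get their pagecnt_bias bumped for recycling.
   */
  if (xdp_res != I40E_XDP_CONSUMED)
          i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz);
  else if (i++ <= nr_frags)
          rx_buffer->pagecnt_bias++;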

Fixes: e213ced1 ("i40e: add support for XDP multi-buffer Rx")
Reported-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Tirthendu Sarkar <tirthendu.sarkar@intel.com>
Link: https://lore.kernel.org/r/20240124191602.566724-6-maciej.fijalkowski@intel.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent ad2047cf
@@ -2087,7 +2087,8 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
 static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res,
 				  struct xdp_buff *xdp)
 {
-	u32 next = rx_ring->next_to_clean;
+	u32 nr_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
+	u32 next = rx_ring->next_to_clean, i = 0;
 	struct i40e_rx_buffer *rx_buffer;
 
 	xdp->flags = 0;
@@ -2100,10 +2101,10 @@ static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res,
 		if (!rx_buffer->page)
 			continue;
 
-		if (xdp_res == I40E_XDP_CONSUMED)
-			rx_buffer->pagecnt_bias++;
-		else
+		if (xdp_res != I40E_XDP_CONSUMED)
 			i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz);
+		else if (i++ <= nr_frags)
+			rx_buffer->pagecnt_bias++;
 
 		/* EOP buffer will be put in i40e_clean_rx_irq() */
 		if (next == rx_ring->next_to_process)
@@ -2117,20 +2118,20 @@ static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res,
 /**
  * i40e_construct_skb - Allocate skb and populate it
  * @rx_ring: rx descriptor ring to transact packets on
  * @xdp: xdp_buff pointing to the data
- * @nr_frags: number of buffers for the packet
  *
  * This function allocates an skb. It then populates it with the page
  * data from the current receive descriptor, taking care to set up the
  * skb correctly.
  */
 static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
-					  struct xdp_buff *xdp,
-					  u32 nr_frags)
+					  struct xdp_buff *xdp)
 {
 	unsigned int size = xdp->data_end - xdp->data;
 	struct i40e_rx_buffer *rx_buffer;
+	struct skb_shared_info *sinfo;
 	unsigned int headlen;
 	struct sk_buff *skb;
+	u32 nr_frags = 0;
 
 	/* prefetch first cache line of first page */
 	net_prefetch(xdp->data);
@@ -2168,6 +2169,10 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
 	memcpy(__skb_put(skb, headlen), xdp->data,
 	       ALIGN(headlen, sizeof(long)));
 
+	if (unlikely(xdp_buff_has_frags(xdp))) {
+		sinfo = xdp_get_shared_info_from_buff(xdp);
+		nr_frags = sinfo->nr_frags;
+	}
+
 	rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
 	/* update all of the pointers */
 	size -= headlen;
@@ -2187,9 +2192,8 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
 	}
 
 	if (unlikely(xdp_buff_has_frags(xdp))) {
-		struct skb_shared_info *sinfo, *skinfo = skb_shinfo(skb);
+		struct skb_shared_info *skinfo = skb_shinfo(skb);
 
-		sinfo = xdp_get_shared_info_from_buff(xdp);
 		memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0],
 		       sizeof(skb_frag_t) * nr_frags);
@@ -2212,17 +2216,17 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
 /**
  * i40e_build_skb - Build skb around an existing buffer
  * @rx_ring: Rx descriptor ring to transact packets on
  * @xdp: xdp_buff pointing to the data
- * @nr_frags: number of buffers for the packet
  *
  * This function builds an skb around an existing Rx buffer, taking care
  * to set up the skb correctly and avoid any memcpy overhead.
  */
 static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
-				       struct xdp_buff *xdp,
-				       u32 nr_frags)
+				       struct xdp_buff *xdp)
 {
 	unsigned int metasize = xdp->data - xdp->data_meta;
+	struct skb_shared_info *sinfo;
 	struct sk_buff *skb;
+	u32 nr_frags;
 
 	/* Prefetch first cache line of first page. If xdp->data_meta
 	 * is unused, this points exactly as xdp->data, otherwise we
@@ -2231,6 +2235,11 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
 	 */
 	net_prefetch(xdp->data_meta);
 
+	if (unlikely(xdp_buff_has_frags(xdp))) {
+		sinfo = xdp_get_shared_info_from_buff(xdp);
+		nr_frags = sinfo->nr_frags;
+	}
+
 	/* build an skb around the page buffer */
 	skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz);
 	if (unlikely(!skb))
@@ -2243,9 +2252,6 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
 	skb_metadata_set(skb, metasize);
 
 	if (unlikely(xdp_buff_has_frags(xdp))) {
-		struct skb_shared_info *sinfo;
-
-		sinfo = xdp_get_shared_info_from_buff(xdp);
 		xdp_update_skb_shared_info(skb, nr_frags,
 					   sinfo->xdp_frags_size,
 					   nr_frags * xdp->frame_sz,
@@ -2589,9 +2595,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
 			total_rx_bytes += size;
 		} else {
 			if (ring_uses_build_skb(rx_ring))
-				skb = i40e_build_skb(rx_ring, xdp, nfrags);
+				skb = i40e_build_skb(rx_ring, xdp);
 			else
-				skb = i40e_construct_skb(rx_ring, xdp, nfrags);
+				skb = i40e_construct_skb(rx_ring, xdp);
 
 			/* drop if we failed to retrieve a buffer */
 			if (!skb) {