Commit d7956d81 authored by Maciej Fijalkowski, committed by Daniel Borkmann

ice: Pull out next_to_clean bump out of ice_put_rx_buf()

The plan is to move ice_put_rx_buf() to the end of ice_clean_rx_irq(), so
in order to keep the ability to walk through HW Rx descriptors, pull the
next_to_clean handling out of ice_put_rx_buf().
Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: Alexander Lobakin <alexandr.lobakin@intel.com>
Link: https://lore.kernel.org/bpf/20230131204506.219292-5-maciej.fijalkowski@intel.com
parent ac075339
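
The resulting shape of the change: ice_get_rx_buf() takes the ring index as a parameter, the caller bumps that index with a wrap check after each descriptor, and next_to_clean is written back to the ring only once, after the poll loop. Below is a minimal, self-contained sketch of that caller-owned-index pattern; the names (struct rx_ring, struct rx_buf, get_rx_buf(), clean_rx_irq()) are simplified stand-ins for illustration, not the actual ice driver definitions.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified stand-ins for the driver's ring and buffer structs. */
struct rx_buf {
        void *page;
};

struct rx_ring {
        struct rx_buf *rx_buf;   /* array of 'count' buffers */
        uint32_t count;          /* number of descriptors in the ring */
        uint32_t next_to_clean;  /* index of the next descriptor to process */
};

/* The helper no longer reads or bumps next_to_clean; the caller passes the index. */
static struct rx_buf *get_rx_buf(struct rx_ring *ring, uint32_t ntc)
{
        return &ring->rx_buf[ntc];
}

/* The caller owns the index: bump it with a wrap check per descriptor and
 * store it back to the ring once, when the poll loop is done.
 */
static void clean_rx_irq(struct rx_ring *ring, int budget)
{
        uint32_t ntc = ring->next_to_clean;
        uint32_t cnt = ring->count;

        while (budget--) {
                struct rx_buf *buf = get_rx_buf(ring, ntc);

                (void)buf;              /* ... process the descriptor/buffer ... */

                if (++ntc == cnt)       /* wrap-around bump stays in the caller */
                        ntc = 0;
        }

        ring->next_to_clean = ntc;      /* single write-back per poll */
}

int main(void)
{
        struct rx_buf bufs[4] = { { NULL }, { NULL }, { NULL }, { NULL } };
        struct rx_ring ring = { .rx_buf = bufs, .count = 4, .next_to_clean = 2 };

        clean_rx_irq(&ring, 5);         /* process 5 descriptors starting at index 2 */
        printf("next_to_clean = %u\n", (unsigned int)ring.next_to_clean);  /* prints 3 */
        return 0;
}

Keeping the index bump in the caller is what allows ice_put_rx_buf() to be deferred to the end of ice_clean_rx_irq() later on, while the loop keeps walking HW Rx descriptors by ntc.
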
@@ -898,11 +898,12 @@ ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf)
  * for use by the CPU.
  */
 static struct ice_rx_buf *
-ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size)
+ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
+               const unsigned int ntc)
 {
         struct ice_rx_buf *rx_buf;

-        rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
+        rx_buf = &rx_ring->rx_buf[ntc];
         rx_buf->pgcnt =
 #if (PAGE_SIZE < 8192)
                 page_count(rx_buf->page);
@@ -1040,19 +1041,12 @@ ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
  * @rx_ring: Rx descriptor ring to transact packets on
  * @rx_buf: Rx buffer to pull data from
  *
- * This function will update next_to_clean and then clean up the contents
- * of the rx_buf. It will either recycle the buffer or unmap it and free
- * the associated resources.
+ * This function will clean up the contents of the rx_buf. It will either
+ * recycle the buffer or unmap it and free the associated resources.
  */
 static void
 ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf)
 {
-        u16 ntc = rx_ring->next_to_clean + 1;
-
-        /* fetch, update, and store next to clean */
-        ntc = (ntc < rx_ring->count) ? ntc : 0;
-        rx_ring->next_to_clean = ntc;
-
         if (!rx_buf)
                 return;
@@ -1114,6 +1108,8 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
         unsigned int xdp_res, xdp_xmit = 0;
         struct sk_buff *skb = rx_ring->skb;
         struct bpf_prog *xdp_prog = NULL;
+        u32 ntc = rx_ring->next_to_clean;
+        u32 cnt = rx_ring->count;
         bool failure;

         /* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
@@ -1136,7 +1132,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
                 u16 rx_ptype;

                 /* get the Rx desc from Rx ring based on 'next_to_clean' */
-                rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
+                rx_desc = ICE_RX_DESC(rx_ring, ntc);

                 /* status_error_len will always be zero for unused descriptors
                  * because it's cleared in cleanup, and overlaps with hdr_addr
@@ -1160,6 +1156,8 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
                         if (rx_desc->wb.rxdid == FDIR_DESC_RXDID &&
                             ctrl_vsi->vf)
                                 ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
+                        if (++ntc == cnt)
+                                ntc = 0;
                         ice_put_rx_buf(rx_ring, NULL);
                         cleaned_count++;
                         continue;
@@ -1169,7 +1167,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
                         ICE_RX_FLX_DESC_PKT_LEN_M;

                 /* retrieve a buffer from the ring */
-                rx_buf = ice_get_rx_buf(rx_ring, size);
+                rx_buf = ice_get_rx_buf(rx_ring, size, ntc);

                 if (!size) {
                         xdp->data = NULL;
@@ -1203,6 +1201,8 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
                 total_rx_pkts++;

                 cleaned_count++;
+                if (++ntc == cnt)
+                        ntc = 0;
                 ice_put_rx_buf(rx_ring, rx_buf);
                 continue;
 construct_skb:
@@ -1222,6 +1222,8 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
                         break;
                 }

+                if (++ntc == cnt)
+                        ntc = 0;
                 ice_put_rx_buf(rx_ring, rx_buf);
                 cleaned_count++;
@@ -1262,6 +1264,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
                 total_rx_pkts++;
         }

+        rx_ring->next_to_clean = ntc;
         /* return up to cleaned_count buffers to hardware */
         failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);