Commit 1beb7830 authored by Björn Töpel, committed by Tony Nguyen

ice: avoid premature Rx buffer reuse

The page recycle code incorrectly relied on a page fragment not being
freed inside xdp_do_redirect(). Under that assumption, page fragments
that are still in use by the stack or by an XDP redirect can be reused
and overwritten.

To avoid this, store the page count prior to invoking xdp_do_redirect().

Fixes: efc2214b ("ice: Add support for XDP")
Reported-and-analyzed-by: Li RongQing <lirongqing@baidu.com>
Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Tested-by: George Kuruvinakunnel <george.kuruvinakunnel@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
parent a06316dc
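
Why the ordering matters: ice_can_reuse_rx_page() treats "page count minus
pagecnt_bias equals 1" as meaning the only outstanding user of the page is
the fragment that was just handed off, so the other half-page is free to
recycle. That reading is only valid if the count is sampled while the
just-processed fragment still holds its page reference. If xdp_do_redirect()
frees the fragment before the count is read, a reference held by an earlier
fragment of the same page can masquerade as the just-processed one, the
check passes, and the driver recycles a half-page the stack is still
reading. The userspace model below walks the arithmetic; it is a minimal
sketch with hypothetical names (demo_rx_buf, can_reuse), not the driver's
code, and it assumes one earlier fragment of the same page is still in
flight.

#include <stdbool.h>
#include <stdio.h>

/* Models the page state: the driver owns pagecnt_bias references and
 * every in-flight fragment owns one more. */
struct demo_rx_buf {
        int page_refcount;      /* stands in for page_count(rx_buf->page) */
        int pagecnt_bias;       /* references still owned by the driver */
};

/* Mirrors the PAGE_SIZE < 8192 branch: reuse only when no user other
 * than the just-handed-off fragment holds the page. */
static bool can_reuse(int pgcnt, int pagecnt_bias)
{
        return (pgcnt - pagecnt_bias) <= 1;
}

int main(void)
{
        /* Three refs: driver bias (1) + fragment being redirected (1) +
         * an earlier fragment still in flight up the stack (1). */
        struct demo_rx_buf buf = { .page_refcount = 3, .pagecnt_bias = 1 };
        int snapshot = buf.page_refcount; /* taken in ice_get_rx_buf() */

        buf.page_refcount--;    /* xdp_do_redirect() freed the fragment */

        /* Old code: 2 - 1 = 1 -> reuse allowed, overwriting the half the
         * earlier fragment still points at. */
        printf("post-redirect count: %s\n",
               can_reuse(buf.page_refcount, buf.pagecnt_bias) ?
               "reuse (premature!)" : "no reuse");

        /* Patched code: 3 - 1 = 2 -> reuse correctly refused. */
        printf("pre-redirect snapshot: %s\n",
               can_reuse(snapshot, buf.pagecnt_bias) ? "reuse" : "no reuse");
        return 0;
}

Compiled with cc, the model prints "reuse (premature!)" for the
post-redirect count and "no reuse" for the pre-redirect snapshot, which is
exactly the behavior change this patch makes.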
@@ -762,13 +762,15 @@ ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
 /**
  * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
  * @rx_buf: buffer containing the page
+ * @rx_buf_pgcnt: rx_buf page refcount pre xdp_do_redirect() call
  *
  * If page is reusable, we have a green light for calling ice_reuse_rx_page,
  * which will assign the current buffer to the buffer that next_to_alloc is
  * pointing to; otherwise, the DMA mapping needs to be destroyed and
  * page freed
  */
-static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
+static bool
+ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
 {
         unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
         struct page *page = rx_buf->page;
@@ -779,7 +781,7 @@ static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
 
 #if (PAGE_SIZE < 8192)
         /* if we are only owner of page we can reuse it */
-        if (unlikely((page_count(page) - pagecnt_bias) > 1))
+        if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
                 return false;
 #else
 #define ICE_LAST_OFFSET \
@@ -864,17 +866,24 @@ ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
  * @rx_ring: Rx descriptor ring to transact packets on
  * @skb: skb to be used
  * @size: size of buffer to add to skb
+ * @rx_buf_pgcnt: rx_buf page refcount
  *
  * This function will pull an Rx buffer from the ring and synchronize it
  * for use by the CPU.
  */
 static struct ice_rx_buf *
 ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
-               const unsigned int size)
+               const unsigned int size, int *rx_buf_pgcnt)
 {
         struct ice_rx_buf *rx_buf;
 
         rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
+        *rx_buf_pgcnt =
+#if (PAGE_SIZE < 8192)
+                page_count(rx_buf->page);
+#else
+                0;
+#endif
         prefetchw(rx_buf->page);
         *skb = rx_buf->skb;
@@ -1006,12 +1015,15 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
  * ice_put_rx_buf - Clean up used buffer and either recycle or free
  * @rx_ring: Rx descriptor ring to transact packets on
  * @rx_buf: Rx buffer to pull data from
+ * @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect()
  *
  * This function will update next_to_clean and then clean up the contents
  * of the rx_buf. It will either recycle the buffer or unmap it and free
  * the associated resources.
  */
-static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
+static void
+ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
+               int rx_buf_pgcnt)
 {
         u16 ntc = rx_ring->next_to_clean + 1;
@@ -1022,7 +1034,7 @@ static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
         if (!rx_buf)
                 return;
 
-        if (ice_can_reuse_rx_page(rx_buf)) {
+        if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) {
                 /* hand second half of page back to the ring */
                 ice_reuse_rx_page(rx_ring, rx_buf);
         } else {
@@ -1097,6 +1109,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
                 struct sk_buff *skb;
                 unsigned int size;
                 u16 stat_err_bits;
+                int rx_buf_pgcnt;
                 u16 vlan_tag = 0;
                 u8 rx_ptype;
@@ -1119,7 +1132,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
                 dma_rmb();
 
                 if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
-                        ice_put_rx_buf(rx_ring, NULL);
+                        ice_put_rx_buf(rx_ring, NULL, 0);
                         cleaned_count++;
                         continue;
                 }
@@ -1128,7 +1141,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
                         ICE_RX_FLX_DESC_PKT_LEN_M;
 
                 /* retrieve a buffer from the ring */
-                rx_buf = ice_get_rx_buf(rx_ring, &skb, size);
+                rx_buf = ice_get_rx_buf(rx_ring, &skb, size, &rx_buf_pgcnt);
 
                 if (!size) {
                         xdp.data = NULL;
@@ -1168,7 +1181,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
                 total_rx_pkts++;
 
                 cleaned_count++;
-                ice_put_rx_buf(rx_ring, rx_buf);
+                ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
                 continue;
 construct_skb:
                 if (skb) {
@@ -1187,7 +1200,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
                         break;
                 }
 
-                ice_put_rx_buf(rx_ring, rx_buf);
+                ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
                 cleaned_count++;
 
                 /* skip if it is NOP desc */
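
A note on the #if in ice_get_rx_buf(): the snapshot is taken only when
PAGE_SIZE < 8192 and is hard-wired to 0 otherwise, because on larger pages
ice_can_reuse_rx_page() decides reuse from the remaining page offset (the
ICE_LAST_OFFSET branch above) and never consults the refcount, so reading
page_count() there would be a needless read. A compilable sketch of just
that guard, with page_count() modeled as a plain int for illustration (an
assumption, not the driver's code):

#include <stdio.h>

#define PAGE_SIZE 4096  /* assumption: a small-page build */

static int fake_page_count = 2; /* models page_count(rx_buf->page) */

/* Mirrors the hunk in ice_get_rx_buf(): snapshot the refcount only on
 * builds where the reuse check actually consults it. */
static int snapshot_rx_buf_pgcnt(void)
{
#if (PAGE_SIZE < 8192)
        return fake_page_count; /* read before xdp_do_redirect() runs */
#else
        return 0;               /* >= 8K pages: reuse is decided by page
                                 * offset, so the count is never used */
#endif
}

int main(void)
{
        printf("rx_buf_pgcnt = %d\n", snapshot_rx_buf_pgcnt());
        return 0;
}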