Commit aaf27254 authored by Maciej Fijalkowski, committed by Jeff Kirsher

ice: add build_skb() support

Driver is now prepared for building the skb around the existing Rx
buffer, so introduce ice_build_skb(), which is responsible for that.
Make use of XDP's data_meta as well.
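
For reference, the data_meta region that ice_build_skb() now carries into the
skb via skb_metadata_set() is normally produced by an XDP program with
bpf_xdp_adjust_meta(). A minimal, illustrative sketch (not part of this patch;
the rx_meta layout and program name are hypothetical):

/* Illustrative only -- not part of this patch. An XDP program that
 * prepends a small metadata blob in front of the frame; with this
 * change the ice driver preserves it into the skb for XDP_PASS frames.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct rx_meta {			/* hypothetical layout */
	__u32 mark;
};

SEC("xdp")
int xdp_set_meta(struct xdp_md *ctx)
{
	struct rx_meta *meta;
	void *data;

	/* grow the metadata area by sizeof(*meta) bytes */
	if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta)))
		return XDP_PASS;

	data = (void *)(long)ctx->data;
	meta = (void *)(long)ctx->data_meta;
	if ((void *)(meta + 1) > data)	/* bounds check for the verifier */
		return XDP_PASS;

	meta->mark = 42;	/* read back later, e.g. from a tc program */
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";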

I've observed around 30% less CPU consumption with the build_skb Rx path
compared to legacy Rx. What stands behind such a result is the avoidance
of flow_dissector (which we were diving into via eth_get_headlen) and the
absence of memcpy calls.
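
For context on where the savings come from, below is a schematic contrast of
the two Rx paths in kernel-style C. It is not the driver's code: the helper
calls are standard kernel APIs, but the function names, the headroom parameter
and the 256-byte header budget are illustrative only.

/* Schematic contrast only -- not the ice driver's code. */
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

/* Allocate-and-copy path: the flow dissector walks the headers just to
 * size the pull, then the headers are memcpy'd out of the DMA page.
 */
static struct sk_buff *rx_copy_path(struct napi_struct *napi,
				    struct net_device *dev,
				    void *va, unsigned int size)
{
	struct sk_buff *skb = napi_alloc_skb(napi, 256);
	unsigned int headlen;

	if (!skb)
		return NULL;

	headlen = eth_get_headlen(dev, va, min_t(unsigned int, size, 256));
	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
	/* the remaining payload would then be attached as a page frag */
	return skb;
}

/* build_skb path: the skb simply wraps the existing buffer -- no header
 * dissection and no copy, only pointer and offset bookkeeping.
 */
static struct sk_buff *rx_build_path(void *hard_start, unsigned int truesize,
				     unsigned int headroom, unsigned int size)
{
	struct sk_buff *skb = build_skb(hard_start, truesize);

	if (!skb)
		return NULL;

	skb_reserve(skb, headroom);
	__skb_put(skb, size);
	return skb;
}
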
Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 59bb0808
@@ -790,6 +790,60 @@ ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
 	return rx_buf;
 }
 
+/**
+ * ice_build_skb - Build skb around an existing buffer
+ * @rx_ring: Rx descriptor ring to transact packets on
+ * @rx_buf: Rx buffer to pull data from
+ * @xdp: xdp_buff pointing to the data
+ *
+ * This function builds an skb around an existing Rx buffer, taking care
+ * to set up the skb correctly and avoid any memcpy overhead.
+ */
+static struct sk_buff *
+ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
+	      struct xdp_buff *xdp)
+{
+	unsigned int metasize = xdp->data - xdp->data_meta;
+#if (PAGE_SIZE < 8192)
+	unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
+#else
+	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+				SKB_DATA_ALIGN(xdp->data_end -
+					       xdp->data_hard_start);
+#endif
+	struct sk_buff *skb;
+
+	/* Prefetch first cache line of first page. If xdp->data_meta
+	 * is unused, this points exactly as xdp->data, otherwise we
+	 * likely have a consumer accessing first few bytes of meta
+	 * data, and then actual data.
+	 */
+	prefetch(xdp->data_meta);
+#if L1_CACHE_BYTES < 128
+	prefetch((void *)(xdp->data + L1_CACHE_BYTES));
+#endif
+	/* build an skb around the page buffer */
+	skb = build_skb(xdp->data_hard_start, truesize);
+	if (unlikely(!skb))
+		return NULL;
+
+	/* must record Rx queue, otherwise OS features such as
+	 * symmetric queue won't work
+	 */
+	skb_record_rx_queue(skb, rx_ring->q_index);
+
+	/* update pointers within the skb to store the data */
+	skb_reserve(skb, xdp->data - xdp->data_hard_start);
+	__skb_put(skb, xdp->data_end - xdp->data);
+	if (metasize)
+		skb_metadata_set(skb, metasize);
+
+	/* buffer is used by skb, update page_offset */
+	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
+
+	return skb;
+}
+
 /**
  * ice_construct_skb - Allocate skb and populate it
  * @rx_ring: Rx descriptor ring to transact packets on
@@ -996,12 +1050,14 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
 		if (!size) {
 			xdp.data = NULL;
 			xdp.data_end = NULL;
+			xdp.data_hard_start = NULL;
+			xdp.data_meta = NULL;
 			goto construct_skb;
 		}
 
 		xdp.data = page_address(rx_buf->page) + rx_buf->page_offset;
 		xdp.data_hard_start = xdp.data - ice_rx_offset(rx_ring);
-		xdp_set_data_meta_invalid(&xdp);
+		xdp.data_meta = xdp.data;
 		xdp.data_end = xdp.data + size;
 
 		rcu_read_lock();
@@ -1038,6 +1094,8 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
 construct_skb:
 		if (skb)
 			ice_add_rx_frag(rx_ring, rx_buf, skb, size);
+		else if (ice_ring_uses_build_skb(rx_ring))
+			skb = ice_build_skb(rx_ring, rx_buf, &xdp);
 		else
 			skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
...