Commit 2bc0de9a authored by Tirthendu Sarkar, committed by Tony Nguyen

i40e: use frame_sz instead of recalculating truesize for building skb

In the skb path, truesize is calculated while building the skb. This is now
avoided and xdp->frame_sz is used instead for both i40e_build_skb() and
i40e_construct_skb().
Signed-off-by: Tirthendu Sarkar <tirthendu.sarkar@intel.com>
Tested-by: Chandan Kumar Rout <chandanx.rout@intel.com> (A Contingent Worker at Intel)
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
parent 03e88c8a
...@@ -2113,11 +2113,6 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring, ...@@ -2113,11 +2113,6 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
struct xdp_buff *xdp) struct xdp_buff *xdp)
{ {
unsigned int size = xdp->data_end - xdp->data; unsigned int size = xdp->data_end - xdp->data;
#if (PAGE_SIZE < 8192)
unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
#else
unsigned int truesize = SKB_DATA_ALIGN(size);
#endif
unsigned int headlen; unsigned int headlen;
struct sk_buff *skb; struct sk_buff *skb;
...@@ -2162,10 +2157,10 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring, ...@@ -2162,10 +2157,10 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
if (size) { if (size) {
skb_add_rx_frag(skb, 0, rx_buffer->page, skb_add_rx_frag(skb, 0, rx_buffer->page,
rx_buffer->page_offset + headlen, rx_buffer->page_offset + headlen,
size, truesize); size, xdp->frame_sz);
/* buffer is used by skb, update page_offset */ /* buffer is used by skb, update page_offset */
i40e_rx_buffer_flip(rx_buffer, truesize); i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz);
} else { } else {
/* buffer is unused, reset bias back to rx_buffer */ /* buffer is unused, reset bias back to rx_buffer */
rx_buffer->pagecnt_bias++; rx_buffer->pagecnt_bias++;
...@@ -2188,13 +2183,6 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, ...@@ -2188,13 +2183,6 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
struct xdp_buff *xdp) struct xdp_buff *xdp)
{ {
unsigned int metasize = xdp->data - xdp->data_meta; unsigned int metasize = xdp->data - xdp->data_meta;
#if (PAGE_SIZE < 8192)
unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
#else
unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
SKB_DATA_ALIGN(xdp->data_end -
xdp->data_hard_start);
#endif
struct sk_buff *skb; struct sk_buff *skb;
/* Prefetch first cache line of first page. If xdp->data_meta /* Prefetch first cache line of first page. If xdp->data_meta
...@@ -2205,7 +2193,7 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, ...@@ -2205,7 +2193,7 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
net_prefetch(xdp->data_meta); net_prefetch(xdp->data_meta);
/* build an skb around the page buffer */ /* build an skb around the page buffer */
skb = napi_build_skb(xdp->data_hard_start, truesize); skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz);
if (unlikely(!skb)) if (unlikely(!skb))
return NULL; return NULL;
...@@ -2216,7 +2204,7 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, ...@@ -2216,7 +2204,7 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
skb_metadata_set(skb, metasize); skb_metadata_set(skb, metasize);
/* buffer is used by skb, update page_offset */ /* buffer is used by skb, update page_offset */
i40e_rx_buffer_flip(rx_buffer, truesize); i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz);
return skb; return skb;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment