Commit 6d7aad1d authored by Björn Töpel, committed by Alexei Starovoitov

i40e: refactor Rx path for re-use

In this commit, the Rx path is refactored somewhat, as a step towards the
introduction of AF_XDP Rx zero-copy support.

The page re-use counter is moved into i40e_reuse_rx_page, instead of
being bumped at each call site. The Rx buffer page clearing is moved
for better readability. Lastly, helper functions to update statistics
and to bump the XDP Tx ring are introduced.
Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 123cecd4
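
The core of the change described in the message is pulling the reuse accounting and the buffer_info clearing into the recycle helper itself, so the programming-status path, the regular reuse path, and the unmap path no longer each carry their own copy. Below is a minimal, self-contained sketch of that pattern, not the driver code: the types and names (demo_buf, demo_ring, demo_reuse_buf) are invented for illustration only.

#include <stdio.h>

/* Invented stand-ins for the driver's rx_buffer/ring bookkeeping. */
struct demo_buf {
	void *page;
	unsigned int page_offset;
	int pagecnt_bias;
};

struct demo_ring {
	struct demo_buf bufs[4];
	unsigned int next_to_alloc;
	unsigned long page_reuse_count;
};

/* After the refactor, the recycle helper owns the whole reuse step:
 * copy the buffer into the next alloc slot, bump the reuse counter,
 * and clear the old buffer_info, all in one place.
 */
static void demo_reuse_buf(struct demo_ring *ring, struct demo_buf *old)
{
	struct demo_buf *new_buf = &ring->bufs[ring->next_to_alloc];

	ring->next_to_alloc = (ring->next_to_alloc + 1) % 4;

	*new_buf = *old;
	ring->page_reuse_count++;

	/* clear contents of the old buffer_info */
	old->page = NULL;
}

int main(void)
{
	static char page[64];
	struct demo_ring ring = { 0 };

	ring.bufs[2].page = page;
	ring.bufs[2].pagecnt_bias = 1;

	/* Callers that recycle a buffer now get counting and clearing
	 * for free, instead of repeating them at each call site.
	 */
	demo_reuse_buf(&ring, &ring.bufs[2]);

	printf("reuse count: %lu, old slot cleared: %s\n",
	       ring.page_reuse_count,
	       ring.bufs[2].page ? "no" : "yes");
	return 0;
}
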
@@ -1244,6 +1244,11 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
 	new_buff->page = old_buff->page;
 	new_buff->page_offset = old_buff->page_offset;
 	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
+
+	rx_ring->rx_stats.page_reuse_count++;
+
+	/* clear contents of buffer_info */
+	old_buff->page = NULL;
 }
 
 /**
@@ -1266,7 +1271,7 @@ static inline bool i40e_rx_is_programming_status(u64 qw)
 }
 
 /**
- * i40e_clean_programming_status - clean the programming status descriptor
+ * i40e_clean_programming_status - try clean the programming status descriptor
  * @rx_ring: the rx ring that has this descriptor
  * @rx_desc: the rx descriptor written back by HW
  * @qw: qword representing status_error_len in CPU ordering
@@ -1275,15 +1280,22 @@ static inline bool i40e_rx_is_programming_status(u64 qw)
  * status being successful or not and take actions accordingly. FCoE should
  * handle its context/filter programming/invalidation status and take actions.
  *
+ * Returns an i40e_rx_buffer to reuse if the cleanup occurred, otherwise NULL.
  **/
-static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
-					  union i40e_rx_desc *rx_desc,
-					  u64 qw)
+static struct i40e_rx_buffer *i40e_clean_programming_status(
+	struct i40e_ring *rx_ring,
+	union i40e_rx_desc *rx_desc,
+	u64 qw)
 {
 	struct i40e_rx_buffer *rx_buffer;
-	u32 ntc = rx_ring->next_to_clean;
+	u32 ntc;
 	u8 id;
 
+	if (!i40e_rx_is_programming_status(qw))
+		return NULL;
+
+	ntc = rx_ring->next_to_clean;
+
 	/* fetch, update, and store next to clean */
 	rx_buffer = &rx_ring->rx_bi[ntc++];
 	ntc = (ntc < rx_ring->count) ? ntc : 0;
@@ -1291,18 +1303,13 @@ static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
 
 	prefetch(I40E_RX_DESC(rx_ring, ntc));
 
-	/* place unused page back on the ring */
-	i40e_reuse_rx_page(rx_ring, rx_buffer);
-	rx_ring->rx_stats.page_reuse_count++;
-
-	/* clear contents of buffer_info */
-	rx_buffer->page = NULL;
-
 	id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
 		  I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
 
 	if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
 		i40e_fd_handle_status(rx_ring, rx_desc, id);
+
+	return rx_buffer;
 }
 
 /**
@@ -2152,7 +2159,6 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
 	if (i40e_can_reuse_rx_page(rx_buffer)) {
 		/* hand second half of page back to the ring */
 		i40e_reuse_rx_page(rx_ring, rx_buffer);
-		rx_ring->rx_stats.page_reuse_count++;
 	} else {
 		/* we are not reusing the buffer so unmap it */
 		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
@@ -2160,10 +2166,9 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
 				     DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
 		__page_frag_cache_drain(rx_buffer->page,
 					rx_buffer->pagecnt_bias);
-	}
-
 	/* clear contents of buffer_info */
 	rx_buffer->page = NULL;
+	}
 }
 
 /**
@@ -2287,6 +2292,12 @@ static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring,
 #endif
 }
 
+/**
+ * i40e_xdp_ring_update_tail - Updates the XDP Tx ring tail register
+ * @xdp_ring: XDP Tx ring
+ *
+ * This function updates the XDP Tx ring tail register.
+ **/
 static inline void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring)
 {
 	/* Force memory writes to complete before letting h/w
@@ -2296,6 +2307,49 @@ static inline void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring)
 	writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
 }
 
+/**
+ * i40e_update_rx_stats - Update Rx ring statistics
+ * @rx_ring: rx descriptor ring
+ * @total_rx_bytes: number of bytes received
+ * @total_rx_packets: number of packets received
+ *
+ * This function updates the Rx ring statistics.
+ **/
+static void i40e_update_rx_stats(struct i40e_ring *rx_ring,
+				 unsigned int total_rx_bytes,
+				 unsigned int total_rx_packets)
+{
+	u64_stats_update_begin(&rx_ring->syncp);
+	rx_ring->stats.packets += total_rx_packets;
+	rx_ring->stats.bytes += total_rx_bytes;
+	u64_stats_update_end(&rx_ring->syncp);
+	rx_ring->q_vector->rx.total_packets += total_rx_packets;
+	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+}
+
+/**
+ * i40e_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
+ * @rx_ring: Rx ring
+ * @xdp_res: Result of the receive batch
+ *
+ * This function bumps XDP Tx tail and/or flush redirect map, and
+ * should be called when a batch of packets has been processed in the
+ * napi loop.
+ **/
+static void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring,
+				 unsigned int xdp_res)
+{
+	if (xdp_res & I40E_XDP_REDIR)
+		xdp_do_flush_map();
+
+	if (xdp_res & I40E_XDP_TX) {
+		struct i40e_ring *xdp_ring =
+			rx_ring->vsi->xdp_rings[rx_ring->queue_index];
+
+		i40e_xdp_ring_update_tail(xdp_ring);
+	}
+}
+
 /**
  * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
  * @rx_ring: rx descriptor ring to transact packets on
@@ -2349,11 +2403,14 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 		 */
 		dma_rmb();
 
-		if (unlikely(i40e_rx_is_programming_status(qword))) {
-			i40e_clean_programming_status(rx_ring, rx_desc, qword);
+		rx_buffer = i40e_clean_programming_status(rx_ring, rx_desc,
+							  qword);
+		if (unlikely(rx_buffer)) {
+			i40e_reuse_rx_page(rx_ring, rx_buffer);
 			cleaned_count++;
 			continue;
 		}
+
 		size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
 		       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
 		if (!size)
@@ -2432,24 +2489,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 		total_rx_packets++;
 	}
 
-	if (xdp_xmit & I40E_XDP_REDIR)
-		xdp_do_flush_map();
-
-	if (xdp_xmit & I40E_XDP_TX) {
-		struct i40e_ring *xdp_ring =
-			rx_ring->vsi->xdp_rings[rx_ring->queue_index];
-
-		i40e_xdp_ring_update_tail(xdp_ring);
-	}
-
+	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
 	rx_ring->skb = skb;
 
-	u64_stats_update_begin(&rx_ring->syncp);
-	rx_ring->stats.packets += total_rx_packets;
-	rx_ring->stats.bytes += total_rx_bytes;
-	u64_stats_update_end(&rx_ring->syncp);
-	rx_ring->q_vector->rx.total_packets += total_rx_packets;
-	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
 
 	/* guarantee a trip back through this routine if there was a failure */
 	return failure ? budget : (int)total_rx_packets;