Commit aa4725c2 authored by David S. Miller

Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
40GbE Intel Wired LAN Driver Updates 2022-02-08

Joe Damato says:

This patch set makes several updates to the i40e driver stats collection
and reporting code to help users of i40e get a better sense of how the
driver is performing and interacting with the rest of the kernel.

These patches include some new stats (like waived and busy) which were
inspired by other drivers that track stats using the same nomenclature.

The new stats and an existing stat, rx_reuse, are now accessible with
ethtool to make harvesting this data more convenient for users.
====================
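
For a sense of how this surfaces to users: once the series is applied, the
new counters appear in the ethtool statistics dump. A hypothetical session
(the interface name eth0 and the values are made up; the counter names match
the I40E_VSI_STAT entries added in the ethtool hunk below):

    $ ethtool -S eth0 | grep rx_cache
         rx_cache_reuse: 134217
         rx_cache_alloc: 65590
         rx_cache_waive: 0
         rx_cache_busy: 134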
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 3a5f238f b76bc129
@@ -854,6 +854,10 @@ struct i40e_vsi {
 	u64 tx_force_wb;
 	u64 rx_buf_failed;
 	u64 rx_page_failed;
+	u64 rx_page_reuse;
+	u64 rx_page_alloc;
+	u64 rx_page_waive;
+	u64 rx_page_busy;
 
 	/* These are containers of ring pointers, allocated at run-time */
 	struct i40e_ring **rx_rings;
...
@@ -295,6 +295,10 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = {
 	I40E_VSI_STAT("tx_busy", tx_busy),
 	I40E_VSI_STAT("rx_alloc_fail", rx_buf_failed),
 	I40E_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
+	I40E_VSI_STAT("rx_cache_reuse", rx_page_reuse),
+	I40E_VSI_STAT("rx_cache_alloc", rx_page_alloc),
+	I40E_VSI_STAT("rx_cache_waive", rx_page_waive),
+	I40E_VSI_STAT("rx_cache_busy", rx_page_busy),
 };
 
 /* These PF_STATs might look like duplicates of some NETDEV_STATs,
...
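These entries rely on ethtool's table-driven stat export: each I40E_VSI_STAT
row pairs a user-visible string with the offset of a u64 inside struct
i40e_vsi, so one generic loop can dump any registered stat. A minimal
userspace sketch of that pattern follows (struct and field names here are
illustrative, not the driver's exact macros):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for struct i40e_vsi; only the stat fields matter here. */
struct vsi_example {
	uint64_t rx_page_reuse;
	uint64_t rx_page_busy;
};

/* One table row: ethtool string plus offset of the backing counter. */
struct stat_desc {
	const char *name;
	size_t offset;
};

#define VSI_STAT(n, field) \
	{ .name = (n), .offset = offsetof(struct vsi_example, field) }

static const struct stat_desc stats[] = {
	VSI_STAT("rx_cache_reuse", rx_page_reuse),
	VSI_STAT("rx_cache_busy", rx_page_busy),
};

int main(void)
{
	struct vsi_example vsi = { .rx_page_reuse = 42, .rx_page_busy = 3 };
	size_t i;

	/* Generic dump loop: read each counter through its offset. */
	for (i = 0; i < sizeof(stats) / sizeof(stats[0]); i++) {
		uint64_t v = *(const uint64_t *)
			((const char *)&vsi + stats[i].offset);
		printf("%s: %llu\n", stats[i].name, (unsigned long long)v);
	}
	return 0;
}

The payoff of this layout is that exposing a new stat is a one-line table
change, which is exactly what the hunk above does.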
@@ -773,6 +773,7 @@ void i40e_update_veb_stats(struct i40e_veb *veb)
  **/
 static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 {
+	u64 rx_page, rx_buf, rx_reuse, rx_alloc, rx_waive, rx_busy;
 	struct i40e_pf *pf = vsi->back;
 	struct rtnl_link_stats64 *ons;
 	struct rtnl_link_stats64 *ns; /* netdev stats */
@@ -780,7 +781,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 	struct i40e_eth_stats *es; /* device's eth stats */
 	u64 tx_restart, tx_busy;
 	struct i40e_ring *p;
-	u64 rx_page, rx_buf;
 	u64 bytes, packets;
 	unsigned int start;
 	u64 tx_linearize;
@@ -806,6 +806,10 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 	tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
 	rx_page = 0;
 	rx_buf = 0;
+	rx_reuse = 0;
+	rx_alloc = 0;
+	rx_waive = 0;
+	rx_busy = 0;
 	rcu_read_lock();
 	for (q = 0; q < vsi->num_queue_pairs; q++) {
 		/* locate Tx ring */
@@ -839,6 +843,10 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 		rx_p += packets;
 		rx_buf += p->rx_stats.alloc_buff_failed;
 		rx_page += p->rx_stats.alloc_page_failed;
+		rx_reuse += p->rx_stats.page_reuse_count;
+		rx_alloc += p->rx_stats.page_alloc_count;
+		rx_waive += p->rx_stats.page_waive_count;
+		rx_busy += p->rx_stats.page_busy_count;
 
 		if (i40e_enabled_xdp_vsi(vsi)) {
 			/* locate XDP ring */
@@ -866,6 +874,10 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 	vsi->tx_force_wb = tx_force_wb;
 	vsi->rx_page_failed = rx_page;
 	vsi->rx_buf_failed = rx_buf;
+	vsi->rx_page_reuse = rx_reuse;
+	vsi->rx_page_alloc = rx_alloc;
+	vsi->rx_page_waive = rx_waive;
+	vsi->rx_page_busy = rx_busy;
 
 	ns->rx_packets = rx_p;
 	ns->rx_bytes = rx_b;
...
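The shape of the bookkeeping above: each Rx ring keeps its own counters, and
i40e_update_vsi_stats() sums them into the VSI-wide fields that ethtool then
reports. A standalone model of that aggregation (queue count and values are
invented for illustration):

#include <stdint.h>
#include <stdio.h>

#define NUM_QUEUES 4

/* Stand-in for the per-ring counters in struct i40e_rx_queue_stats. */
struct ring_stats {
	uint64_t page_reuse_count;
	uint64_t page_busy_count;
};

int main(void)
{
	struct ring_stats rings[NUM_QUEUES] = {
		{ 10, 1 }, { 7, 0 }, { 12, 3 }, { 5, 0 },
	};
	uint64_t rx_reuse = 0, rx_busy = 0;
	int q;

	/* Same shape as the for (q = 0; ...) loop in the hunk above:
	 * accumulate per-queue counters into VSI-level totals.
	 */
	for (q = 0; q < NUM_QUEUES; q++) {
		rx_reuse += rings[q].page_reuse_count;
		rx_busy += rings[q].page_busy_count;
	}
	printf("rx_reuse=%llu rx_busy=%llu\n",
	       (unsigned long long)rx_reuse, (unsigned long long)rx_busy);
	return 0;
}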
@@ -1382,8 +1382,6 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
 	new_buff->page_offset = old_buff->page_offset;
 	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
 
-	rx_ring->rx_stats.page_reuse_count++;
-
 	/* clear contents of buffer_info */
 	old_buff->page = NULL;
 }
@@ -1675,6 +1673,8 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
 		return false;
 	}
 
+	rx_ring->rx_stats.page_alloc_count++;
+
 	/* map page for use */
 	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
 				 i40e_rx_pg_size(rx_ring),
@@ -1982,32 +1982,43 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
 /**
  * i40e_can_reuse_rx_page - Determine if page can be reused for another Rx
  * @rx_buffer: buffer containing the page
+ * @rx_stats: rx stats structure for the rx ring
  * @rx_buffer_pgcnt: buffer page refcount pre xdp_do_redirect() call
  *
  * If page is reusable, we have a green light for calling i40e_reuse_rx_page,
  * which will assign the current buffer to the buffer that next_to_alloc is
  * pointing to; otherwise, the DMA mapping needs to be destroyed and
- * page freed
+ * page freed.
+ *
+ * rx_stats will be updated to indicate whether the page was waived
+ * or busy if it could not be reused.
  */
 static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
+				   struct i40e_rx_queue_stats *rx_stats,
 				   int rx_buffer_pgcnt)
 {
 	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
 	struct page *page = rx_buffer->page;
 
 	/* Is any reuse possible? */
-	if (!dev_page_is_reusable(page))
+	if (!dev_page_is_reusable(page)) {
+		rx_stats->page_waive_count++;
 		return false;
+	}
 
 #if (PAGE_SIZE < 8192)
 	/* if we are only owner of page we can reuse it */
-	if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
+	if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) {
+		rx_stats->page_busy_count++;
 		return false;
+	}
#else
 #define I40E_LAST_OFFSET \
 	(SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
-	if (rx_buffer->page_offset > I40E_LAST_OFFSET)
+	if (rx_buffer->page_offset > I40E_LAST_OFFSET) {
+		rx_stats->page_busy_count++;
 		return false;
+	}
 #endif
 
 	/* If we have drained the page fragment pool we need to update
@@ -2237,7 +2248,7 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
 			       struct i40e_rx_buffer *rx_buffer,
 			       int rx_buffer_pgcnt)
 {
-	if (i40e_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
+	if (i40e_can_reuse_rx_page(rx_buffer, &rx_ring->rx_stats, rx_buffer_pgcnt)) {
 		/* hand second half of page back to the ring */
 		i40e_reuse_rx_page(rx_ring, rx_buffer);
 	} else {
...
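The "busy" test instrumented above hinges on pagecnt_bias: the driver holds
its own references to each Rx page, and if the page's total refcount minus
that bias exceeds one, some other part of the stack still owns a fragment,
so the page cannot be recycled. A toy sketch of the arithmetic (refcount and
bias values are invented):

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the driver's check:
 *   unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)
 * A difference of exactly 1 means the ring is the page's sole owner.
 */
static bool page_is_reusable(unsigned int refcount, unsigned int bias)
{
	return (refcount - bias) <= 1;
}

int main(void)
{
	/* ring is sole owner: reusable, would bump page_reuse_count */
	printf("sole owner: %s\n", page_is_reusable(2, 1) ? "reuse" : "busy");
	/* an skb still holds a fragment: counted in page_busy_count */
	printf("stack ref:  %s\n", page_is_reusable(3, 1) ? "reuse" : "busy");
	return 0;
}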
@@ -298,6 +298,9 @@ struct i40e_rx_queue_stats {
 	u64 alloc_page_failed;
 	u64 alloc_buff_failed;
 	u64 page_reuse_count;
+	u64 page_alloc_count;
+	u64 page_waive_count;
+	u64 page_busy_count;
 };
 
 enum i40e_ring_state_t {
...