Commit b8312365 authored by Jacob Keller, committed by Jeff Kirsher

i40e: always return all queue stat strings

The ethtool API for obtaining device statistics is not intended to allow
runtime changes in the number of statistics reported. It may *appear*
this way, as there is an ability to request the number of stats using
ethtool_get_sset_count(). However, it is expected that this must always
return the same value across invocations on the same device.

If we don't satisfy this contract, and allow the number of stats to
change during run time, we could cause invalid memory accesses or report
the stat strings incorrectly. This is because the API for obtaining
stats is to (1) get the size, (2) get the strings and finally (3) get
the stats. Since these are each separate ethtool op commands, it is not
possible to maintain consistency by holding the RTNL lock over the whole
operation. This creates the potential for a race condition in which the
size changes between any of the three calls.
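
To illustrate the contract, here is a minimal sketch with hypothetical
names (not the i40e handlers): all three callbacks derive their sizes
from the same fixed count, so the size/strings/stats sequence stays
consistent even though no lock spans the three ioctls.

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>

    #define EXAMPLE_NUM_STATS 4	/* fixed for the life of the device */

    static int example_get_sset_count(struct net_device *netdev, int sset)
    {
            return sset == ETH_SS_STATS ? EXAMPLE_NUM_STATS : -EOPNOTSUPP;
    }

    static void example_get_strings(struct net_device *netdev, u32 sset, u8 *data)
    {
            int i;

            if (sset != ETH_SS_STATS)
                    return;
            /* exactly EXAMPLE_NUM_STATS strings, matching the count above */
            for (i = 0; i < EXAMPLE_NUM_STATS; i++)
                    snprintf(data + i * ETH_GSTRING_LEN, ETH_GSTRING_LEN,
                             "stat-%d", i);
    }

    static void example_get_ethtool_stats(struct net_device *netdev,
                                          struct ethtool_stats *stats, u64 *data)
    {
            int i;

            /* exactly EXAMPLE_NUM_STATS values, matching the strings above */
            for (i = 0; i < EXAMPLE_NUM_STATS; i++)
                    data[i] = 0;
    }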

Avoid this issue by requiring that we always return the same value for
a given device. The reported count may depend on values which remain
constant for the life of the device, but must not vary with runtime
attributes.

This patch specifically fixes the queue statistics to always report
every queue, even if it is not currently in use.
Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 9955d494
@@ -140,8 +140,12 @@ static const struct i40e_stats i40e_gstrings_stats[] = {
 	I40E_PF_STAT("rx_lpi_count", stats.rx_lpi_count),
 };
 
-#define I40E_QUEUE_STATS_LEN(n) \
-	(((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs \
+/* We use num_tx_queues here as a proxy for the maximum number of queues
+ * available because we always allocate queues symmetrically.
+ */
+#define I40E_MAX_NUM_QUEUES(n) ((n)->num_tx_queues)
+#define I40E_QUEUE_STATS_LEN(n) \
+	(I40E_MAX_NUM_QUEUES(n) \
 	    * 2 /* Tx and Rx together */ \
 	    * (sizeof(struct i40e_queue_stats) / sizeof(u64)))
 #define I40E_GLOBAL_STATS_LEN	ARRAY_SIZE(i40e_gstrings_stats)
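
A note on the design choice (hedged reading, not part of the commit
message): netdev->num_tx_queues is the number of queues allocated along
with the netdev (e.g. via alloc_etherdev_mq()) and stays fixed for the
life of the device, unlike real_num_tx_queues or the VSI's
num_queue_pairs, which can change at runtime. Assuming struct
i40e_queue_stats holds two u64 counters (packets and bytes), the macro
works out to a constant:

    I40E_QUEUE_STATS_LEN(n) = num_tx_queues * 2 /* Tx + Rx */ * 2 /* counters per ring */

so the per-queue portion of the ethtool stats count no longer depends on
how many queue pairs are currently configured.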
@@ -1712,11 +1716,19 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
 			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
 	}
 	rcu_read_lock();
-	for (j = 0; j < vsi->num_queue_pairs; j++) {
+	for (j = 0; j < I40E_MAX_NUM_QUEUES(netdev) ; j++) {
 		tx_ring = READ_ONCE(vsi->tx_rings[j]);
 
-		if (!tx_ring)
+		if (!tx_ring) {
+			/* Bump the stat counter to skip these stats, and make
+			 * sure the memory is zero'd
+			 */
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
+			data[i++] = 0;
 			continue;
+		}
 
 		/* process Tx ring statistics */
 		do {
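
Why four zeros per skipped queue pair (a hedged reading of the
surrounding function, which is mostly outside this hunk): each present
queue pair contributes four u64 values to data[], so a missing pair must
be padded with exactly four zeros to keep the values aligned with the
strings emitted by i40e_get_strings(). Roughly:

    /* layout each queue pair occupies in data[] (illustrative) */
    data[i + 0] = tx_ring->stats.packets;
    data[i + 1] = tx_ring->stats.bytes;
    data[i + 2] = rx_ring->stats.packets;	/* Rx half of the pair */
    data[i + 3] = rx_ring->stats.bytes;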
@@ -1800,7 +1812,7 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
 			 i40e_gstrings_misc_stats[i].stat_string);
 		p += ETH_GSTRING_LEN;
 	}
-	for (i = 0; i < vsi->num_queue_pairs; i++) {
+	for (i = 0; i < I40E_MAX_NUM_QUEUES(netdev); i++) {
 		snprintf(p, ETH_GSTRING_LEN, "tx-%d.tx_packets", i);
 		p += ETH_GSTRING_LEN;
 		snprintf(p, ETH_GSTRING_LEN, "tx-%d.tx_bytes", i);
...
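
As a usage illustration (hypothetical interface name, queue count, and
values), every queue now shows up in ethtool -S output with a stable set
of entries, and queues that are not currently in use simply report zeros:

    $ ethtool -S eth0
    NIC statistics:
         ...
         tx-0.tx_packets: 123456
         tx-0.tx_bytes: 7890123
         rx-0.rx_packets: 234567
         rx-0.rx_bytes: 8901234
         ...
         tx-63.tx_packets: 0
         tx-63.tx_bytes: 0
         rx-63.rx_packets: 0
         rx-63.rx_bytes: 0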