Commit 325945ed authored by Jakub Kicinski, committed by David S. Miller

nfp: split software and hardware vNIC statistics

In preparation for reporting vNIC HW stats on representors
split handling of the SW and HW stats in ethtool -S.
Representors don't have SW stats (since vNIC is assigned
to the VM).

Remove the questionable defines which assume nn variable
exists in the scope.
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Simon Horman <simon.horman@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 634287ba
...@@ -100,11 +100,7 @@ static const struct nfp_et_stat nfp_net_et_stats[] = { ...@@ -100,11 +100,7 @@ static const struct nfp_et_stat nfp_net_et_stats[] = {
}; };
#define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats) #define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
#define NN_ET_RVEC_STATS_LEN (nn->dp.num_r_vecs * 3)
#define NN_ET_RVEC_GATHER_STATS 7 #define NN_ET_RVEC_GATHER_STATS 7
#define NN_ET_QUEUE_STATS_LEN ((nn->dp.num_tx_rings + nn->dp.num_rx_rings) * 2)
#define NN_ET_STATS_LEN (NN_ET_GLOBAL_STATS_LEN + NN_ET_RVEC_GATHER_STATS + \
NN_ET_RVEC_STATS_LEN + NN_ET_QUEUE_STATS_LEN)
static void nfp_net_get_nspinfo(struct nfp_app *app, char *version) static void nfp_net_get_nspinfo(struct nfp_app *app, char *version)
{ {
...@@ -346,16 +342,17 @@ static __printf(2, 3) u8 *nfp_pr_et(u8 *data, const char *fmt, ...) ...@@ -346,16 +342,17 @@ static __printf(2, 3) u8 *nfp_pr_et(u8 *data, const char *fmt, ...)
return data + ETH_GSTRING_LEN; return data + ETH_GSTRING_LEN;
} }
static void nfp_net_get_strings(struct net_device *netdev, static unsigned int nfp_vnic_get_sw_stats_count(struct net_device *netdev)
u32 stringset, u8 *data)
{ {
struct nfp_net *nn = netdev_priv(netdev); struct nfp_net *nn = netdev_priv(netdev);
int i;
switch (stringset) { return NN_ET_RVEC_GATHER_STATS + nn->dp.num_r_vecs * 3;
case ETH_SS_STATS: }
for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++)
data = nfp_pr_et(data, nfp_net_et_stats[i].name); static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
{
struct nfp_net *nn = netdev_priv(netdev);
int i;
for (i = 0; i < nn->dp.num_r_vecs; i++) { for (i = 0; i < nn->dp.num_r_vecs; i++) {
data = nfp_pr_et(data, "rvec_%u_rx_pkts", i); data = nfp_pr_et(data, "rvec_%u_rx_pkts", i);
...@@ -371,71 +368,120 @@ static void nfp_net_get_strings(struct net_device *netdev, ...@@ -371,71 +368,120 @@ static void nfp_net_get_strings(struct net_device *netdev,
data = nfp_pr_et(data, "tx_gather"); data = nfp_pr_et(data, "tx_gather");
data = nfp_pr_et(data, "tx_lso"); data = nfp_pr_et(data, "tx_lso");
for (i = 0; i < nn->dp.num_tx_rings; i++) { return data;
}
/**
 * nfp_vnic_get_sw_stats() - fill in the driver-maintained (SW) stats
 * @netdev:	vNIC net_device
 * @data:	output array cursor for ethtool -S values
 *
 * Emits, per ring vector: rx_pkts, then tx_pkts and tx_busy; the remaining
 * per-vector counters are summed across all vectors ("gathered") and emitted
 * once at the end, in the NN_ET_RVEC_GATHER_STATS order used by the
 * matching strings function.
 *
 * Return: @data advanced past the values written (so callers can chain).
 */
static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
{
	u64 gathered_stats[NN_ET_RVEC_GATHER_STATS] = {};
	struct nfp_net *nn = netdev_priv(netdev);
	u64 tmp[NN_ET_RVEC_GATHER_STATS];
	unsigned int i, j;
	for (i = 0; i < nn->dp.num_r_vecs; i++) {
		unsigned int start;
		/* Retry loop: re-read if the writer updated rx_sync mid-read,
		 * so the snapshot of the RX counters is consistent.
		 */
		do {
			start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync);
			*data++ = nn->r_vecs[i].rx_pkts;
			tmp[0] = nn->r_vecs[i].hw_csum_rx_ok;
			tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok;
			tmp[2] = nn->r_vecs[i].hw_csum_rx_error;
		} while (u64_stats_fetch_retry(&nn->r_vecs[i].rx_sync, start));
		/* Same consistency dance for the TX-side counters. */
		do {
			start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync);
			*data++ = nn->r_vecs[i].tx_pkts;
			*data++ = nn->r_vecs[i].tx_busy;
			tmp[3] = nn->r_vecs[i].hw_csum_tx;
			tmp[4] = nn->r_vecs[i].hw_csum_tx_inner;
			tmp[5] = nn->r_vecs[i].tx_gather;
			tmp[6] = nn->r_vecs[i].tx_lso;
		} while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));
		/* Accumulate this vector's snapshot into the totals; tmp is
		 * used so a retry above doesn't double-count.
		 */
		for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++)
			gathered_stats[j] += tmp[j];
	}
	/* Emit the cross-vector sums after the per-vector entries. */
	for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++)
		*data++ = gathered_stats[j];
	return data;
}
/**
 * nfp_vnic_get_hw_stats_count() - number of device-maintained (HW) stats
 * @rx_rings:	number of RX rings
 * @tx_rings:	number of TX rings
 *
 * Return: count of HW ethtool stats: the global counter table plus a
 *	   pkts/bytes pair for every RX and TX queue.
 */
static unsigned int
nfp_vnic_get_hw_stats_count(unsigned int rx_rings, unsigned int tx_rings)
{
	unsigned int queue_stats = (rx_rings + tx_rings) * 2;

	return NN_ET_GLOBAL_STATS_LEN + queue_stats;
}
/**
 * nfp_vnic_get_hw_stats_strings() - fill in names of the HW stats
 * @data:	output string cursor (ETH_GSTRING_LEN per entry)
 * @rx_rings:	number of RX rings
 * @tx_rings:	number of TX rings
 *
 * Order must match nfp_vnic_get_hw_stats(): global table first, then the
 * per-TX-queue pairs, then the per-RX-queue pairs.
 *
 * Return: @data advanced past the strings written.
 */
static u8 *
nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int rx_rings,
			      unsigned int tx_rings)
{
	int i;

	for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++)
		data = nfp_pr_et(data, nfp_net_et_stats[i].name);

	for (i = 0; i < tx_rings; i++) {
		data = nfp_pr_et(data, "txq_%u_pkts", i);
		data = nfp_pr_et(data, "txq_%u_bytes", i);
	}

	for (i = 0; i < rx_rings; i++) {
		data = nfp_pr_et(data, "rxq_%u_pkts", i);
		data = nfp_pr_et(data, "rxq_%u_bytes", i);
	}

	return data;
}
/**
 * nfp_vnic_get_hw_stats() - read the device-maintained (HW) stats
 * @data:	output array cursor for ethtool -S values
 * @mem:	base of the control BAR holding the stats
 * @rx_rings:	number of RX rings
 * @tx_rings:	number of TX rings
 *
 * Reads the global counter table, then a pkts/bytes pair per TX queue and
 * per RX queue (the second readq at offset +8 is the bytes counter).
 * Order must match nfp_vnic_get_hw_stats_strings().
 *
 * Return: @data advanced past the values written.
 */
static u64 *
nfp_vnic_get_hw_stats(u64 *data, u8 __iomem *mem,
		      unsigned int rx_rings, unsigned int tx_rings)
{
	unsigned int i;

	for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++)
		*data++ = readq(mem + nfp_net_et_stats[i].off);

	for (i = 0; i < tx_rings; i++) {
		*data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i));
		*data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i) + 8);
	}

	for (i = 0; i < rx_rings; i++) {
		*data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i));
		*data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i) + 8);
	}

	return data;
}
static void nfp_net_get_stats(struct net_device *netdev, static void nfp_net_get_strings(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data) u32 stringset, u8 *data)
{ {
u64 gathered_stats[NN_ET_RVEC_GATHER_STATS] = {};
struct nfp_net *nn = netdev_priv(netdev); struct nfp_net *nn = netdev_priv(netdev);
u64 tmp[NN_ET_RVEC_GATHER_STATS];
u8 __iomem *io_p;
int i, j, k;
for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++) { switch (stringset) {
io_p = nn->dp.ctrl_bar + nfp_net_et_stats[i].off; case ETH_SS_STATS:
data[i] = readq(io_p); data = nfp_vnic_get_sw_stats_strings(netdev, data);
data = nfp_vnic_get_hw_stats_strings(data, nn->dp.num_rx_rings,
nn->dp.num_tx_rings);
break;
} }
for (j = 0; j < nn->dp.num_r_vecs; j++) { }
unsigned int start;
do { static void
start = u64_stats_fetch_begin(&nn->r_vecs[j].rx_sync); nfp_net_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
data[i++] = nn->r_vecs[j].rx_pkts; u64 *data)
tmp[0] = nn->r_vecs[j].hw_csum_rx_ok; {
tmp[1] = nn->r_vecs[j].hw_csum_rx_inner_ok; struct nfp_net *nn = netdev_priv(netdev);
tmp[2] = nn->r_vecs[j].hw_csum_rx_error;
} while (u64_stats_fetch_retry(&nn->r_vecs[j].rx_sync, start));
do { data = nfp_vnic_get_sw_stats(netdev, data);
start = u64_stats_fetch_begin(&nn->r_vecs[j].tx_sync); data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar,
data[i++] = nn->r_vecs[j].tx_pkts; nn->dp.num_rx_rings, nn->dp.num_tx_rings);
data[i++] = nn->r_vecs[j].tx_busy;
tmp[3] = nn->r_vecs[j].hw_csum_tx;
tmp[4] = nn->r_vecs[j].hw_csum_tx_inner;
tmp[5] = nn->r_vecs[j].tx_gather;
tmp[6] = nn->r_vecs[j].tx_lso;
} while (u64_stats_fetch_retry(&nn->r_vecs[j].tx_sync, start));
for (k = 0; k < NN_ET_RVEC_GATHER_STATS; k++)
gathered_stats[k] += tmp[k];
}
for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++)
data[i++] = gathered_stats[j];
for (j = 0; j < nn->dp.num_tx_rings; j++) {
io_p = nn->dp.ctrl_bar + NFP_NET_CFG_TXR_STATS(j);
data[i++] = readq(io_p);
io_p = nn->dp.ctrl_bar + NFP_NET_CFG_TXR_STATS(j) + 8;
data[i++] = readq(io_p);
}
for (j = 0; j < nn->dp.num_rx_rings; j++) {
io_p = nn->dp.ctrl_bar + NFP_NET_CFG_RXR_STATS(j);
data[i++] = readq(io_p);
io_p = nn->dp.ctrl_bar + NFP_NET_CFG_RXR_STATS(j) + 8;
data[i++] = readq(io_p);
}
} }
static int nfp_net_get_sset_count(struct net_device *netdev, int sset) static int nfp_net_get_sset_count(struct net_device *netdev, int sset)
...@@ -444,7 +490,9 @@ static int nfp_net_get_sset_count(struct net_device *netdev, int sset) ...@@ -444,7 +490,9 @@ static int nfp_net_get_sset_count(struct net_device *netdev, int sset)
switch (sset) { switch (sset) {
case ETH_SS_STATS: case ETH_SS_STATS:
return NN_ET_STATS_LEN; return nfp_vnic_get_sw_stats_count(netdev) +
nfp_vnic_get_hw_stats_count(nn->dp.num_rx_rings,
nn->dp.num_tx_rings);
default: default:
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment