Commit bd7fc6e1 authored by Shradha Gupta, committed by David S. Miller

net: mana: Add new MANA VF performance counters for easier troubleshooting

Extend the performance counter stats reported in 'ethtool -S <interface>' output
for the MANA VF to make troubleshooting easier.

Tested-on: Ubuntu22
Signed-off-by: Shradha Gupta <shradhagupta@linux.microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 81dc0741
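
The new per-queue TX counters below follow the usual u64_stats_sync pattern: the transmit path brackets each increment with u64_stats_update_begin()/u64_stats_update_end(), and the ethtool read side takes a consistent snapshot with u64_stats_fetch_begin()/u64_stats_fetch_retry(). A minimal sketch of that pattern follows; the demo_* names are illustrative placeholders, not driver symbols.

#include <linux/u64_stats_sync.h>

/* Stand-in for a per-queue stats block such as struct mana_stats_tx. */
struct demo_tx_stats {
        u64 tso_packets;
        u64 tso_bytes;
        struct u64_stats_sync syncp;
};

/* TX path: account one TSO skb of @len bytes with @hdr_len header bytes. */
static void demo_count_tso(struct demo_tx_stats *st, unsigned int len,
                           unsigned int hdr_len)
{
        u64_stats_update_begin(&st->syncp);
        st->tso_packets++;
        st->tso_bytes += len - hdr_len;
        u64_stats_update_end(&st->syncp);
}

/* ethtool path: retry until a consistent snapshot of both counters is read. */
static void demo_read_tso(const struct demo_tx_stats *st, u64 *pkts, u64 *bytes)
{
        unsigned int start;

        do {
                start = u64_stats_fetch_begin(&st->syncp);
                *pkts = st->tso_packets;
                *bytes = st->tso_bytes;
        } while (u64_stats_fetch_retry(&st->syncp, start));
}

On 64-bit architectures the syncp bookkeeping compiles away, so these counters add no atomic or locking cost to the hot path.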
@@ -156,6 +156,7 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	struct mana_txq *txq;
 	struct mana_cq *cq;
 	int err, len;
+	u16 ihs;
 	if (unlikely(!apc->port_is_up))
 		goto tx_drop;
@@ -166,6 +167,7 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	txq = &apc->tx_qp[txq_idx].txq;
 	gdma_sq = txq->gdma_sq;
 	cq = &apc->tx_qp[txq_idx].tx_cq;
+	tx_stats = &txq->stats;
 	pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
 	pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;
@@ -179,10 +181,17 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt;
-	if (pkt_fmt == MANA_SHORT_PKT_FMT)
+	if (pkt_fmt == MANA_SHORT_PKT_FMT) {
 		pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob);
-	else
+		u64_stats_update_begin(&tx_stats->syncp);
+		tx_stats->short_pkt_fmt++;
+		u64_stats_update_end(&tx_stats->syncp);
+	} else {
 		pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob);
+		u64_stats_update_begin(&tx_stats->syncp);
+		tx_stats->long_pkt_fmt++;
+		u64_stats_update_end(&tx_stats->syncp);
+	}
 	pkg.wqe_req.inline_oob_data = &pkg.tx_oob;
 	pkg.wqe_req.flags = 0;
@@ -232,9 +241,35 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 						 &ipv6_hdr(skb)->daddr, 0,
 						 IPPROTO_TCP, 0);
 		}
+		if (skb->encapsulation) {
+			ihs = skb_inner_tcp_all_headers(skb);
+			u64_stats_update_begin(&tx_stats->syncp);
+			tx_stats->tso_inner_packets++;
+			tx_stats->tso_inner_bytes += skb->len - ihs;
+			u64_stats_update_end(&tx_stats->syncp);
+		} else {
+			if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+				ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
+			} else {
+				ihs = skb_tcp_all_headers(skb);
+				if (ipv6_has_hopopt_jumbo(skb))
+					ihs -= sizeof(struct hop_jumbo_hdr);
+			}
+			u64_stats_update_begin(&tx_stats->syncp);
+			tx_stats->tso_packets++;
+			tx_stats->tso_bytes += skb->len - ihs;
+			u64_stats_update_end(&tx_stats->syncp);
+		}
 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		csum_type = mana_checksum_info(skb);
+		u64_stats_update_begin(&tx_stats->syncp);
+		tx_stats->csum_partial++;
+		u64_stats_update_end(&tx_stats->syncp);
 		if (csum_type == IPPROTO_TCP) {
 			pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
 			pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
@@ -254,8 +289,12 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		}
 	}
-	if (mana_map_skb(skb, apc, &pkg))
+	if (mana_map_skb(skb, apc, &pkg)) {
+		u64_stats_update_begin(&tx_stats->syncp);
+		tx_stats->mana_map_err++;
+		u64_stats_update_end(&tx_stats->syncp);
 		goto free_sgl_ptr;
+	}
 	skb_queue_tail(&txq->pending_skbs, skb);
@@ -1038,6 +1077,8 @@ static void mana_poll_tx_cq(struct mana_cq *cq)
 	if (comp_read < 1)
 		return;
+	apc->eth_stats.tx_cqes = comp_read;
 	for (i = 0; i < comp_read; i++) {
 		struct mana_tx_comp_oob *cqe_oob;
@@ -1064,6 +1105,7 @@ static void mana_poll_tx_cq(struct mana_cq *cq)
 		case CQE_TX_VLAN_TAGGING_VIOLATION:
 			WARN_ONCE(1, "TX: CQE error %d: ignored.\n",
 				  cqe_oob->cqe_hdr.cqe_type);
+			apc->eth_stats.tx_cqe_err++;
 			break;
@@ -1072,6 +1114,7 @@ static void mana_poll_tx_cq(struct mana_cq *cq)
 			 */
 			WARN_ONCE(1, "TX: Unexpected CQE type %d: HW BUG?\n",
 				  cqe_oob->cqe_hdr.cqe_type);
+			apc->eth_stats.tx_cqe_unknown_type++;
 			return;
 		}
@@ -1118,6 +1161,8 @@ static void mana_poll_tx_cq(struct mana_cq *cq)
 		WARN_ON_ONCE(1);
 	cq->work_done = pkt_transmitted;
+	apc->eth_stats.tx_cqes -= pkt_transmitted;
 }
 static void mana_post_pkt_rxq(struct mana_rxq *rxq)
@@ -1252,12 +1297,15 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
 	struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;
 	struct net_device *ndev = rxq->ndev;
 	struct mana_recv_buf_oob *rxbuf_oob;
+	struct mana_port_context *apc;
 	struct device *dev = gc->dev;
 	void *new_buf, *old_buf;
 	struct page *new_page;
 	u32 curr, pktlen;
 	dma_addr_t da;
+	apc = netdev_priv(ndev);
 	switch (oob->cqe_hdr.cqe_type) {
 	case CQE_RX_OKAY:
 		break;
@@ -1270,6 +1318,7 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
 	case CQE_RX_COALESCED_4:
 		netdev_err(ndev, "RX coalescing is unsupported\n");
+		apc->eth_stats.rx_coalesced_err++;
 		return;
 	case CQE_RX_OBJECT_FENCE:
@@ -1279,6 +1328,7 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
 	default:
 		netdev_err(ndev, "Unknown RX CQE type = %d\n",
 			   oob->cqe_hdr.cqe_type);
+		apc->eth_stats.rx_cqe_unknown_type++;
 		return;
 	}
@@ -1341,11 +1391,15 @@ static void mana_poll_rx_cq(struct mana_cq *cq)
 {
 	struct gdma_comp *comp = cq->gdma_comp_buf;
 	struct mana_rxq *rxq = cq->rxq;
+	struct mana_port_context *apc;
 	int comp_read, i;
+	apc = netdev_priv(rxq->ndev);
 	comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
 	WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER);
+	apc->eth_stats.rx_cqes = comp_read;
 	rxq->xdp_flush = false;
 	for (i = 0; i < comp_read; i++) {
@@ -1357,6 +1411,8 @@ static void mana_poll_rx_cq(struct mana_cq *cq)
 			return;
 		mana_process_rx_cqe(rxq, cq, &comp[i]);
+		apc->eth_stats.rx_cqes--;
 	}
 	if (rxq->xdp_flush)
......
@@ -13,6 +13,15 @@ static const struct {
 } mana_eth_stats[] = {
 	{"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)},
 	{"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)},
+	{"tx_cqes", offsetof(struct mana_ethtool_stats, tx_cqes)},
+	{"tx_cq_err", offsetof(struct mana_ethtool_stats, tx_cqe_err)},
+	{"tx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
+					 tx_cqe_unknown_type)},
+	{"rx_cqes", offsetof(struct mana_ethtool_stats, rx_cqes)},
+	{"rx_coalesced_err", offsetof(struct mana_ethtool_stats,
+				      rx_coalesced_err)},
+	{"rx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
+					 rx_cqe_unknown_type)},
 };
 static int mana_get_sset_count(struct net_device *ndev, int stringset)
@@ -23,7 +32,8 @@ static int mana_get_sset_count(struct net_device *ndev, int stringset)
 	if (stringset != ETH_SS_STATS)
 		return -EINVAL;
-	return ARRAY_SIZE(mana_eth_stats) + num_queues * 8;
+	return ARRAY_SIZE(mana_eth_stats) + num_queues *
+		(MANA_STATS_RX_COUNT + MANA_STATS_TX_COUNT);
 }
 static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
@@ -61,6 +71,22 @@ static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
 		p += ETH_GSTRING_LEN;
 		sprintf(p, "tx_%d_xdp_xmit", i);
 		p += ETH_GSTRING_LEN;
+		sprintf(p, "tx_%d_tso_packets", i);
+		p += ETH_GSTRING_LEN;
+		sprintf(p, "tx_%d_tso_bytes", i);
+		p += ETH_GSTRING_LEN;
+		sprintf(p, "tx_%d_tso_inner_packets", i);
+		p += ETH_GSTRING_LEN;
+		sprintf(p, "tx_%d_tso_inner_bytes", i);
+		p += ETH_GSTRING_LEN;
+		sprintf(p, "tx_%d_long_pkt_fmt", i);
+		p += ETH_GSTRING_LEN;
+		sprintf(p, "tx_%d_short_pkt_fmt", i);
+		p += ETH_GSTRING_LEN;
+		sprintf(p, "tx_%d_csum_partial", i);
+		p += ETH_GSTRING_LEN;
+		sprintf(p, "tx_%d_mana_map_err", i);
+		p += ETH_GSTRING_LEN;
 	}
 }
@@ -78,6 +104,14 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
 	u64 xdp_xmit;
 	u64 xdp_drop;
 	u64 xdp_tx;
+	u64 tso_packets;
+	u64 tso_bytes;
+	u64 tso_inner_packets;
+	u64 tso_inner_bytes;
+	u64 long_pkt_fmt;
+	u64 short_pkt_fmt;
+	u64 csum_partial;
+	u64 mana_map_err;
 	int q, i = 0;
 	if (!apc->port_is_up)
@@ -113,11 +147,27 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
 			packets = tx_stats->packets;
 			bytes = tx_stats->bytes;
 			xdp_xmit = tx_stats->xdp_xmit;
+			tso_packets = tx_stats->tso_packets;
+			tso_bytes = tx_stats->tso_bytes;
+			tso_inner_packets = tx_stats->tso_inner_packets;
+			tso_inner_bytes = tx_stats->tso_inner_bytes;
+			long_pkt_fmt = tx_stats->long_pkt_fmt;
+			short_pkt_fmt = tx_stats->short_pkt_fmt;
+			csum_partial = tx_stats->csum_partial;
+			mana_map_err = tx_stats->mana_map_err;
 		} while (u64_stats_fetch_retry(&tx_stats->syncp, start));
 		data[i++] = packets;
 		data[i++] = bytes;
 		data[i++] = xdp_xmit;
+		data[i++] = tso_packets;
+		data[i++] = tso_bytes;
+		data[i++] = tso_inner_packets;
+		data[i++] = tso_inner_bytes;
+		data[i++] = long_pkt_fmt;
+		data[i++] = short_pkt_fmt;
+		data[i++] = csum_partial;
+		data[i++] = mana_map_err;
 	}
 }
......
@@ -48,6 +48,10 @@ enum TRI_STATE {
 #define MAX_PORTS_IN_MANA_DEV 256
+/* Update this count whenever the respective structures are changed */
+#define MANA_STATS_RX_COUNT 5
+#define MANA_STATS_TX_COUNT 11
 struct mana_stats_rx {
 	u64 packets;
 	u64 bytes;
@@ -61,6 +65,14 @@ struct mana_stats_tx {
 	u64 packets;
 	u64 bytes;
 	u64 xdp_xmit;
+	u64 tso_packets;
+	u64 tso_bytes;
+	u64 tso_inner_packets;
+	u64 tso_inner_bytes;
+	u64 short_pkt_fmt;
+	u64 long_pkt_fmt;
+	u64 csum_partial;
+	u64 mana_map_err;
 	struct u64_stats_sync syncp;
 };
@@ -331,6 +343,12 @@ struct mana_tx_qp {
 struct mana_ethtool_stats {
 	u64 stop_queue;
 	u64 wake_queue;
+	u64 tx_cqes;
+	u64 tx_cqe_err;
+	u64 tx_cqe_unknown_type;
+	u64 rx_cqes;
+	u64 rx_coalesced_err;
+	u64 rx_cqe_unknown_type;
 };
 struct mana_context {
......
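
Counter accounting note: mana_eth_stats[] now carries 8 port-wide entries (stop_queue and wake_queue plus the six new CQE counters), and each queue contributes MANA_STATS_RX_COUNT (5) RX strings and MANA_STATS_TX_COUNT (11) TX strings, which is the value mana_get_sset_count() returns. For an assumed port with 16 queues, 'ethtool -S' would therefore list 8 + 16 * (5 + 11) = 264 counters; the per-queue RX strings themselves come from a hunk not shown above.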