Commit aa7e17a3 authored by Dmitry Bogdanov, committed by David S. Miller

net: atlantic: additional per-queue stats

This patch adds additional per-queue stats, which could
be useful for debugging and diagnostics.
Signed-off-by: Dmitry Bogdanov <dbogdanov@marvell.com>
Signed-off-by: Mark Starovoytov <mstarovoitov@marvell.com>
Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d7d8bb92
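
All of the hunks below follow the same discipline: counters are bumped in the datapath inside a u64_stats update section and read back from ethtool context with a fetch/retry loop, so 64-bit counters stay consistent on 32-bit machines as well. A minimal sketch of that writer/reader pairing, using illustrative example_* names that are not part of the driver:

#include <linux/u64_stats_sync.h>

/* Illustrative struct only; the driver keeps an equivalent syncp in its
 * ring statistics.  A real user must initialize syncp with
 * u64_stats_init() before the first update.
 */
struct example_rx_stats {
	struct u64_stats_sync syncp;	/* seqcount on 32-bit, no-op on 64-bit */
	u64 alloc_fails;
};

/* Writer side (datapath): increment inside an update section. */
static void example_count_alloc_fail(struct example_rx_stats *s)
{
	u64_stats_update_begin(&s->syncp);
	s->alloc_fails++;
	u64_stats_update_end(&s->syncp);
}

/* Reader side (ethtool): retry until a consistent snapshot is observed. */
static u64 example_read_alloc_fails(struct example_rx_stats *s)
{
	unsigned int start;
	u64 val;

	do {
		start = u64_stats_fetch_begin_irq(&s->syncp);
		val = s->alloc_fails;
	} while (u64_stats_fetch_retry_irq(&s->syncp, start));

	return val;
}

On 64-bit builds these helpers compile down to plain loads and stores; on 32-bit they provide the seqcount needed to read a 64-bit counter without tearing.
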
@@ -94,6 +94,9 @@ static const char * const aq_ethtool_queue_rx_stat_names[] = {
 	"%sQueue[%d] InJumboPackets",
 	"%sQueue[%d] InLroPackets",
 	"%sQueue[%d] InErrors",
+	"%sQueue[%d] AllocFails",
+	"%sQueue[%d] SkbAllocFails",
+	"%sQueue[%d] Polls",
 };
 
 static const char * const aq_ethtool_queue_tx_stat_names[] = {
...
@@ -94,6 +94,11 @@ static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf,
 	if (!rxbuf->rxdata.page) {
 		ret = aq_get_rxpage(&rxbuf->rxdata, order,
 				    aq_nic_get_dev(self->aq_nic));
+		if (ret) {
+			u64_stats_update_begin(&self->stats.rx.syncp);
+			self->stats.rx.alloc_fails++;
+			u64_stats_update_end(&self->stats.rx.syncp);
+		}
 		return ret;
 	}
 
@@ -414,6 +419,9 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
 			skb = build_skb(aq_buf_vaddr(&buff->rxdata),
 					AQ_CFG_RX_FRAME_MAX);
 			if (unlikely(!skb)) {
+				u64_stats_update_begin(&self->stats.rx.syncp);
+				self->stats.rx.skb_alloc_fails++;
+				u64_stats_update_end(&self->stats.rx.syncp);
 				err = -ENOMEM;
 				goto err_exit;
 			}
@@ -427,6 +435,9 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
 		} else {
 			skb = napi_alloc_skb(napi, AQ_CFG_RX_HDR_SIZE);
 			if (unlikely(!skb)) {
+				u64_stats_update_begin(&self->stats.rx.syncp);
+				self->stats.rx.skb_alloc_fails++;
+				u64_stats_update_end(&self->stats.rx.syncp);
 				err = -ENOMEM;
 				goto err_exit;
 			}
@@ -599,6 +610,9 @@ unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data)
 			data[++count] = self->stats.rx.jumbo_packets;
 			data[++count] = self->stats.rx.lro_packets;
 			data[++count] = self->stats.rx.errors;
+			data[++count] = self->stats.rx.alloc_fails;
+			data[++count] = self->stats.rx.skb_alloc_fails;
+			data[++count] = self->stats.rx.polls;
 		} while (u64_stats_fetch_retry_irq(&self->stats.rx.syncp, start));
 	} else {
 		/* This data should mimic aq_ethtool_queue_tx_stat_names structure */
...
@@ -95,6 +95,9 @@ struct aq_ring_stats_rx_s {
 	u64 bytes;
 	u64 lro_packets;
 	u64 jumbo_packets;
+	u64 alloc_fails;
+	u64 skb_alloc_fails;
+	u64 polls;
 	u64 pg_losts;
 	u64 pg_flips;
 	u64 pg_reuses;
...
@@ -45,6 +45,9 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
 	} else {
 		for (i = 0U, ring = self->ring[0];
 		     self->tx_rings > i; ++i, ring = self->ring[i]) {
+			u64_stats_update_begin(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
+			ring[AQ_VEC_RX_ID].stats.rx.polls++;
+			u64_stats_update_end(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
 			if (self->aq_hw_ops->hw_ring_tx_head_update) {
 				err = self->aq_hw_ops->hw_ring_tx_head_update(
 					self->aq_hw,
...
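
With the patch applied, the three new counters show up per RX queue in the driver's ethtool statistics next to the existing InErrors/InLroPackets entries, i.e. they can be inspected with ethtool -S <iface>. As a hedged illustration of the same path from userspace, the sketch below dumps all stat names and values through the SIOCETHTOOL ioctl (ETHTOOL_GSTRINGS/ETHTOOL_GSTATS); the default interface name and the abridged error handling are assumptions, not part of the driver.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
	const char *ifname = argc > 1 ? argv[1] : "eth0"; /* assumed default */
	struct ethtool_drvinfo drvinfo = { .cmd = ETHTOOL_GDRVINFO };
	struct ethtool_gstrings *strings;
	struct ethtool_stats *stats;
	struct ifreq ifr = { 0 };
	unsigned int i;
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return 1;
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

	/* How many counters does the driver expose? */
	ifr.ifr_data = (void *)&drvinfo;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	/* Counter names, ETH_GSTRING_LEN bytes each (e.g. "Queue[0] AllocFails"). */
	strings = calloc(1, sizeof(*strings) + drvinfo.n_stats * ETH_GSTRING_LEN);
	strings->cmd = ETHTOOL_GSTRINGS;
	strings->string_set = ETH_SS_STATS;
	strings->len = drvinfo.n_stats;
	ifr.ifr_data = (void *)strings;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	/* Counter values, in the same order as the names above. */
	stats = calloc(1, sizeof(*stats) + drvinfo.n_stats * sizeof(__u64));
	stats->cmd = ETHTOOL_GSTATS;
	stats->n_stats = drvinfo.n_stats;
	ifr.ifr_data = (void *)stats;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;

	for (i = 0; i < drvinfo.n_stats; i++)
		printf("%.*s: %llu\n", ETH_GSTRING_LEN,
		       (char *)&strings->data[i * ETH_GSTRING_LEN],
		       (unsigned long long)stats->data[i]);

	free(strings);
	free(stats);
	close(fd);
	return 0;
}

The value order matches aq_ring_fill_stats_data(), which is why the driver comments insist the data layout mimic the ethtool name arrays.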