Commit 056a7092 authored by Jeroen de Borst, committed by David S. Miller

gve: Add header split ethtool stats

To record statistics for header-split packets, three stats are added to
the driver's ethtool stats:

- rx_hsplit_pkt is the count of packets received with their header split
- rx_hsplit_bytes is the count of header bytes received via header split
- rx_hsplit_unsplit_pkt is the count of packets left unsplit, due to header
  buffer overflow or a zero header length, while header split is enabled
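
For illustration, with header split active the new counters appear in the
`ethtool -S` output roughly as below (device name, queue index and values
are placeholders, not output from a real run):

    # ethtool -S eth0 | grep hsplit
         rx_hsplit_pkt: 12345
         rx_hsplit_unsplit_pkt: 7
         rx_hsplit_bytes[0]: 987654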

Currently, the driver enters the stats_update critical section more than
once per packet. A future change will avoid that by moving all the
stats_update calls to one place at the end of `gve_rx_poll_dqo`.
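
As a rough sketch of that plan (not part of this patch; the helper name and
the accumulator parameters below are hypothetical), the per-packet updates
could be buffered in locals during the poll loop and committed once per poll:

    /* Hypothetical helper: commit the header-split counters accumulated
     * during one napi poll in a single statss critical section.
     */
    static void gve_rx_commit_hsplit_stats(struct gve_rx_ring *rx,
                                           u64 hsplit_pkts, u64 hsplit_bytes,
                                           u64 unsplit_pkts)
    {
            u64_stats_update_begin(&rx->statss);
            rx->rx_hsplit_pkt += hsplit_pkts;
            rx->rx_hsplit_bytes += hsplit_bytes;
            rx->rx_hsplit_unsplit_pkt += unsplit_pkts;
            u64_stats_update_end(&rx->statss);
    }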
Co-developed-by: Ziwei Xiao <ziweixiao@google.com>
Signed-off-by: Ziwei Xiao <ziweixiao@google.com>
Signed-off-by: Jeroen de Borst <jeroendb@google.com>
Reviewed-by: Praveen Kaligineedi <pkaligineedi@google.com>
Reviewed-by: Harshitha Ramamurthy <hramamurthy@google.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 5e37d825
drivers/net/ethernet/google/gve/gve.h

@@ -269,15 +269,19 @@ struct gve_rx_ring {
 	};
 	u64 rbytes; /* free-running bytes received */
+	u64 rx_hsplit_bytes; /* free-running header bytes received */
 	u64 rpackets; /* free-running packets received */
 	u32 cnt; /* free-running total number of completed packets */
 	u32 fill_cnt; /* free-running total number of descs and buffs posted */
 	u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
+	u64 rx_hsplit_pkt; /* free-running packets with headers split */
 	u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
 	u64 rx_copied_pkt; /* free-running total number of copied packets */
 	u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
 	u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
 	u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
+	/* free-running count of unsplit packets due to header buffer overflow or hdr_len is 0 */
+	u64 rx_hsplit_unsplit_pkt;
 	u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */
 	u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */
 	u64 rx_frag_copy_cnt; /* free-running count of rx segments copied */
drivers/net/ethernet/google/gve/gve_ethtool.c

@@ -39,17 +39,18 @@ static u32 gve_get_msglevel(struct net_device *netdev)
  * as declared in enum xdp_action inside file uapi/linux/bpf.h .
  */
 static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = {
-	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes",
-	"rx_dropped", "tx_dropped", "tx_timeouts",
+	"rx_packets", "rx_hsplit_pkt", "tx_packets", "rx_bytes",
+	"tx_bytes", "rx_dropped", "tx_dropped", "tx_timeouts",
 	"rx_skb_alloc_fail", "rx_buf_alloc_fail", "rx_desc_err_dropped_pkt",
+	"rx_hsplit_unsplit_pkt",
 	"interface_up_cnt", "interface_down_cnt", "reset_cnt",
 	"page_alloc_fail", "dma_mapping_error", "stats_report_trigger_cnt",
 };

 static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
-	"rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_consumed_desc[%u]", "rx_bytes[%u]",
-	"rx_cont_packet_cnt[%u]", "rx_frag_flip_cnt[%u]", "rx_frag_copy_cnt[%u]",
-	"rx_frag_alloc_cnt[%u]",
+	"rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_consumed_desc[%u]",
+	"rx_bytes[%u]", "rx_hsplit_bytes[%u]", "rx_cont_packet_cnt[%u]",
+	"rx_frag_flip_cnt[%u]", "rx_frag_copy_cnt[%u]", "rx_frag_alloc_cnt[%u]",
 	"rx_dropped_pkt[%u]", "rx_copybreak_pkt[%u]", "rx_copied_pkt[%u]",
 	"rx_queue_drop_cnt[%u]", "rx_no_buffers_posted[%u]",
 	"rx_drops_packet_over_mru[%u]", "rx_drops_invalid_checksum[%u]",

@@ -153,11 +154,13 @@ static void
 gve_get_ethtool_stats(struct net_device *netdev,
 		      struct ethtool_stats *stats, u64 *data)
 {
-	u64 tmp_rx_pkts, tmp_rx_bytes, tmp_rx_skb_alloc_fail,
-		tmp_rx_buf_alloc_fail, tmp_rx_desc_err_dropped_pkt,
+	u64 tmp_rx_pkts, tmp_rx_hsplit_pkt, tmp_rx_bytes, tmp_rx_hsplit_bytes,
+		tmp_rx_skb_alloc_fail, tmp_rx_buf_alloc_fail,
+		tmp_rx_desc_err_dropped_pkt, tmp_rx_hsplit_unsplit_pkt,
 		tmp_tx_pkts, tmp_tx_bytes;
-	u64 rx_buf_alloc_fail, rx_desc_err_dropped_pkt, rx_pkts,
-		rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes, tx_dropped;
+	u64 rx_buf_alloc_fail, rx_desc_err_dropped_pkt, rx_hsplit_unsplit_pkt,
+		rx_pkts, rx_hsplit_pkt, rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes,
+		tx_dropped;
 	int stats_idx, base_stats_idx, max_stats_idx;
 	struct stats *report_stats;
 	int *rx_qid_to_stats_idx;

@@ -184,8 +187,10 @@ gve_get_ethtool_stats(struct net_device *netdev,
 		kfree(rx_qid_to_stats_idx);
 		return;
 	}
-	for (rx_pkts = 0, rx_bytes = 0, rx_skb_alloc_fail = 0,
-	     rx_buf_alloc_fail = 0, rx_desc_err_dropped_pkt = 0, ring = 0;
+	for (rx_pkts = 0, rx_bytes = 0, rx_hsplit_pkt = 0,
+	     rx_skb_alloc_fail = 0, rx_buf_alloc_fail = 0,
+	     rx_desc_err_dropped_pkt = 0, rx_hsplit_unsplit_pkt = 0,
+	     ring = 0;
 	     ring < priv->rx_cfg.num_queues; ring++) {
 		if (priv->rx) {
 			do {

@@ -194,18 +199,23 @@ gve_get_ethtool_stats(struct net_device *netdev,
 				start =
 				  u64_stats_fetch_begin(&priv->rx[ring].statss);
 				tmp_rx_pkts = rx->rpackets;
+				tmp_rx_hsplit_pkt = rx->rx_hsplit_pkt;
 				tmp_rx_bytes = rx->rbytes;
 				tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
 				tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
 				tmp_rx_desc_err_dropped_pkt =
 					rx->rx_desc_err_dropped_pkt;
+				tmp_rx_hsplit_unsplit_pkt =
+					rx->rx_hsplit_unsplit_pkt;
 			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
 						       start));
 			rx_pkts += tmp_rx_pkts;
+			rx_hsplit_pkt += tmp_rx_hsplit_pkt;
 			rx_bytes += tmp_rx_bytes;
 			rx_skb_alloc_fail += tmp_rx_skb_alloc_fail;
 			rx_buf_alloc_fail += tmp_rx_buf_alloc_fail;
 			rx_desc_err_dropped_pkt += tmp_rx_desc_err_dropped_pkt;
+			rx_hsplit_unsplit_pkt += tmp_rx_hsplit_unsplit_pkt;
 		}
 	}
 	for (tx_pkts = 0, tx_bytes = 0, tx_dropped = 0, ring = 0;

@@ -226,6 +236,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
 	i = 0;
 	data[i++] = rx_pkts;
+	data[i++] = rx_hsplit_pkt;
 	data[i++] = tx_pkts;
 	data[i++] = rx_bytes;
 	data[i++] = tx_bytes;

@@ -237,6 +248,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
 	data[i++] = rx_skb_alloc_fail;
 	data[i++] = rx_buf_alloc_fail;
 	data[i++] = rx_desc_err_dropped_pkt;
+	data[i++] = rx_hsplit_unsplit_pkt;
 	data[i++] = priv->interface_up_cnt;
 	data[i++] = priv->interface_down_cnt;
 	data[i++] = priv->reset_cnt;

@@ -276,6 +288,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
 				start =
 				  u64_stats_fetch_begin(&priv->rx[ring].statss);
 				tmp_rx_bytes = rx->rbytes;
+				tmp_rx_hsplit_bytes = rx->rx_hsplit_bytes;
 				tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
 				tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
 				tmp_rx_desc_err_dropped_pkt =

@@ -283,6 +296,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
 			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
 						       start));
 			data[i++] = tmp_rx_bytes;
+			data[i++] = tmp_rx_hsplit_bytes;
 			data[i++] = rx->rx_cont_packet_cnt;
 			data[i++] = rx->rx_frag_flip_cnt;
 			data[i++] = rx->rx_frag_copy_cnt;
drivers/net/ethernet/google/gve/gve_rx_dqo.c

@@ -720,6 +720,8 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
 	/* Copy the header into the skb in the case of header split */
 	if (hsplit) {
+		int unsplit = 0;
+
 		if (hdr_len && !hbo) {
 			rx->ctx.skb_head = gve_rx_copy_data(priv->dev, napi,
 							    rx->dqo.hdr_bufs.data +

@@ -728,7 +730,14 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
 			if (unlikely(!rx->ctx.skb_head))
 				goto error;
 			rx->ctx.skb_tail = rx->ctx.skb_head;
+		} else {
+			unsplit = 1;
 		}
+		u64_stats_update_begin(&rx->statss);
+		rx->rx_hsplit_pkt++;
+		rx->rx_hsplit_unsplit_pkt += unsplit;
+		rx->rx_hsplit_bytes += hdr_len;
+		u64_stats_update_end(&rx->statss);
 	}

 	/* Sync the portion of dma buffer for CPU to read. */