Commit 1a7f5124 authored by Eran Ben Elisha, committed by Saeed Mahameed

net/mlx5e: Split SW group counters update function

The SW group counters update function aggregates sw stats out of the many
mlx5e_*_stats structs that reside in a given mlx5e_channel_stats struct.
Split the function into a few helper functions, one per stats struct.

This will be used later in the series to calculate specific
mlx5e_*_stats that are not defined inside mlx5e_channel_stats.
Signed-off-by: Eran Ben Elisha <eranbe@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 0b676aae
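
To illustrate the refactoring pattern before reading the diff, here is a minimal standalone sketch with simplified, made-up types (rq_stats, ch_stats, sw_stats and the sw_update_stats_* helpers below are stand-ins, not the real mlx5e structs or functions): each per-struct helper folds one stats struct into the software totals, so the same helper can also be called for a stats instance that lives outside the per-channel array, which is the reuse the commit message refers to.

/* sketch.c -- hypothetical, simplified stand-ins for the mlx5e stats structs */
#include <stdio.h>

struct rq_stats { unsigned long packets, bytes; };
struct ch_stats { unsigned long events, poll; };

struct channel_stats {
        struct rq_stats rq;
        struct ch_stats ch;
};

struct sw_stats { unsigned long rx_packets, rx_bytes, ch_events, ch_poll; };

/* Helper: fold one RQ stats struct into the SW totals. */
static void sw_update_stats_rq(struct sw_stats *s, const struct rq_stats *rq)
{
        s->rx_packets += rq->packets;
        s->rx_bytes += rq->bytes;
}

/* Helper: fold one channel stats struct into the SW totals. */
static void sw_update_stats_ch(struct sw_stats *s, const struct ch_stats *ch)
{
        s->ch_events += ch->events;
        s->ch_poll += ch->poll;
}

int main(void)
{
        struct channel_stats chs[2] = {
                { .rq = { 10, 1500 }, .ch = { 1, 4 } },
                { .rq = { 20, 3000 }, .ch = { 2, 8 } },
        };
        /* A stats struct living outside the per-channel array, standing in
         * for the later-in-series callers the commit message mentions. */
        struct rq_stats extra_rq = { 5, 750 };
        struct sw_stats s = { 0 };
        int i;

        for (i = 0; i < 2; i++) {
                sw_update_stats_rq(&s, &chs[i].rq);
                sw_update_stats_ch(&s, &chs[i].ch);
        }
        sw_update_stats_rq(&s, &extra_rq); /* same helper, outside the loop */

        printf("rx_packets=%lu rx_bytes=%lu ch_events=%lu ch_poll=%lu\n",
               s.rx_packets, s.rx_bytes, s.ch_events, s.ch_poll);
        return 0;
}
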
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c

@@ -248,24 +248,68 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw)
 	return idx;
 }
 
-static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
+static void mlx5e_stats_grp_sw_update_stats_xdp_red(struct mlx5e_sw_stats *s,
+						    struct mlx5e_xdpsq_stats *xdpsq_red_stats)
 {
-	struct mlx5e_sw_stats *s = &priv->stats.sw;
-	int i;
+	s->tx_xdp_xmit += xdpsq_red_stats->xmit;
+	s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe;
+	s->tx_xdp_inlnw += xdpsq_red_stats->inlnw;
+	s->tx_xdp_nops += xdpsq_red_stats->nops;
+	s->tx_xdp_full += xdpsq_red_stats->full;
+	s->tx_xdp_err += xdpsq_red_stats->err;
+	s->tx_xdp_cqes += xdpsq_red_stats->cqes;
+}
 
-	memset(s, 0, sizeof(*s));
+static void mlx5e_stats_grp_sw_update_stats_xdpsq(struct mlx5e_sw_stats *s,
+						  struct mlx5e_xdpsq_stats *xdpsq_stats)
+{
+	s->rx_xdp_tx_xmit += xdpsq_stats->xmit;
+	s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe;
+	s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw;
+	s->rx_xdp_tx_nops += xdpsq_stats->nops;
+	s->rx_xdp_tx_full += xdpsq_stats->full;
+	s->rx_xdp_tx_err += xdpsq_stats->err;
+	s->rx_xdp_tx_cqe += xdpsq_stats->cqes;
+}
 
-	for (i = 0; i < priv->max_nch; i++) {
-		struct mlx5e_channel_stats *channel_stats =
-			&priv->channel_stats[i];
-		struct mlx5e_xdpsq_stats *xdpsq_red_stats = &channel_stats->xdpsq;
-		struct mlx5e_xdpsq_stats *xdpsq_stats = &channel_stats->rq_xdpsq;
-		struct mlx5e_xdpsq_stats *xsksq_stats = &channel_stats->xsksq;
-		struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq;
-		struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
-		struct mlx5e_ch_stats *ch_stats = &channel_stats->ch;
-		int j;
-
+static void mlx5e_stats_grp_sw_update_stats_xsksq(struct mlx5e_sw_stats *s,
+						  struct mlx5e_xdpsq_stats *xsksq_stats)
+{
+	s->tx_xsk_xmit += xsksq_stats->xmit;
+	s->tx_xsk_mpwqe += xsksq_stats->mpwqe;
+	s->tx_xsk_inlnw += xsksq_stats->inlnw;
+	s->tx_xsk_full += xsksq_stats->full;
+	s->tx_xsk_err += xsksq_stats->err;
+	s->tx_xsk_cqes += xsksq_stats->cqes;
+}
+
+static void mlx5e_stats_grp_sw_update_stats_xskrq(struct mlx5e_sw_stats *s,
+						  struct mlx5e_rq_stats *xskrq_stats)
+{
+	s->rx_xsk_packets += xskrq_stats->packets;
+	s->rx_xsk_bytes += xskrq_stats->bytes;
+	s->rx_xsk_csum_complete += xskrq_stats->csum_complete;
+	s->rx_xsk_csum_unnecessary += xskrq_stats->csum_unnecessary;
+	s->rx_xsk_csum_unnecessary_inner += xskrq_stats->csum_unnecessary_inner;
+	s->rx_xsk_csum_none += xskrq_stats->csum_none;
+	s->rx_xsk_ecn_mark += xskrq_stats->ecn_mark;
+	s->rx_xsk_removed_vlan_packets += xskrq_stats->removed_vlan_packets;
+	s->rx_xsk_xdp_drop += xskrq_stats->xdp_drop;
+	s->rx_xsk_xdp_redirect += xskrq_stats->xdp_redirect;
+	s->rx_xsk_wqe_err += xskrq_stats->wqe_err;
+	s->rx_xsk_mpwqe_filler_cqes += xskrq_stats->mpwqe_filler_cqes;
+	s->rx_xsk_mpwqe_filler_strides += xskrq_stats->mpwqe_filler_strides;
+	s->rx_xsk_oversize_pkts_sw_drop += xskrq_stats->oversize_pkts_sw_drop;
+	s->rx_xsk_buff_alloc_err += xskrq_stats->buff_alloc_err;
+	s->rx_xsk_cqe_compress_blks += xskrq_stats->cqe_compress_blks;
+	s->rx_xsk_cqe_compress_pkts += xskrq_stats->cqe_compress_pkts;
+	s->rx_xsk_congst_umr += xskrq_stats->congst_umr;
+	s->rx_xsk_arfs_err += xskrq_stats->arfs_err;
+}
+
+static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
+						     struct mlx5e_rq_stats *rq_stats)
+{
 	s->rx_packets += rq_stats->packets;
 	s->rx_bytes += rq_stats->bytes;
 	s->rx_lro_packets += rq_stats->lro_packets;
@@ -280,13 +324,6 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
 	s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
 	s->rx_xdp_drop += rq_stats->xdp_drop;
 	s->rx_xdp_redirect += rq_stats->xdp_redirect;
-	s->rx_xdp_tx_xmit += xdpsq_stats->xmit;
-	s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe;
-	s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw;
-	s->rx_xdp_tx_nops += xdpsq_stats->nops;
-	s->rx_xdp_tx_full += xdpsq_stats->full;
-	s->rx_xdp_tx_err += xdpsq_stats->err;
-	s->rx_xdp_tx_cqe += xdpsq_stats->cqes;
 	s->rx_wqe_err += rq_stats->wqe_err;
 	s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes;
 	s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides;
@@ -315,50 +352,22 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
 	s->rx_tls_resync_res_skip += rq_stats->tls_resync_res_skip;
 	s->rx_tls_err += rq_stats->tls_err;
 #endif
+}
+
+static void mlx5e_stats_grp_sw_update_stats_ch_stats(struct mlx5e_sw_stats *s,
+						     struct mlx5e_ch_stats *ch_stats)
+{
 	s->ch_events += ch_stats->events;
 	s->ch_poll += ch_stats->poll;
 	s->ch_arm += ch_stats->arm;
 	s->ch_aff_change += ch_stats->aff_change;
 	s->ch_force_irq += ch_stats->force_irq;
 	s->ch_eq_rearm += ch_stats->eq_rearm;
-	/* xdp redirect */
-	s->tx_xdp_xmit += xdpsq_red_stats->xmit;
-	s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe;
-	s->tx_xdp_inlnw += xdpsq_red_stats->inlnw;
-	s->tx_xdp_nops += xdpsq_red_stats->nops;
-	s->tx_xdp_full += xdpsq_red_stats->full;
-	s->tx_xdp_err += xdpsq_red_stats->err;
-	s->tx_xdp_cqes += xdpsq_red_stats->cqes;
-	/* AF_XDP zero-copy */
-	s->rx_xsk_packets += xskrq_stats->packets;
-	s->rx_xsk_bytes += xskrq_stats->bytes;
-	s->rx_xsk_csum_complete += xskrq_stats->csum_complete;
-	s->rx_xsk_csum_unnecessary += xskrq_stats->csum_unnecessary;
-	s->rx_xsk_csum_unnecessary_inner += xskrq_stats->csum_unnecessary_inner;
-	s->rx_xsk_csum_none += xskrq_stats->csum_none;
-	s->rx_xsk_ecn_mark += xskrq_stats->ecn_mark;
-	s->rx_xsk_removed_vlan_packets += xskrq_stats->removed_vlan_packets;
-	s->rx_xsk_xdp_drop += xskrq_stats->xdp_drop;
-	s->rx_xsk_xdp_redirect += xskrq_stats->xdp_redirect;
-	s->rx_xsk_wqe_err += xskrq_stats->wqe_err;
-	s->rx_xsk_mpwqe_filler_cqes += xskrq_stats->mpwqe_filler_cqes;
-	s->rx_xsk_mpwqe_filler_strides += xskrq_stats->mpwqe_filler_strides;
-	s->rx_xsk_oversize_pkts_sw_drop += xskrq_stats->oversize_pkts_sw_drop;
-	s->rx_xsk_buff_alloc_err += xskrq_stats->buff_alloc_err;
-	s->rx_xsk_cqe_compress_blks += xskrq_stats->cqe_compress_blks;
-	s->rx_xsk_cqe_compress_pkts += xskrq_stats->cqe_compress_pkts;
-	s->rx_xsk_congst_umr += xskrq_stats->congst_umr;
-	s->rx_xsk_arfs_err += xskrq_stats->arfs_err;
-	s->tx_xsk_xmit += xsksq_stats->xmit;
-	s->tx_xsk_mpwqe += xsksq_stats->mpwqe;
-	s->tx_xsk_inlnw += xsksq_stats->inlnw;
-	s->tx_xsk_full += xsksq_stats->full;
-	s->tx_xsk_err += xsksq_stats->err;
-	s->tx_xsk_cqes += xsksq_stats->cqes;
-
-	for (j = 0; j < priv->max_opened_tc; j++) {
-		struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];
-
+}
+
+static void mlx5e_stats_grp_sw_update_stats_sq(struct mlx5e_sw_stats *s,
+					       struct mlx5e_sq_stats *sq_stats)
+{
 	s->tx_packets += sq_stats->packets;
 	s->tx_bytes += sq_stats->bytes;
 	s->tx_tso_packets += sq_stats->tso_packets;
@@ -391,6 +400,31 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
 	s->tx_tls_drop_bypass_req += sq_stats->tls_drop_bypass_req;
 #endif
 	s->tx_cqes += sq_stats->cqes;
+}
+
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
+{
+	struct mlx5e_sw_stats *s = &priv->stats.sw;
+	int i;
+
+	memset(s, 0, sizeof(*s));
+
+	for (i = 0; i < priv->max_nch; i++) {
+		struct mlx5e_channel_stats *channel_stats =
+			&priv->channel_stats[i];
+		int j;
+
+		mlx5e_stats_grp_sw_update_stats_rq_stats(s, &channel_stats->rq);
+		mlx5e_stats_grp_sw_update_stats_xdpsq(s, &channel_stats->rq_xdpsq);
+		mlx5e_stats_grp_sw_update_stats_ch_stats(s, &channel_stats->ch);
+		/* xdp redirect */
+		mlx5e_stats_grp_sw_update_stats_xdp_red(s, &channel_stats->xdpsq);
+		/* AF_XDP zero-copy */
+		mlx5e_stats_grp_sw_update_stats_xskrq(s, &channel_stats->xskrq);
+		mlx5e_stats_grp_sw_update_stats_xsksq(s, &channel_stats->xsksq);
+
+		for (j = 0; j < priv->max_opened_tc; j++) {
+			mlx5e_stats_grp_sw_update_stats_sq(s, &channel_stats->sq[j]);
+
 			/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
 			barrier();
...