Commit 64b68e36 authored by Or Har-Toov, committed by Saeed Mahameed

net/mlx5: Refactor and expand rep vport stat group

Expand the representor vport stat group to include all counters from the
vport stat group, so that all traffic passing through the vport is counted.

Also fix the current implementation, in which fill_stats and update_stats
use different structs (see the sketch below).
Signed-off-by: Or Har-Toov <ohartoov@nvidia.com>
Reviewed-by: Maor Gottlieb <maorg@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 7c33e739
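Why a single shared struct matters here: before this change the descriptor table was declared against a local struct vport_stats, while fill_stats read the values out of priv->stats.vf_vport (a struct rtnl_link_stats64) and update_stats wrote into that same rtnl struct. The descriptors store offsetof() values, and fill_stats applies those offsets to whatever memory it is handed, so the old code only worked as long as the two layouts happened to match. The sketch below is a minimal, self-contained userspace illustration of that descriptor/offset mechanism, assuming MLX5E_DECLARE_STAT and MLX5E_READ_CTR64_CPU follow the usual stringify-plus-offsetof pattern; the names demo_stats, DEMO_DECLARE_STAT and read_ctr64, and the sample values, are invented for illustration and are not part of the driver.

/*
 * Minimal userspace sketch of an offset-based stat descriptor scheme.
 * The key point: the struct named in the descriptor table must be the
 * same struct that the update routine writes into, otherwise the fill
 * routine reads the stored offsets against the wrong memory layout.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_stats {
	uint64_t vport_rx_packets;
	uint64_t vport_rx_bytes;
	uint64_t rx_vport_rdma_unicast_packets;
};

struct counter_desc {
	const char *format;	/* string reported to ethtool */
	size_t offset;		/* byte offset into the stats struct */
};

/* Stringify the field name and record its offset in one place. */
#define DEMO_DECLARE_STAT(type, fld) { #fld, offsetof(type, fld) }

static const struct counter_desc demo_stats_desc[] = {
	DEMO_DECLARE_STAT(struct demo_stats, vport_rx_packets),
	DEMO_DECLARE_STAT(struct demo_stats, vport_rx_bytes),
	DEMO_DECLARE_STAT(struct demo_stats, rx_vport_rdma_unicast_packets),
};

/* Raw 64-bit read at the descriptor's offset into the stats struct. */
static uint64_t read_ctr64(const void *stats, const struct counter_desc *desc,
			   int i)
{
	return *(const uint64_t *)((const char *)stats + desc[i].offset);
}

/* "update_stats": writes into the struct the descriptors were declared on. */
static void demo_update_stats(struct demo_stats *s)
{
	s->vport_rx_packets = 1000;
	s->vport_rx_bytes = 64000;
	s->rx_vport_rdma_unicast_packets = 10;
}

/* "fill_stats": walks the descriptor table; correct only when both sides
 * agree on the same struct. */
int main(void)
{
	struct demo_stats stats = { 0 };
	size_t i;

	demo_update_stats(&stats);
	for (i = 0; i < sizeof(demo_stats_desc) / sizeof(demo_stats_desc[0]); i++)
		printf("%s: %llu\n", demo_stats_desc[i].format,
		       (unsigned long long)read_ctr64(&stats, demo_stats_desc, i));
	return 0;
}

With the patch applied, the descriptor table, fill_stats and update_stats all agree on struct mlx5e_rep_stats, so adding a counter is just a new field, a new MLX5E_DECLARE_STAT entry and a new assignment in update_stats.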
@@ -85,18 +85,25 @@ static const struct counter_desc sw_rep_stats_desc[] = {
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
 };
 
-struct vport_stats {
-	u64 vport_rx_packets;
-	u64 vport_tx_packets;
-	u64 vport_rx_bytes;
-	u64 vport_tx_bytes;
-};
-
 static const struct counter_desc vport_rep_stats_desc[] = {
-	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_packets) },
-	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_bytes) },
-	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_packets) },
-	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, vport_rx_packets) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, vport_rx_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, vport_tx_packets) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, vport_tx_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
+			     rx_vport_rdma_unicast_packets) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, rx_vport_rdma_unicast_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
+			     tx_vport_rdma_unicast_packets) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats, tx_vport_rdma_unicast_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
+			     rx_vport_rdma_multicast_packets) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
+			     rx_vport_rdma_multicast_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
+			     tx_vport_rdma_multicast_packets) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
+			     tx_vport_rdma_multicast_bytes) },
 };
 
 #define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
@@ -161,33 +168,80 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport_rep)
 	int i;
 
 	for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
-		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport,
+		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.rep_stats,
 						   vport_rep_stats_desc, i);
 	return idx;
 }
 
 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep)
 {
+	struct mlx5e_rep_stats *rep_stats = &priv->stats.rep_stats;
+	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
 	struct mlx5_eswitch_rep *rep = rpriv->rep;
-	struct rtnl_link_stats64 *vport_stats;
-	struct ifla_vf_stats vf_stats;
+	u32 *out;
 	int err;
 
-	err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);
+	out = kvzalloc(outlen, GFP_KERNEL);
+	if (!out)
+		return;
+
+	err = mlx5_core_query_vport_counter(esw->dev, 1, rep->vport - 1, 0, out);
 	if (err) {
 		netdev_warn(priv->netdev, "vport %d error %d reading stats\n",
 			    rep->vport, err);
 		return;
 	}
 
-	vport_stats = &priv->stats.vf_vport;
+#define MLX5_GET_CTR(p, x) \
+	MLX5_GET64(query_vport_counter_out, p, x)
 	/* flip tx/rx as we are reporting the counters for the switch vport */
-	vport_stats->rx_packets = vf_stats.tx_packets;
-	vport_stats->rx_bytes   = vf_stats.tx_bytes;
-	vport_stats->tx_packets = vf_stats.rx_packets;
-	vport_stats->tx_bytes   = vf_stats.rx_bytes;
+	rep_stats->vport_rx_packets =
+		MLX5_GET_CTR(out, transmitted_ib_unicast.packets) +
+		MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
+		MLX5_GET_CTR(out, transmitted_ib_multicast.packets) +
+		MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
+		MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
+
+	rep_stats->vport_tx_packets =
+		MLX5_GET_CTR(out, received_ib_unicast.packets) +
+		MLX5_GET_CTR(out, received_eth_unicast.packets) +
+		MLX5_GET_CTR(out, received_ib_multicast.packets) +
+		MLX5_GET_CTR(out, received_eth_multicast.packets) +
+		MLX5_GET_CTR(out, received_eth_broadcast.packets);
+
+	rep_stats->vport_rx_bytes =
+		MLX5_GET_CTR(out, transmitted_ib_unicast.octets) +
+		MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
+		MLX5_GET_CTR(out, transmitted_ib_multicast.octets) +
+		MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
+
+	rep_stats->vport_tx_bytes =
+		MLX5_GET_CTR(out, received_ib_unicast.octets) +
+		MLX5_GET_CTR(out, received_eth_unicast.octets) +
+		MLX5_GET_CTR(out, received_ib_multicast.octets) +
+		MLX5_GET_CTR(out, received_eth_multicast.octets) +
+		MLX5_GET_CTR(out, received_eth_broadcast.octets);
+
+	rep_stats->rx_vport_rdma_unicast_packets =
+		MLX5_GET_CTR(out, transmitted_ib_unicast.packets);
+	rep_stats->tx_vport_rdma_unicast_packets =
+		MLX5_GET_CTR(out, received_ib_unicast.packets);
+	rep_stats->rx_vport_rdma_unicast_bytes =
+		MLX5_GET_CTR(out, transmitted_ib_unicast.octets);
+	rep_stats->tx_vport_rdma_unicast_bytes =
+		MLX5_GET_CTR(out, received_ib_unicast.octets);
+	rep_stats->rx_vport_rdma_multicast_packets =
+		MLX5_GET_CTR(out, transmitted_ib_multicast.packets);
+	rep_stats->tx_vport_rdma_multicast_packets =
+		MLX5_GET_CTR(out, received_ib_multicast.packets);
+	rep_stats->rx_vport_rdma_multicast_bytes =
+		MLX5_GET_CTR(out, transmitted_ib_multicast.octets);
+	rep_stats->tx_vport_rdma_multicast_bytes =
+		MLX5_GET_CTR(out, received_ib_multicast.octets);
+	kvfree(out);
 }
 
 static void mlx5e_rep_get_strings(struct net_device *dev,
...
@@ -463,6 +463,21 @@ struct mlx5e_ptp_cq_stats {
 	u64 resync_event;
 };
 
+struct mlx5e_rep_stats {
+	u64 vport_rx_packets;
+	u64 vport_tx_packets;
+	u64 vport_rx_bytes;
+	u64 vport_tx_bytes;
+	u64 rx_vport_rdma_unicast_packets;
+	u64 tx_vport_rdma_unicast_packets;
+	u64 rx_vport_rdma_unicast_bytes;
+	u64 tx_vport_rdma_unicast_bytes;
+	u64 rx_vport_rdma_multicast_packets;
+	u64 tx_vport_rdma_multicast_packets;
+	u64 rx_vport_rdma_multicast_bytes;
+	u64 tx_vport_rdma_multicast_bytes;
+};
+
 struct mlx5e_stats {
 	struct mlx5e_sw_stats sw;
 	struct mlx5e_qcounter_stats qcnt;
@@ -471,6 +486,7 @@ struct mlx5e_stats {
 	struct mlx5e_pport_stats pport;
 	struct rtnl_link_stats64 vf_vport;
 	struct mlx5e_pcie_stats pcie;
+	struct mlx5e_rep_stats rep_stats;
 };
 
 extern mlx5e_stats_grp_t mlx5e_nic_stats_grps[];
...