Commit 1a0058cf authored by Tariq Toukan, committed by Jakub Kicinski

net/mlx4_en: Remove unused performance counters

Performance analysis counters are maintained under the MLX4_EN_PERF_STAT
definition, which is never set anywhere in the tree, so the counters are
never updated. Remove them, along with all related structures and logic.
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Reviewed-by: Moshe Shemesh <moshe@nvidia.com>
Link: https://lore.kernel.org/r/20201118103427.4314-1-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 12f4bd86
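The removal is behavior-neutral because of the idiom visible in the mlx4_en.h hunk below: with MLX4_EN_PERF_STAT never defined, every counter macro expands to a no-op (`do {} while (0)` for the updaters, a constant 0 for the getters), so none of the deleted call sites ever produced code. A minimal, standalone sketch of that idiom follows; FEATURE_STATS, INC_COUNTER, and the main() harness are hypothetical stand-ins, not driver code:

#include <stdio.h>

/* FEATURE_STATS is a hypothetical stand-in for MLX4_EN_PERF_STAT.
 * It is deliberately left undefined, so the no-op branch below is
 * selected, which is the state the mlx4 driver was always built in.
 */
#ifdef FEATURE_STATS
#define INC_COUNTER(cnt)        (++(cnt))
#else
#define INC_COUNTER(cnt)        do {} while (0)
#endif

int main(void)
{
        unsigned int hits = 0;

        /* Safe as the sole body of an if/else: do {} while (0) is a
         * single statement, so the macro never breaks control flow.
         */
        if (1)
                INC_COUNTER(hits);
        else
                printf("unreachable\n");

        printf("hits = %u\n", hits);    /* prints 0: the call compiled away */
        return 0;
}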
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2027,7 +2027,6 @@ static void mlx4_en_clear_stats(struct net_device *dev)
         if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
                 en_dbg(HW, priv, "Failed dumping statistics\n");
-        memset(&priv->pstats, 0, sizeof(priv->pstats));
         memset(&priv->pkstats, 0, sizeof(priv->pkstats));
         memset(&priv->port_stats, 0, sizeof(priv->port_stats));
         memset(&priv->rx_flowstats, 0, sizeof(priv->rx_flowstats));
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -914,7 +914,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
                 wmb(); /* ensure HW sees CQ consumer before we post new buffers */
                 ring->cons = cq->mcq.cons_index;
         }
-        AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
         mlx4_en_refill_rx_buffers(priv, ring);
@@ -966,8 +965,6 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
                 /* in case we got here because of !clean_complete */
                 done = budget;
-                INC_PERF_COUNTER(priv->pstats.napi_quota);
                 cpu_curr = smp_processor_id();
                 idata = irq_desc_get_irq_data(cq->irq_desc);
                 aff = irq_data_get_affinity_mask(idata);
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -864,9 +864,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
         if (unlikely(!priv->port_up))
                 goto tx_drop;
-        /* fetch ring->cons far ahead before needing it to avoid stall */
-        ring_cons = READ_ONCE(ring->cons);
         real_size = get_real_size(skb, shinfo, dev, &lso_header_size,
                                   &inline_ok, &fragptr);
         if (unlikely(!real_size))
@@ -898,10 +895,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
         netdev_txq_bql_enqueue_prefetchw(ring->tx_queue);
-        /* Track current inflight packets for performance analysis */
-        AVG_PERF_COUNTER(priv->pstats.inflight_avg,
-                         (u32)(ring->prod - ring_cons - 1));
         /* Packet is good - grab an index and transmit it */
         index = ring->prod & ring->size_mask;
         bf_index = ring->prod;
@@ -1012,7 +1005,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
                 ring->packets++;
         }
         ring->bytes += tx_info->nr_bytes;
-        AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
         if (tx_info->inl)
                 build_inline_wqe(tx_desc, skb, shinfo, fragptr);
@@ -1141,10 +1133,6 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
         index = ring->prod & ring->size_mask;
         tx_info = &ring->tx_info[index];
-        /* Track current inflight packets for performance analysis */
-        AVG_PERF_COUNTER(priv->pstats.inflight_avg,
-                         (u32)(ring->prod - READ_ONCE(ring->cons) - 1));
         tx_desc = ring->buf + (index << LOG_TXBB_SIZE);
         data = &tx_desc->data;
@@ -1169,7 +1157,6 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
                 cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
         rx_ring->xdp_tx++;
-        AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, length);
         ring->prod += MLX4_EN_XDP_TX_NRTXBB;
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -170,27 +170,6 @@
 #define MLX4_EN_LOOPBACK_RETRIES        5
 #define MLX4_EN_LOOPBACK_TIMEOUT        100
-#ifdef MLX4_EN_PERF_STAT
-/* Number of samples to 'average' */
-#define AVG_SIZE                        128
-#define AVG_FACTOR                      1024
-#define INC_PERF_COUNTER(cnt)           (++(cnt))
-#define ADD_PERF_COUNTER(cnt, add)      ((cnt) += (add))
-#define AVG_PERF_COUNTER(cnt, sample) \
-        ((cnt) = ((cnt) * (AVG_SIZE - 1) + (sample) * AVG_FACTOR) / AVG_SIZE)
-#define GET_PERF_COUNTER(cnt)           (cnt)
-#define GET_AVG_PERF_COUNTER(cnt)       ((cnt) / AVG_FACTOR)
-#else
-#define INC_PERF_COUNTER(cnt)           do {} while (0)
-#define ADD_PERF_COUNTER(cnt, add)      do {} while (0)
-#define AVG_PERF_COUNTER(cnt, sample)   do {} while (0)
-#define GET_PERF_COUNTER(cnt)           (0)
-#define GET_AVG_PERF_COUNTER(cnt)       (0)
-#endif /* MLX4_EN_PERF_STAT */
 /* Constants for TX flow */
 enum {
         MAX_INLINE      = 104, /* 128 - 16 - 4 - 4 */
@@ -599,7 +578,6 @@ struct mlx4_en_priv {
         struct work_struct linkstate_task;
         struct delayed_work stats_task;
         struct delayed_work service_task;
-        struct mlx4_en_perf_stats pstats;
         struct mlx4_en_pkt_stats pkstats;
         struct mlx4_en_counter_stats pf_stats;
         struct mlx4_en_flow_stats_rx rx_priority_flowstats[MLX4_NUM_PRIORITIES];
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
@@ -2,12 +2,6 @@
 #ifndef _MLX4_STATS_
 #define _MLX4_STATS_
-#ifdef MLX4_EN_PERF_STAT
-#define NUM_PERF_STATS          NUM_PERF_COUNTERS
-#else
-#define NUM_PERF_STATS          0
-#endif
 #define NUM_PRIORITIES  9
 #define NUM_PRIORITY_STATS      2
@@ -46,16 +40,6 @@ struct mlx4_en_port_stats {
 #define NUM_PORT_STATS          10
 };
-struct mlx4_en_perf_stats {
-        u32 tx_poll;
-        u64 tx_pktsz_avg;
-        u32 inflight_avg;
-        u16 tx_coal_avg;
-        u16 rx_coal_avg;
-        u32 napi_quota;
-#define NUM_PERF_COUNTERS 6
-};
 struct mlx4_en_xdp_stats {
         unsigned long rx_xdp_drop;
         unsigned long rx_xdp_tx;
@@ -135,7 +119,7 @@ enum {
 };
 #define NUM_ALL_STATS   (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + \
-                         NUM_FLOW_STATS + NUM_PERF_STATS + NUM_PF_STATS + \
+                         NUM_FLOW_STATS + NUM_PF_STATS + \
                          NUM_XDP_STATS + NUM_PHY_STATS)
 #define MLX4_FIND_NETDEV_STAT(n)        (offsetof(struct net_device_stats, n) / \
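As for what the deleted counters actually computed: AVG_PERF_COUNTER maintained an exponentially weighted moving average in fixed point, scaling each sample by AVG_FACTOR so that integer division keeps fractional precision, with an effective window of AVG_SIZE samples. A minimal standalone sketch using the constants and macros from the removed mlx4_en.h block; the main() harness and the 1500-byte sample stream are illustrative only:

#include <stdio.h>
#include <stdint.h>

/* Constants copied from the block removed from mlx4_en.h. */
#define AVG_SIZE        128     /* effective averaging window, in samples */
#define AVG_FACTOR      1024    /* fixed-point scale for sub-integer precision */

/* The removed macro: each call gives the new sample weight 1/AVG_SIZE
 * and the accumulated history weight (AVG_SIZE - 1)/AVG_SIZE.
 */
#define AVG_PERF_COUNTER(cnt, sample) \
        ((cnt) = ((cnt) * (AVG_SIZE - 1) + (sample) * AVG_FACTOR) / AVG_SIZE)

/* Reading the counter back drops the fixed-point scale again. */
#define GET_AVG_PERF_COUNTER(cnt)       ((cnt) / AVG_FACTOR)

int main(void)
{
        uint64_t avg = 0;       /* mirrors the u64 tx_pktsz_avg field */

        /* Feed a constant 1500-byte packet size; the fixed-point value
         * converges toward 1500 * AVG_FACTOR within a few hundred samples.
         */
        for (int i = 0; i < 2000; i++)
                AVG_PERF_COUNTER(avg, 1500);

        printf("avg pkt size ~ %llu bytes\n",
               (unsigned long long)GET_AVG_PERF_COUNTER(avg));  /* ~1500 */
        return 0;
}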