Commit 05909bab authored by Eran Ben Elisha, committed by Saeed Mahameed

net/mlx5e: Avoid reset netdev stats on configuration changes

Move all RQ, SQ and channel counters from the channel objects into the
priv structure.  With this change, counters will not be reset upon
channel configuration changes.
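
In schematic form (the helper below is invented purely for illustration; the
fields and assignments themselves are taken from the hunks further down), the
counters now live in priv->channel_stats[] for the lifetime of the netdev, and
opening a channel merely re-attaches the rings' stats pointers to the
persistent slot:

/* Illustrative helper only -- not part of the patch. Field names as below. */
static void mlx5e_wire_channel_stats(struct mlx5e_priv *priv,
				     struct mlx5e_channel *c, int ix, int tc)
{
	c->stats        = &priv->channel_stats[ix].ch;
	c->rq.stats     = &priv->channel_stats[ix].rq;
	c->sq[tc].stats = &priv->channel_stats[ix].sq[tc];
}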

Statistics for SQs associated with TCs higher than zero are presented in
ethtool -S only for SQs that were opened at least once since the module was
loaded (regardless of their current open/closed state). This is done in
order to reduce the number of statistics presented and calculated for the
common out-of-the-box use case (no QoS).
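
The gating can be pictured with a small stand-alone model (illustrative only;
the helper names and NUM_SQ_STATS value are invented, and only max_opened_tc
and the per-TC multiplication mirror the patch): max_opened_tc is a high-water
mark, so once a TC above zero was ever opened, its SQ counters stay visible in
ethtool -S even after the configuration drops back to a single TC.

#include <stdio.h>

#define NUM_SQ_STATS 10 /* per-SQ counters exposed via ethtool (example value) */

struct priv {
	int max_nch;                 /* maximum number of channels of the profile */
	unsigned char max_opened_tc; /* high-water mark, never decreased */
};

/* Called whenever a new TC configuration is applied (mqprio). */
static void apply_num_tc(struct priv *priv, int num_tc)
{
	if (num_tc > priv->max_opened_tc)
		priv->max_opened_tc = (unsigned char)num_tc;
}

/* Number of per-SQ ethtool counters presented. */
static int num_sq_ethtool_stats(const struct priv *priv)
{
	return NUM_SQ_STATS * priv->max_nch * priv->max_opened_tc;
}

int main(void)
{
	struct priv priv = { .max_nch = 8, .max_opened_tc = 1 };

	printf("no QoS:      %d\n", num_sq_ethtool_stats(&priv)); /* 80 */
	apply_num_tc(&priv, 4); /* user enables 4 TCs ...              */
	apply_num_tc(&priv, 1); /* ... and later disables QoS again    */
	printf("after 4 TCs: %d\n", num_sq_ethtool_stats(&priv)); /* stays 320 */
	return 0;
}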

mlx5e_channel_stats is a compound of the CH, RQ, and SQ stats, in order to
create cache locality for the NAPI handler when it processes TX and RX
completions of the same channel.

The new per-channel statistics struct is cache-line aligned so that several
channels do not update the same cache line at the same time.
Packet rate was tested; no degradation was observed.
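
The locality and alignment points can also be seen in a user-space toy (again
purely illustrative; only the struct layout, the cache-line alignment idea and
the pointer re-attachment come from the patch): all counters a NAPI poll
touches for one channel sit in one slot, the slot is aligned so neighbouring
channels do not false-share, and the slot outlives any individual ring, so a
close/open cycle does not zero the counters.

#include <stdio.h>
#include <string.h>

#define MAX_NUM_CHANNELS 4
#define MAX_NUM_TC       8

struct rq_stats { unsigned long packets, bytes; };
struct sq_stats { unsigned long packets, bytes; };
struct ch_stats { unsigned long eq_rearm; };

/* One slot per channel; RX, TX and channel counters live side by side so a
 * NAPI poll touches one contiguous region, and the 64-byte alignment keeps
 * different channels on different cache lines (mimics
 * ____cacheline_aligned_in_smp).
 */
struct channel_stats {
	struct ch_stats ch;
	struct sq_stats sq[MAX_NUM_TC];
	struct rq_stats rq;
} __attribute__((aligned(64)));

struct priv {
	struct channel_stats channel_stats[MAX_NUM_CHANNELS]; /* owned here */
};

struct rq { struct rq_stats *stats; }; /* rings only keep pointers */

static void open_rq(struct priv *priv, struct rq *rq, int ix)
{
	rq->stats = &priv->channel_stats[ix].rq; /* note: no memset() */
}

int main(void)
{
	struct priv priv = { 0 };
	struct rq rq;

	open_rq(&priv, &rq, 0);
	rq.stats->packets = 1000;

	/* Channel reconfiguration: the ring is destroyed and re-created. */
	memset(&rq, 0, sizeof(rq));
	open_rq(&priv, &rq, 0);

	printf("packets after reopen: %lu\n", rq.stats->packets); /* still 1000 */
	return 0;
}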
Signed-off-by: Eran Ben Elisha <eranbe@mellanox.com>
CC: Qing Huang <qing.huang@oracle.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent 868a01a2
@@ -358,7 +358,6 @@ struct mlx5e_txqsq {
 	/* dirtied @xmit */
 	u16 pc ____cacheline_aligned_in_smp;
 	u32 dma_fifo_pc;
-	struct mlx5e_sq_stats stats;

 	struct mlx5e_cq cq;
@@ -371,6 +370,7 @@ struct mlx5e_txqsq {
 	/* read only */
 	struct mlx5_wq_cyc wq;
 	u32 dma_fifo_mask;
+	struct mlx5e_sq_stats *stats;
 	void __iomem *uar_map;
 	struct netdev_queue *txq;
 	u32 sqn;
@@ -526,7 +526,7 @@ struct mlx5e_rq {
 	struct mlx5e_channel *channel;
 	struct device *pdev;
 	struct net_device *netdev;
-	struct mlx5e_rq_stats stats;
+	struct mlx5e_rq_stats *stats;
 	struct mlx5e_cq cq;
 	struct mlx5e_page_cache page_cache;
 	struct hwtstamp_config *tstamp;
@@ -574,7 +574,7 @@ struct mlx5e_channel {
 	/* data path - accessed per napi poll */
 	struct irq_desc *irq_desc;
-	struct mlx5e_ch_stats stats;
+	struct mlx5e_ch_stats *stats;

 	/* control */
 	struct mlx5e_priv *priv;
@@ -590,6 +590,12 @@ struct mlx5e_channels {
 	struct mlx5e_params params;
 };

+struct mlx5e_channel_stats {
+	struct mlx5e_ch_stats ch;
+	struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
+	struct mlx5e_rq_stats rq;
+} ____cacheline_aligned_in_smp;
+
 enum mlx5e_traffic_types {
 	MLX5E_TT_IPV4_TCP,
 	MLX5E_TT_IPV6_TCP,
@@ -793,6 +799,8 @@ struct mlx5e_priv {
 	struct mlx5_core_dev *mdev;
 	struct net_device *netdev;
 	struct mlx5e_stats stats;
+	struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];
+	u8 max_opened_tc;
 	struct hwtstamp_config tstamp;
 	u16 q_counter;
 	u16 drop_rq_q_counter;
......
@@ -174,7 +174,7 @@ mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context *context,
 	int headln;
 	int i;

-	sq->stats.tls_ooo++;
+	sq->stats->tls_ooo++;

 	if (mlx5e_tls_get_sync_data(context, tcp_seq, &info)) {
 		/* We might get here if a retransmission reaches the driver
@@ -220,7 +220,7 @@ mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context *context,
 	skb_shinfo(nskb)->nr_frags = info.nr_frags;
 	nskb->data_len = info.sync_len;
 	nskb->len += info.sync_len;
-	sq->stats.tls_resync_bytes += nskb->len;
+	sq->stats->tls_resync_bytes += nskb->len;
 	mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln,
 				    cpu_to_be64(info.rcd_sn));
 	mlx5e_sq_xmit(sq, nskb, *wqe, *pi);
......
@@ -423,6 +423,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 	rq->ix = c->ix;
 	rq->mdev = mdev;
 	rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+	rq->stats = &c->priv->channel_stats[c->ix].rq;

 	rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
 	if (IS_ERR(rq->xdp_prog)) {
@@ -1003,7 +1004,8 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
 			     int txq_ix,
 			     struct mlx5e_params *params,
 			     struct mlx5e_sq_param *param,
-			     struct mlx5e_txqsq *sq)
+			     struct mlx5e_txqsq *sq,
+			     int tc)
 {
 	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
 	struct mlx5_core_dev *mdev = c->mdev;
@@ -1018,6 +1020,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
 	sq->txq_ix = txq_ix;
 	sq->uar_map = mdev->mlx5e_res.bfreg.map;
 	sq->min_inline_mode = params->tx_min_inline_mode;
+	sq->stats = &c->priv->channel_stats[c->ix].sq[tc];
 	INIT_WORK(&sq->recover.recover_work, mlx5e_sq_recover);
 	if (MLX5_IPSEC_DEV(c->priv->mdev))
 		set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
@@ -1176,13 +1179,14 @@ static int mlx5e_open_txqsq(struct mlx5e_channel *c,
 			    int txq_ix,
 			    struct mlx5e_params *params,
 			    struct mlx5e_sq_param *param,
-			    struct mlx5e_txqsq *sq)
+			    struct mlx5e_txqsq *sq,
+			    int tc)
 {
 	struct mlx5e_create_sq_param csp = {};
 	u32 tx_rate;
 	int err;

-	err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq);
+	err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq, tc);
 	if (err)
 		return err;
@@ -1370,7 +1374,7 @@ static void mlx5e_sq_recover(struct work_struct *work)
 		return;

 	mlx5e_reset_txqsq_cc_pc(sq);
-	sq->stats.recover++;
+	sq->stats->recover++;
 	recover->last_recover = jiffies;
 	mlx5e_activate_txqsq(sq);
 }
@@ -1665,14 +1669,14 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c,
 			  struct mlx5e_params *params,
 			  struct mlx5e_channel_param *cparam)
 {
-	int err;
-	int tc;
+	struct mlx5e_priv *priv = c->priv;
+	int err, tc, max_nch = priv->profile->max_nch(priv->mdev);

 	for (tc = 0; tc < params->num_tc; tc++) {
-		int txq_ix = c->ix + tc * params->num_channels;
+		int txq_ix = c->ix + tc * max_nch;

 		err = mlx5e_open_txqsq(c, c->priv->tisn[tc], txq_ix,
-				       params, &cparam->sq, &c->sq[tc]);
+				       params, &cparam->sq, &c->sq[tc], tc);
 		if (err)
 			goto err_close_sqs;
 	}
@@ -1802,6 +1806,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 	c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
 	c->num_tc = params->num_tc;
 	c->xdp = !!params->xdp_prog;
+	c->stats = &priv->channel_stats[ix].ch;

 	mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
 	c->irq_desc = irq_to_desc(irq);
@@ -2634,7 +2639,7 @@ static void mlx5e_build_channels_tx_maps(struct mlx5e_priv *priv)
 	struct mlx5e_txqsq *sq;
 	int i, tc;

-	for (i = 0; i < priv->channels.num; i++)
+	for (i = 0; i < priv->profile->max_nch(priv->mdev); i++)
 		for (tc = 0; tc < priv->profile->max_tc; tc++)
 			priv->channel_tc2txq[i][tc] = i + tc * priv->channels.num;
@@ -3139,6 +3144,8 @@ static int mlx5e_setup_tc_mqprio(struct net_device *netdev,
 	if (err)
 		goto out;

+	priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
+				    new_channels.params.num_tc);
 	mlx5e_switch_priv_channels(priv, &new_channels, NULL);
 out:
 	mutex_unlock(&priv->state_lock);
@@ -3826,7 +3833,7 @@ static bool mlx5e_tx_timeout_eq_recover(struct net_device *dev,
 		return false;

 	netdev_err(dev, "Recover %d eqes on EQ 0x%x\n", eqe_count, eq->eqn);
-	sq->channel->stats.eq_rearm++;
+	sq->channel->stats->eq_rearm++;
 	return true;
 }
@@ -4250,6 +4257,7 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
 	priv->profile = profile;
 	priv->ppriv = ppriv;
 	priv->msglevel = MLX5E_MSG_LEVEL;
+	priv->max_opened_tc = 1;

 	mlx5e_build_nic_params(mdev, &priv->channels.params,
 			       profile->max_nch(mdev), netdev->mtu);
......
@@ -138,13 +138,13 @@ static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
 	for (i = 0; i < priv->channels.num; i++) {
 		struct mlx5e_channel *c = priv->channels.c[i];

-		rq_stats = &c->rq.stats;
+		rq_stats = c->rq.stats;

 		s->rx_packets += rq_stats->packets;
 		s->rx_bytes += rq_stats->bytes;

 		for (j = 0; j < priv->channels.params.num_tc; j++) {
-			sq_stats = &c->sq[j].stats;
+			sq_stats = c->sq[j].stats;

 			s->tx_packets += sq_stats->packets;
 			s->tx_bytes += sq_stats->bytes;
......
@@ -65,7 +65,7 @@ static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq,
 	mlx5e_read_cqe_slot(cq, cqcc, &cq->title);
 	cq->decmprs_left = be32_to_cpu(cq->title.byte_cnt);
 	cq->decmprs_wqe_counter = be16_to_cpu(cq->title.wqe_counter);
-	rq->stats.cqe_compress_blks++;
+	rq->stats->cqe_compress_blks++;
 }

 static inline void mlx5e_read_mini_arr_slot(struct mlx5e_cq *cq, u32 cqcc)
@@ -146,7 +146,7 @@ static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
 	mlx5e_cqes_update_owner(cq, cq->wq.cc, cqcc - cq->wq.cc);
 	cq->wq.cc = cqcc;
 	cq->decmprs_left -= cqe_count;
-	rq->stats.cqe_compress_pkts += cqe_count;
+	rq->stats->cqe_compress_pkts += cqe_count;

 	return cqe_count;
 }
@@ -176,14 +176,15 @@ static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
 {
 	struct mlx5e_page_cache *cache = &rq->page_cache;
 	u32 tail_next = (cache->tail + 1) & (MLX5E_CACHE_SIZE - 1);
+	struct mlx5e_rq_stats *stats = rq->stats;

 	if (tail_next == cache->head) {
-		rq->stats.cache_full++;
+		stats->cache_full++;
 		return false;
 	}

 	if (unlikely(mlx5e_page_is_reserved(dma_info->page))) {
-		rq->stats.cache_waive++;
+		stats->cache_waive++;
 		return false;
 	}
@@ -196,20 +197,21 @@ static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq,
 				      struct mlx5e_dma_info *dma_info)
 {
 	struct mlx5e_page_cache *cache = &rq->page_cache;
+	struct mlx5e_rq_stats *stats = rq->stats;

 	if (unlikely(cache->head == cache->tail)) {
-		rq->stats.cache_empty++;
+		stats->cache_empty++;
 		return false;
 	}

 	if (page_ref_count(cache->page_cache[cache->head].page) != 1) {
-		rq->stats.cache_busy++;
+		stats->cache_busy++;
 		return false;
 	}

 	*dma_info = cache->page_cache[cache->head];
 	cache->head = (cache->head + 1) & (MLX5E_CACHE_SIZE - 1);
-	rq->stats.cache_reuse++;
+	stats->cache_reuse++;

 	dma_sync_single_for_device(rq->pdev, dma_info->addr,
 				   RQ_PAGE_SIZE(rq),
@@ -294,7 +296,7 @@ static inline void mlx5e_free_rx_wqe_reuse(struct mlx5e_rq *rq,
 					   struct mlx5e_wqe_frag_info *wi)
 {
 	if (mlx5e_page_reuse(rq, wi)) {
-		rq->stats.page_reuse++;
+		rq->stats->page_reuse++;
 		return;
 	}
@@ -452,7 +454,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 		dma_info--;
 		mlx5e_page_release(rq, dma_info, true);
 	}
-	rq->stats.buff_alloc_err++;
+	rq->stats->buff_alloc_err++;

 	return err;
 }
@@ -480,7 +482,7 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
 		err = mlx5e_alloc_rx_wqe(rq, wqe, wq->head);
 		if (unlikely(err)) {
-			rq->stats.buff_alloc_err++;
+			rq->stats->buff_alloc_err++;
 			break;
 		}
@@ -652,6 +654,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
 				     struct sk_buff *skb,
 				     bool lro)
 {
+	struct mlx5e_rq_stats *stats = rq->stats;
 	int network_depth = 0;

 	if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
@@ -659,7 +662,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
 	if (lro) {
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
-		rq->stats.csum_unnecessary++;
+		stats->csum_unnecessary++;
 		return;
 	}
@@ -674,7 +677,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
 		skb->csum = csum_partial(skb->data + ETH_HLEN,
 					 network_depth - ETH_HLEN,
 					 skb->csum);
-		rq->stats.csum_complete++;
+		stats->csum_complete++;
 		return;
 	}
@@ -684,15 +687,15 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
 		if (cqe_is_tunneled(cqe)) {
 			skb->csum_level = 1;
 			skb->encapsulation = 1;
-			rq->stats.csum_unnecessary_inner++;
+			stats->csum_unnecessary_inner++;
 			return;
 		}
-		rq->stats.csum_unnecessary++;
+		stats->csum_unnecessary++;
 		return;
 	}
 csum_none:
 	skb->ip_summed = CHECKSUM_NONE;
-	rq->stats.csum_none++;
+	stats->csum_none++;
 }

 static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
@@ -701,6 +704,7 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
 				      struct sk_buff *skb)
 {
 	u8 lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
+	struct mlx5e_rq_stats *stats = rq->stats;
 	struct net_device *netdev = rq->netdev;

 	skb->mac_len = ETH_HLEN;
@@ -710,9 +714,9 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
 		/* Subtract one since we already counted this as one
 		 * "regular" packet in mlx5e_complete_rx_cqe()
 		 */
-		rq->stats.packets += lro_num_seg - 1;
-		rq->stats.lro_packets++;
-		rq->stats.lro_bytes += cqe_bcnt;
+		stats->packets += lro_num_seg - 1;
+		stats->lro_packets++;
+		stats->lro_bytes += cqe_bcnt;
 	}

 	if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp)))
@@ -727,7 +731,7 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
 	if (cqe_has_vlan(cqe)) {
 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
 				       be16_to_cpu(cqe->vlan_info));
-		rq->stats.removed_vlan_packets++;
+		stats->removed_vlan_packets++;
 	}

 	skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;
@@ -741,8 +745,10 @@ static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
 					 u32 cqe_bcnt,
 					 struct sk_buff *skb)
 {
-	rq->stats.packets++;
-	rq->stats.bytes += cqe_bcnt;
+	struct mlx5e_rq_stats *stats = rq->stats;
+
+	stats->packets++;
+	stats->bytes += cqe_bcnt;
 	mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
 }
@@ -774,10 +780,12 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
 	dma_addr_t dma_addr = di->addr + data_offset;
 	unsigned int dma_len = xdp->data_end - xdp->data;
+	struct mlx5e_rq_stats *stats = rq->stats;

 	prefetchw(wqe);

 	if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || rq->hw_mtu < dma_len)) {
-		rq->stats.xdp_drop++;
+		stats->xdp_drop++;
 		return false;
 	}
@@ -787,7 +795,7 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
 			mlx5e_xmit_xdp_doorbell(sq);
 			sq->db.doorbell = false;
 		}
-		rq->stats.xdp_tx_full++;
+		stats->xdp_tx_full++;
 		return false;
 	}
@@ -821,7 +829,7 @@ static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
 	sq->db.doorbell = true;

-	rq->stats.xdp_tx++;
+	stats->xdp_tx++;
 	return true;
 }
@@ -868,7 +876,7 @@ static inline bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
 	case XDP_ABORTED:
 		trace_xdp_exception(rq->netdev, prog, act);
 	case XDP_DROP:
-		rq->stats.xdp_drop++;
+		rq->stats->xdp_drop++;
 		return true;
 	}
 }
@@ -881,7 +889,7 @@ struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
 	struct sk_buff *skb = build_skb(va, frag_size);

 	if (unlikely(!skb)) {
-		rq->stats.buff_alloc_err++;
+		rq->stats->buff_alloc_err++;
 		return NULL;
 	}
@@ -913,7 +921,7 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 	wi->offset += frag_size;

 	if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
-		rq->stats.wqe_err++;
+		rq->stats->wqe_err++;
 		return NULL;
 	}
@@ -1030,7 +1038,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
 	skb = napi_alloc_skb(rq->cq.napi,
 			     ALIGN(MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, sizeof(long)));
 	if (unlikely(!skb)) {
-		rq->stats.buff_alloc_err++;
+		rq->stats->buff_alloc_err++;
 		return NULL;
 	}
@@ -1116,12 +1124,12 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 	wi->consumed_strides += cstrides;

 	if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
-		rq->stats.wqe_err++;
+		rq->stats->wqe_err++;
 		goto mpwrq_cqe_out;
 	}

 	if (unlikely(mpwrq_is_filler_cqe(cqe))) {
-		rq->stats.mpwqe_filler++;
+		rq->stats->mpwqe_filler++;
 		goto mpwrq_cqe_out;
 	}
@@ -1276,6 +1284,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
 					 u32 cqe_bcnt,
 					 struct sk_buff *skb)
 {
+	struct mlx5e_rq_stats *stats = rq->stats;
 	struct hwtstamp_config *tstamp;
 	struct net_device *netdev;
 	struct mlx5e_priv *priv;
@@ -1337,9 +1346,9 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
 	skb->dev = netdev;

-	rq->stats.csum_complete++;
-	rq->stats.packets++;
-	rq->stats.bytes += cqe_bcnt;
+	stats->csum_complete++;
+	stats->packets++;
+	stats->bytes += cqe_bcnt;
 }

 void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
......
@@ -111,20 +111,19 @@ static int mlx5e_grp_sw_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
 void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
 {
 	struct mlx5e_sw_stats temp, *s = &temp;
-	struct mlx5e_rq_stats *rq_stats;
-	struct mlx5e_sq_stats *sq_stats;
-	struct mlx5e_ch_stats *ch_stats;
-	int i, j;
+	int i;

 	memset(s, 0, sizeof(*s));
 	read_lock(&priv->stats_lock);
 	if (!priv->channels_active)
 		goto out;

-	for (i = 0; i < priv->channels.num; i++) {
-		struct mlx5e_channel *c = priv->channels.c[i];
-
-		rq_stats = &c->rq.stats;
-		ch_stats = &c->stats;
+	for (i = 0; i < priv->profile->max_nch(priv->mdev); i++) {
+		struct mlx5e_channel_stats *channel_stats =
+			&priv->channel_stats[i];
+		struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
+		struct mlx5e_ch_stats *ch_stats = &channel_stats->ch;
+		int j;

 		s->rx_packets += rq_stats->packets;
 		s->rx_bytes += rq_stats->bytes;
@@ -151,8 +150,8 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
 		s->rx_cache_waive += rq_stats->cache_waive;
 		s->ch_eq_rearm += ch_stats->eq_rearm;

-		for (j = 0; j < priv->channels.params.num_tc; j++) {
-			sq_stats = &c->sq[j].stats;
+		for (j = 0; j < priv->max_opened_tc; j++) {
+			struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];

 			s->tx_packets += sq_stats->packets;
 			s->tx_bytes += sq_stats->bytes;
@@ -1160,30 +1159,37 @@ static const struct counter_desc ch_stats_desc[] = {

 static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
 {
-	return (NUM_RQ_STATS * priv->channels.num) +
-		(NUM_CH_STATS * priv->channels.num) +
-		(NUM_SQ_STATS * priv->channels.num * priv->channels.params.num_tc);
+	int max_nch = priv->profile->max_nch(priv->mdev);
+
+	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+		return 0;
+
+	return (NUM_RQ_STATS * max_nch) +
+	       (NUM_CH_STATS * max_nch) +
+	       (NUM_SQ_STATS * max_nch * priv->max_opened_tc);
 }

 static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
 					   int idx)
 {
+	int max_nch = priv->profile->max_nch(priv->mdev);
 	int i, j, tc;

 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
 		return idx;

-	for (i = 0; i < priv->channels.num; i++)
+	for (i = 0; i < max_nch; i++)
 		for (j = 0; j < NUM_CH_STATS; j++)
 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
 				ch_stats_desc[j].format, i);

-	for (i = 0; i < priv->channels.num; i++)
+	for (i = 0; i < max_nch; i++)
 		for (j = 0; j < NUM_RQ_STATS; j++)
 			sprintf(data + (idx++) * ETH_GSTRING_LEN, rq_stats_desc[j].format, i);

-	for (tc = 0; tc < priv->channels.params.num_tc; tc++)
-		for (i = 0; i < priv->channels.num; i++)
+	/* priv->channel_tc2txq[i][tc] is valid only when device is open */
+	for (tc = 0; tc < priv->max_opened_tc; tc++)
+		for (i = 0; i < max_nch; i++)
 			for (j = 0; j < NUM_SQ_STATS; j++)
 				sprintf(data + (idx++) * ETH_GSTRING_LEN,
 					sq_stats_desc[j].format,
@@ -1195,29 +1201,29 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
 static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
 					 int idx)
 {
-	struct mlx5e_channels *channels = &priv->channels;
+	int max_nch = priv->profile->max_nch(priv->mdev);
 	int i, j, tc;

 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
 		return idx;

-	for (i = 0; i < channels->num; i++)
+	for (i = 0; i < max_nch; i++)
 		for (j = 0; j < NUM_CH_STATS; j++)
 			data[idx++] =
-				MLX5E_READ_CTR64_CPU(&channels->c[i]->stats,
+				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].ch,
 						     ch_stats_desc, j);

-	for (i = 0; i < channels->num; i++)
+	for (i = 0; i < max_nch; i++)
 		for (j = 0; j < NUM_RQ_STATS; j++)
 			data[idx++] =
-				MLX5E_READ_CTR64_CPU(&channels->c[i]->rq.stats,
+				MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq,
 						     rq_stats_desc, j);

-	for (tc = 0; tc < priv->channels.params.num_tc; tc++)
-		for (i = 0; i < channels->num; i++)
+	for (tc = 0; tc < priv->max_opened_tc; tc++)
+		for (i = 0; i < max_nch; i++)
 			for (j = 0; j < NUM_SQ_STATS; j++)
 				data[idx++] =
-					MLX5E_READ_CTR64_CPU(&channels->c[i]->sq[tc].stats,
+					MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].sq[tc],
 							     sq_stats_desc, j);

 	return idx;
......
@@ -220,28 +220,29 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct
 		if (skb->encapsulation) {
 			eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
 					  MLX5_ETH_WQE_L4_INNER_CSUM;
-			sq->stats.csum_partial_inner++;
+			sq->stats->csum_partial_inner++;
 		} else {
 			eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
-			sq->stats.csum_partial++;
+			sq->stats->csum_partial++;
 		}
 	} else
-		sq->stats.csum_none++;
+		sq->stats->csum_none++;
 }

 static inline u16
 mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb)
 {
+	struct mlx5e_sq_stats *stats = sq->stats;
 	u16 ihs;

 	if (skb->encapsulation) {
 		ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
-		sq->stats.tso_inner_packets++;
-		sq->stats.tso_inner_bytes += skb->len - ihs;
+		stats->tso_inner_packets++;
+		stats->tso_inner_bytes += skb->len - ihs;
 	} else {
 		ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
-		sq->stats.tso_packets++;
-		sq->stats.tso_bytes += skb->len - ihs;
+		stats->tso_packets++;
+		stats->tso_bytes += skb->len - ihs;
 	}

 	return ihs;
@@ -311,7 +312,7 @@ static inline void mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq,
 		wi->num_wqebbs = 1;
 		mlx5e_post_nop(wq, sq->sqn, &sq->pc);
 	}
-	sq->stats.nop += nnops;
+	sq->stats->nop += nnops;
 }

 static inline void
@@ -337,7 +338,7 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 	sq->pc += wi->num_wqebbs;
 	if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, MLX5E_SQ_STOP_ROOM))) {
 		netif_tx_stop_queue(sq->txq);
-		sq->stats.stopped++;
+		sq->stats->stopped++;
 	}

 	if (!skb->xmit_more || netif_xmit_stopped(sq->txq))
@@ -355,6 +356,7 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 	struct mlx5_wqe_data_seg *dseg;
 	struct mlx5e_tx_wqe_info *wi;

+	struct mlx5e_sq_stats *stats = sq->stats;
 	unsigned char *skb_data = skb->data;
 	unsigned int skb_len = skb->len;
 	u16 ds_cnt, ds_cnt_inl = 0;
@@ -371,17 +373,17 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 		mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
 		ihs = mlx5e_tx_get_gso_ihs(sq, skb);
 		num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
-		sq->stats.packets += skb_shinfo(skb)->gso_segs;
+		stats->packets += skb_shinfo(skb)->gso_segs;
 	} else {
 		opcode = MLX5_OPCODE_SEND;
 		mss = 0;
 		ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
 		num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
-		sq->stats.packets++;
+		stats->packets++;
 	}
-	sq->stats.bytes += num_bytes;
-	sq->stats.xmit_more += skb->xmit_more;
+	stats->bytes += num_bytes;
+	stats->xmit_more += skb->xmit_more;

 	headlen = skb_len - ihs - skb->data_len;
 	ds_cnt += !!headlen;
@@ -415,7 +417,7 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 		if (skb_vlan_tag_present(skb)) {
 			mlx5e_insert_vlan(eseg->inline_hdr.start, skb,
 					  ihs - VLAN_HLEN, &skb_data, &skb_len);
-			sq->stats.added_vlan_packets++;
+			stats->added_vlan_packets++;
 		} else {
 			memcpy(eseg->inline_hdr.start, skb_data, ihs);
 			mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
@@ -427,7 +429,7 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 		if (skb->vlan_proto == cpu_to_be16(ETH_P_8021AD))
 			eseg->insert.type |= cpu_to_be16(MLX5_ETH_WQE_SVLAN);
 		eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
-		sq->stats.added_vlan_packets++;
+		stats->added_vlan_packets++;
 	}

 	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen, dseg);
@@ -440,7 +442,7 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 	return NETDEV_TX_OK;

 err_drop:
-	sq->stats.dropped++;
+	stats->dropped++;
 	dev_kfree_skb_any(skb);

 	return NETDEV_TX_OK;
@@ -524,7 +526,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 			queue_work(cq->channel->priv->wq,
 				   &sq->recover.recover_work);
 		}
-		sq->stats.cqe_err++;
+		sq->stats->cqe_err++;
 	}

 	do {
@@ -584,7 +586,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 				   MLX5E_SQ_STOP_ROOM) &&
 	    !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
 		netif_tx_wake_queue(sq->txq);
-		sq->stats.wake++;
+		sq->stats->wake++;
 	}

 	return (i == MLX5E_TX_CQ_POLL_BUDGET);
@@ -641,6 +643,7 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 	struct mlx5_wqe_data_seg *dseg;
 	struct mlx5e_tx_wqe_info *wi;

+	struct mlx5e_sq_stats *stats = sq->stats;
 	unsigned char *skb_data = skb->data;
 	unsigned int skb_len = skb->len;
 	u16 headlen, ihs, pi, frag_pi;
@@ -659,17 +662,17 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 		mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
 		ihs = mlx5e_tx_get_gso_ihs(sq, skb);
 		num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
-		sq->stats.packets += skb_shinfo(skb)->gso_segs;
+		stats->packets += skb_shinfo(skb)->gso_segs;
 	} else {
 		opcode = MLX5_OPCODE_SEND;
 		mss = 0;
 		ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
 		num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
-		sq->stats.packets++;
+		stats->packets++;
 	}
-	sq->stats.bytes += num_bytes;
-	sq->stats.xmit_more += skb->xmit_more;
+	stats->bytes += num_bytes;
+	stats->xmit_more += skb->xmit_more;

 	headlen = skb_len - ihs - skb->data_len;
 	ds_cnt += !!headlen;
@@ -716,7 +719,7 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 	return NETDEV_TX_OK;

 err_drop:
-	sq->stats.dropped++;
+	stats->dropped++;
 	dev_kfree_skb_any(skb);

 	return NETDEV_TX_OK;
......
@@ -46,24 +46,26 @@ static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c)
 static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq)
 {
+	struct mlx5e_sq_stats *stats = sq->stats;
 	struct net_dim_sample dim_sample;

 	if (unlikely(!test_bit(MLX5E_SQ_STATE_AM, &sq->state)))
 		return;

-	net_dim_sample(sq->cq.event_ctr, sq->stats.packets, sq->stats.bytes,
+	net_dim_sample(sq->cq.event_ctr, stats->packets, stats->bytes,
 		       &dim_sample);
 	net_dim(&sq->dim, dim_sample);
 }

 static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq)
 {
+	struct mlx5e_rq_stats *stats = rq->stats;
 	struct net_dim_sample dim_sample;

 	if (unlikely(!test_bit(MLX5E_RQ_STATE_AM, &rq->state)))
 		return;

-	net_dim_sample(rq->cq.event_ctr, rq->stats.packets, rq->stats.bytes,
+	net_dim_sample(rq->cq.event_ctr, stats->packets, stats->bytes,
 		       &dim_sample);
 	net_dim(&rq->dim, dim_sample);
 }
......