Commit f62a5e71 authored by Jakub Kicinski

Merge branch 'mlx5e-per-queue-coalescing'

Tariq Toukan says:

====================
mlx5e per-queue coalescing

This patchset adds ethtool per-queue coalescing support for the mlx5e
driver.

The series introduces some changes needed as preparation for the final
patch, which adds the support and implements the callbacks. Main
changes:
- DIM code movements into its own header file.
- Switch to dynamic allocation of the DIM struct in the RQs/SQs.
- Allow coalescing config change without channels reset when possible.
====================

Link: https://lore.kernel.org/r/20240419080445.417574-1-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents b240fc56 651ebaad
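For illustration, the interface added here is the standard ethtool per-queue coalescing API; a queue's settings can be inspected and changed with commands along these lines (interface name, queue mask and values below are only examples):

    ethtool --per-queue eth0 queue_mask 0x1 --show-coalesce
    ethtool --per-queue eth0 queue_mask 0x1 --coalesce rx-usecs 16 rx-frames 32 adaptive-rx off
    ethtool --per-queue eth0 queue_mask 0x2 --coalesce adaptive-rx on adaptive-tx on

Such requests are routed to the driver's get_per_queue_coalesce()/set_per_queue_coalesce() ethtool_ops callbacks, which this series implements for mlx5e.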
@@ -320,6 +320,8 @@ struct mlx5e_params {
bool scatter_fcs_en;
bool rx_dim_enabled;
bool tx_dim_enabled;
bool rx_moder_use_cqe_mode;
bool tx_moder_use_cqe_mode;
u32 pflags;
struct bpf_prog *xdp_prog;
struct mlx5e_xsk *xsk;
@@ -430,7 +432,7 @@ struct mlx5e_txqsq {
u16 cc;
u16 skb_fifo_cc;
u32 dma_fifo_cc;
-struct dim dim; /* Adaptive Moderation */
+struct dim *dim; /* Adaptive Moderation */
/* dirtied @xmit */
u16 pc ____cacheline_aligned_in_smp;
@@ -722,7 +724,7 @@ struct mlx5e_rq {
int ix;
unsigned int hw_mtu;
-struct dim dim; /* Dynamic Interrupt Moderation */
+struct dim *dim; /* Dynamic Interrupt Moderation */
/* XDP */
struct bpf_prog __rcu *xdp_prog;
@@ -797,6 +799,10 @@ struct mlx5e_channel {
int cpu;
/* Sync between icosq recovery and XSK enable/disable. */
struct mutex icosq_recovery_lock;
/* coalescing configuration */
struct dim_cq_moder rx_cq_moder;
struct dim_cq_moder tx_cq_moder;
};
struct mlx5e_ptp;
@@ -1040,6 +1046,11 @@ void mlx5e_close_rq(struct mlx5e_rq *rq);
int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param, u16 q_counter);
void mlx5e_destroy_rq(struct mlx5e_rq *rq);
bool mlx5e_reset_rx_moderation(struct dim_cq_moder *cq_moder, u8 cq_period_mode,
bool dim_enabled);
bool mlx5e_reset_rx_channels_moderation(struct mlx5e_channels *chs, u8 cq_period_mode,
bool dim_enabled, bool keep_dim_state);
struct mlx5e_sq_param;
int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,
@@ -1060,6 +1071,10 @@ int mlx5e_open_cq(struct mlx5_core_dev *mdev, struct dim_cq_moder moder,
struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp,
struct mlx5e_cq *cq);
void mlx5e_close_cq(struct mlx5e_cq *cq);
int mlx5e_modify_cq_period_mode(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
u8 cq_period_mode);
int mlx5e_modify_cq_moderation(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
u16 cq_period, u16 cq_max_count, u8 cq_period_mode);
int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);
@@ -1118,6 +1133,11 @@ int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
void mlx5e_tx_err_cqe_work(struct work_struct *recover_work);
void mlx5e_close_txqsq(struct mlx5e_txqsq *sq);
bool mlx5e_reset_tx_moderation(struct dim_cq_moder *cq_moder, u8 cq_period_mode,
bool dim_enabled);
bool mlx5e_reset_tx_channels_moderation(struct mlx5e_channels *chs, u8 cq_period_mode,
bool dim_enabled, bool keep_dim_state);
static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
{
return MLX5_CAP_ETH(mdev, swp) &&
@@ -1179,6 +1199,10 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
struct ethtool_coalesce *coal,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack);
int mlx5e_get_per_queue_coalesce(struct net_device *dev, u32 queue,
struct ethtool_coalesce *coal);
int mlx5e_set_per_queue_coalesce(struct net_device *dev, u32 queue,
struct ethtool_coalesce *coal);
u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
@@ -1210,8 +1234,6 @@ int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
void mlx5e_netdev_attach_nic_profile(struct mlx5e_priv *priv);
void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv);
void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu);
-void mlx5e_rx_dim_work(struct work_struct *work);
-void mlx5e_tx_dim_work(struct work_struct *work);
void mlx5e_set_xdp_feature(struct net_device *netdev);
netdev_features_t mlx5e_features_check(struct sk_buff *skb,
...
@@ -3,6 +3,7 @@
#include "channels.h"
#include "en.h"
#include "en/dim.h"
#include "en/ptp.h" #include "en/ptp.h"
unsigned int mlx5e_channels_get_num(struct mlx5e_channels *chs) unsigned int mlx5e_channels_get_num(struct mlx5e_channels *chs)
...@@ -55,3 +56,85 @@ bool mlx5e_channels_get_ptp_rqn(struct mlx5e_channels *chs, u32 *rqn) ...@@ -55,3 +56,85 @@ bool mlx5e_channels_get_ptp_rqn(struct mlx5e_channels *chs, u32 *rqn)
*rqn = c->rq.rqn; *rqn = c->rq.rqn;
return true; return true;
} }
int mlx5e_channels_rx_change_dim(struct mlx5e_channels *chs, bool enable)
{
int i;
for (i = 0; i < chs->num; i++) {
int err = mlx5e_dim_rx_change(&chs->c[i]->rq, enable);
if (err)
return err;
}
return 0;
}
int mlx5e_channels_tx_change_dim(struct mlx5e_channels *chs, bool enable)
{
int i, tc;
for (i = 0; i < chs->num; i++) {
for (tc = 0; tc < mlx5e_get_dcb_num_tc(&chs->params); tc++) {
int err = mlx5e_dim_tx_change(&chs->c[i]->sq[tc], enable);
if (err)
return err;
}
}
return 0;
}
int mlx5e_channels_rx_toggle_dim(struct mlx5e_channels *chs)
{
int i;
for (i = 0; i < chs->num; i++) {
/* If dim is enabled for the channel, reset the dim state so the
* collected statistics will be reset. This is useful for
* supporting legacy interfaces that allow things like changing
* the CQ period mode for all channels without disturbing
* individual channel configurations.
*/
if (chs->c[i]->rq.dim) {
int err;
mlx5e_dim_rx_change(&chs->c[i]->rq, false);
err = mlx5e_dim_rx_change(&chs->c[i]->rq, true);
if (err)
return err;
}
}
return 0;
}
int mlx5e_channels_tx_toggle_dim(struct mlx5e_channels *chs)
{
int i, tc;
for (i = 0; i < chs->num; i++) {
for (tc = 0; tc < mlx5e_get_dcb_num_tc(&chs->params); tc++) {
int err;
/* If dim is enabled for the channel, reset the dim
* state so the collected statistics will be reset. This
* is useful for supporting legacy interfaces that allow
* things like changing the CQ period mode for all
* channels without disturbing individual channel
* configurations.
*/
if (!chs->c[i]->sq[tc].dim)
continue;
mlx5e_dim_tx_change(&chs->c[i]->sq[tc], false);
err = mlx5e_dim_tx_change(&chs->c[i]->sq[tc], true);
if (err)
return err;
}
}
return 0;
}
@@ -15,5 +15,9 @@ void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix,
void mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn,
u32 *vhca_id);
bool mlx5e_channels_get_ptp_rqn(struct mlx5e_channels *chs, u32 *rqn);
int mlx5e_channels_rx_change_dim(struct mlx5e_channels *chs, bool enabled);
int mlx5e_channels_tx_change_dim(struct mlx5e_channels *chs, bool enabled);
int mlx5e_channels_rx_toggle_dim(struct mlx5e_channels *chs);
int mlx5e_channels_tx_toggle_dim(struct mlx5e_channels *chs);
#endif /* __MLX5_EN_CHANNELS_H__ */
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved */
#ifndef __MLX5_EN_DIM_H__
#define __MLX5_EN_DIM_H__
#include <linux/dim.h>
#include <linux/types.h>
#include <linux/mlx5/mlx5_ifc.h>
/* Forward declarations */
struct mlx5e_rq;
struct mlx5e_txqsq;
struct work_struct;
/* convert a boolean value for cqe mode to appropriate dim constant
* true : DIM_CQ_PERIOD_MODE_START_FROM_CQE
* false : DIM_CQ_PERIOD_MODE_START_FROM_EQE
*/
static inline int mlx5e_dim_cq_period_mode(bool start_from_cqe)
{
return start_from_cqe ? DIM_CQ_PERIOD_MODE_START_FROM_CQE :
DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}
static inline enum mlx5_cq_period_mode
mlx5e_cq_period_mode(enum dim_cq_period_mode cq_period_mode)
{
switch (cq_period_mode) {
case DIM_CQ_PERIOD_MODE_START_FROM_EQE:
return MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
case DIM_CQ_PERIOD_MODE_START_FROM_CQE:
return MLX5_CQ_PERIOD_MODE_START_FROM_CQE;
default:
WARN_ON_ONCE(true);
return MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
}
}
void mlx5e_rx_dim_work(struct work_struct *work);
void mlx5e_tx_dim_work(struct work_struct *work);
int mlx5e_dim_rx_change(struct mlx5e_rq *rq, bool enabled);
int mlx5e_dim_tx_change(struct mlx5e_txqsq *sq, bool enabled);
#endif /* __MLX5_EN_DIM_H__ */
@@ -6,6 +6,7 @@
#include "en/port.h"
#include "en_accel/en_accel.h"
#include "en_accel/ipsec.h"
#include <linux/dim.h>
#include <net/page_pool/types.h>
#include <net/xdp_sock_drv.h>
@@ -513,77 +514,6 @@ int mlx5e_validate_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
return 0;
}
static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
{
struct dim_cq_moder moder = {};
moder.cq_period_mode = cq_period_mode;
moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE;
return moder;
}
static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
{
struct dim_cq_moder moder = {};
moder.cq_period_mode = cq_period_mode;
moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
return moder;
}
static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode)
{
return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ?
DIM_CQ_PERIOD_MODE_START_FROM_CQE :
DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}
void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
{
if (params->tx_dim_enabled) {
u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
params->tx_cq_moderation = net_dim_get_def_tx_moderation(dim_period_mode);
} else {
params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode);
}
}
void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
{
if (params->rx_dim_enabled) {
u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
params->rx_cq_moderation = net_dim_get_def_rx_moderation(dim_period_mode);
} else {
params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode);
}
}
void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
mlx5e_reset_tx_moderation(params, cq_period_mode);
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
params->tx_cq_moderation.cq_period_mode ==
MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
mlx5e_reset_rx_moderation(params, cq_period_mode);
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
params->rx_cq_moderation.cq_period_mode ==
MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}
bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
{
u32 link_speed = 0;
...
@@ -77,11 +77,6 @@ u8 mlx5e_mpwrq_max_log_rq_pkts(struct mlx5_core_dev *mdev, u8 page_shift,
/* Parameter calculations */
-void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
-void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
-void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
-void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
bool slow_pci_heuristic(struct mlx5_core_dev *mdev);
int mlx5e_mpwrq_validate_regular(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *params,
...
@@ -30,21 +30,22 @@
* SOFTWARE.
*/
#include <linux/dim.h>
#include "en.h" #include "en.h"
#include "en/dim.h"
static void
mlx5e_complete_dim_work(struct dim *dim, struct dim_cq_moder moder,
struct mlx5_core_dev *mdev, struct mlx5_core_cq *mcq)
{
-mlx5_core_modify_cq_moderation(mdev, mcq, moder.usec, moder.pkts);
+mlx5e_modify_cq_moderation(mdev, mcq, moder.usec, moder.pkts,
+mlx5e_cq_period_mode(moder.cq_period_mode));
dim->state = DIM_START_MEASURE;
}
void mlx5e_rx_dim_work(struct work_struct *work)
{
struct dim *dim = container_of(work, struct dim, work);
-struct mlx5e_rq *rq = container_of(dim, struct mlx5e_rq, dim);
+struct mlx5e_rq *rq = dim->priv;
struct dim_cq_moder cur_moder =
net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
@@ -54,9 +55,95 @@ void mlx5e_rx_dim_work(struct work_struct *work)
void mlx5e_tx_dim_work(struct work_struct *work)
{
struct dim *dim = container_of(work, struct dim, work);
-struct mlx5e_txqsq *sq = container_of(dim, struct mlx5e_txqsq, dim);
+struct mlx5e_txqsq *sq = dim->priv;
struct dim_cq_moder cur_moder =
net_dim_get_tx_moderation(dim->mode, dim->profile_ix);
mlx5e_complete_dim_work(dim, cur_moder, sq->cq.mdev, &sq->cq.mcq);
}
static struct dim *mlx5e_dim_enable(struct mlx5_core_dev *mdev,
void (*work_fun)(struct work_struct *), int cpu,
u8 cq_period_mode, struct mlx5_core_cq *mcq,
void *queue)
{
struct dim *dim;
int err;
dim = kvzalloc_node(sizeof(*dim), GFP_KERNEL, cpu_to_node(cpu));
if (!dim)
return ERR_PTR(-ENOMEM);
INIT_WORK(&dim->work, work_fun);
dim->mode = cq_period_mode;
dim->priv = queue;
err = mlx5e_modify_cq_period_mode(mdev, mcq, dim->mode);
if (err) {
kvfree(dim);
return ERR_PTR(err);
}
return dim;
}
static void mlx5e_dim_disable(struct dim *dim)
{
cancel_work_sync(&dim->work);
kvfree(dim);
}
int mlx5e_dim_rx_change(struct mlx5e_rq *rq, bool enable)
{
if (enable == !!rq->dim)
return 0;
if (enable) {
struct mlx5e_channel *c = rq->channel;
struct dim *dim;
dim = mlx5e_dim_enable(rq->mdev, mlx5e_rx_dim_work, c->cpu,
c->rx_cq_moder.cq_period_mode, &rq->cq.mcq, rq);
if (IS_ERR(dim))
return PTR_ERR(dim);
rq->dim = dim;
__set_bit(MLX5E_RQ_STATE_DIM, &rq->state);
} else {
__clear_bit(MLX5E_RQ_STATE_DIM, &rq->state);
mlx5e_dim_disable(rq->dim);
rq->dim = NULL;
}
return 0;
}
int mlx5e_dim_tx_change(struct mlx5e_txqsq *sq, bool enable)
{
if (enable == !!sq->dim)
return 0;
if (enable) {
struct mlx5e_channel *c = sq->channel;
struct dim *dim;
dim = mlx5e_dim_enable(sq->mdev, mlx5e_tx_dim_work, c->cpu,
c->tx_cq_moder.cq_period_mode, &sq->cq.mcq, sq);
if (IS_ERR(dim))
return PTR_ERR(dim);
sq->dim = dim;
__set_bit(MLX5E_SQ_STATE_DIM, &sq->state);
} else {
__clear_bit(MLX5E_SQ_STATE_DIM, &sq->state);
mlx5e_dim_disable(sq->dim);
sq->dim = NULL;
}
return 0;
}
@@ -30,9 +30,12 @@
* SOFTWARE.
*/
#include <linux/dim.h>
#include <linux/ethtool_netlink.h>
#include "en.h"
#include "en/channels.h"
#include "en/dim.h"
#include "en/port.h" #include "en/port.h"
#include "en/params.h" #include "en/params.h"
#include "en/ptp.h" #include "en/ptp.h"
...@@ -565,16 +568,13 @@ int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv, ...@@ -565,16 +568,13 @@ int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
coal->rx_coalesce_usecs = rx_moder->usec; coal->rx_coalesce_usecs = rx_moder->usec;
coal->rx_max_coalesced_frames = rx_moder->pkts; coal->rx_max_coalesced_frames = rx_moder->pkts;
coal->use_adaptive_rx_coalesce = priv->channels.params.rx_dim_enabled; coal->use_adaptive_rx_coalesce = priv->channels.params.rx_dim_enabled;
kernel_coal->use_cqe_mode_rx = priv->channels.params.rx_moder_use_cqe_mode;
tx_moder = &priv->channels.params.tx_cq_moderation;
coal->tx_coalesce_usecs = tx_moder->usec;
coal->tx_max_coalesced_frames = tx_moder->pkts;
coal->use_adaptive_tx_coalesce = priv->channels.params.tx_dim_enabled;
kernel_coal->use_cqe_mode_tx = priv->channels.params.tx_moder_use_cqe_mode;
-kernel_coal->use_cqe_mode_rx =
-MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_BASED_MODER);
-kernel_coal->use_cqe_mode_tx =
-MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_CQE_BASED_MODER);
return 0;
}
@@ -589,11 +589,73 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal);
}
static int mlx5e_ethtool_get_per_queue_coalesce(struct mlx5e_priv *priv, u32 queue,
struct ethtool_coalesce *coal)
{
struct dim_cq_moder cur_moder;
struct mlx5e_channels *chs;
struct mlx5e_channel *c;
if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
return -EOPNOTSUPP;
mutex_lock(&priv->state_lock);
chs = &priv->channels;
if (chs->num <= queue) {
mutex_unlock(&priv->state_lock);
return -EINVAL;
}
c = chs->c[queue];
coal->use_adaptive_rx_coalesce = !!c->rq.dim;
if (coal->use_adaptive_rx_coalesce) {
cur_moder = net_dim_get_rx_moderation(c->rq.dim->mode,
c->rq.dim->profile_ix);
coal->rx_coalesce_usecs = cur_moder.usec;
coal->rx_max_coalesced_frames = cur_moder.pkts;
} else {
coal->rx_coalesce_usecs = c->rx_cq_moder.usec;
coal->rx_max_coalesced_frames = c->rx_cq_moder.pkts;
}
coal->use_adaptive_tx_coalesce = !!c->sq[0].dim;
if (coal->use_adaptive_tx_coalesce) {
/* NOTE: Will only display DIM coalesce profile information of
* first channel. The current interface cannot display this
* information for all tc.
*/
cur_moder = net_dim_get_tx_moderation(c->sq[0].dim->mode,
c->sq[0].dim->profile_ix);
coal->tx_coalesce_usecs = cur_moder.usec;
coal->tx_max_coalesced_frames = cur_moder.pkts;
} else {
coal->tx_coalesce_usecs = c->tx_cq_moder.usec;
coal->tx_max_coalesced_frames = c->tx_cq_moder.pkts;
}
mutex_unlock(&priv->state_lock);
return 0;
}
int mlx5e_get_per_queue_coalesce(struct net_device *dev, u32 queue,
struct ethtool_coalesce *coal)
{
struct mlx5e_priv *priv = netdev_priv(dev);
return mlx5e_ethtool_get_per_queue_coalesce(priv, queue, coal);
}
#define MLX5E_MAX_COAL_TIME MLX5_MAX_CQ_PERIOD
#define MLX5E_MAX_COAL_FRAMES MLX5_MAX_CQ_COUNT
static void
-mlx5e_set_priv_channels_tx_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
+mlx5e_set_priv_channels_tx_coalesce(struct mlx5e_priv *priv, struct dim_cq_moder *moder)
{
int tc;
int i;
@@ -601,38 +663,35 @@ mlx5e_set_priv_channels_tx_coalesce(struct mlx5e_priv *priv, struct ethtool_coal
for (i = 0; i < priv->channels.num; ++i) {
struct mlx5e_channel *c = priv->channels.c[i];
struct mlx5_core_dev *mdev = c->mdev;
enum mlx5_cq_period_mode mode;
mode = mlx5e_cq_period_mode(moder->cq_period_mode);
c->tx_cq_moder = *moder;
for (tc = 0; tc < c->num_tc; tc++) {
-mlx5_core_modify_cq_moderation(mdev,
-&c->sq[tc].cq.mcq,
-coal->tx_coalesce_usecs,
-coal->tx_max_coalesced_frames);
+mlx5e_modify_cq_moderation(mdev, &c->sq[tc].cq.mcq,
+moder->usec, moder->pkts,
+mode);
}
}
}
static void
-mlx5e_set_priv_channels_rx_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
+mlx5e_set_priv_channels_rx_coalesce(struct mlx5e_priv *priv, struct dim_cq_moder *moder)
{
int i;
for (i = 0; i < priv->channels.num; ++i) {
struct mlx5e_channel *c = priv->channels.c[i];
struct mlx5_core_dev *mdev = c->mdev;
enum mlx5_cq_period_mode mode;
-mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq,
-coal->rx_coalesce_usecs,
-coal->rx_max_coalesced_frames);
-}
-}
-/* convert a boolean value of cq_mode to mlx5 period mode
-* true : MLX5_CQ_PERIOD_MODE_START_FROM_CQE
-* false : MLX5_CQ_PERIOD_MODE_START_FROM_EQE
-*/
-static int cqe_mode_to_period_mode(bool val)
-{
-return val ? MLX5_CQ_PERIOD_MODE_START_FROM_CQE : MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+mode = mlx5e_cq_period_mode(moder->cq_period_mode);
+c->rx_cq_moder = *moder;
+mlx5e_modify_cq_moderation(mdev, &c->rq.cq.mcq, moder->usec, moder->pkts,
+mode);
+}
}
int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
@@ -642,13 +701,14 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
{
struct dim_cq_moder *rx_moder, *tx_moder;
struct mlx5_core_dev *mdev = priv->mdev;
+bool rx_dim_enabled, tx_dim_enabled;
struct mlx5e_params new_params;
bool reset_rx, reset_tx;
-bool reset = true;
u8 cq_period_mode;
int err = 0;
-if (!MLX5_CAP_GEN(mdev, cq_moderation))
+if (!MLX5_CAP_GEN(mdev, cq_moderation) ||
+!MLX5_CAP_GEN(mdev, cq_period_mode_modify))
return -EOPNOTSUPP;
if (coal->tx_coalesce_usecs > MLX5E_MAX_COAL_TIME ||
@@ -671,60 +731,70 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
+rx_dim_enabled = !!coal->use_adaptive_rx_coalesce;
+tx_dim_enabled = !!coal->use_adaptive_tx_coalesce;
mutex_lock(&priv->state_lock);
new_params = priv->channels.params;
-rx_moder = &new_params.rx_cq_moderation;
-rx_moder->usec = coal->rx_coalesce_usecs;
-rx_moder->pkts = coal->rx_max_coalesced_frames;
-new_params.rx_dim_enabled = !!coal->use_adaptive_rx_coalesce;
-tx_moder = &new_params.tx_cq_moderation;
-tx_moder->usec = coal->tx_coalesce_usecs;
-tx_moder->pkts = coal->tx_max_coalesced_frames;
-new_params.tx_dim_enabled = !!coal->use_adaptive_tx_coalesce;
-reset_rx = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled;
-reset_tx = !!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled;
-cq_period_mode = cqe_mode_to_period_mode(kernel_coal->use_cqe_mode_rx);
-if (cq_period_mode != rx_moder->cq_period_mode) {
-mlx5e_set_rx_cq_mode_params(&new_params, cq_period_mode);
-reset_rx = true;
-}
-cq_period_mode = cqe_mode_to_period_mode(kernel_coal->use_cqe_mode_tx);
-if (cq_period_mode != tx_moder->cq_period_mode) {
-mlx5e_set_tx_cq_mode_params(&new_params, cq_period_mode);
-reset_tx = true;
-}
+cq_period_mode = mlx5e_dim_cq_period_mode(kernel_coal->use_cqe_mode_rx);
+reset_rx = mlx5e_reset_rx_channels_moderation(&priv->channels, cq_period_mode,
+rx_dim_enabled, false);
+MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_BASED_MODER, cq_period_mode);
+cq_period_mode = mlx5e_dim_cq_period_mode(kernel_coal->use_cqe_mode_tx);
+reset_tx = mlx5e_reset_tx_channels_moderation(&priv->channels, cq_period_mode,
+tx_dim_enabled, false);
+MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_TX_CQE_BASED_MODER, cq_period_mode);
+reset_rx |= rx_dim_enabled != new_params.rx_dim_enabled;
+reset_tx |= tx_dim_enabled != new_params.tx_dim_enabled;
+/* Solely used for global ethtool get coalesce */
+rx_moder = &new_params.rx_cq_moderation;
+new_params.rx_dim_enabled = rx_dim_enabled;
+new_params.rx_moder_use_cqe_mode = kernel_coal->use_cqe_mode_rx;
+tx_moder = &new_params.tx_cq_moderation;
+new_params.tx_dim_enabled = tx_dim_enabled;
+new_params.tx_moder_use_cqe_mode = kernel_coal->use_cqe_mode_tx;
if (reset_rx) {
-u8 mode = MLX5E_GET_PFLAG(&new_params,
-MLX5E_PFLAG_RX_CQE_BASED_MODER);
-mlx5e_reset_rx_moderation(&new_params, mode);
+mlx5e_channels_rx_change_dim(&priv->channels, false);
+mlx5e_reset_rx_moderation(rx_moder, new_params.rx_moder_use_cqe_mode,
+rx_dim_enabled);
+mlx5e_set_priv_channels_rx_coalesce(priv, rx_moder);
+} else if (!rx_dim_enabled) {
+rx_moder->usec = coal->rx_coalesce_usecs;
+rx_moder->pkts = coal->rx_max_coalesced_frames;
+mlx5e_set_priv_channels_rx_coalesce(priv, rx_moder);
}
if (reset_tx) {
-u8 mode = MLX5E_GET_PFLAG(&new_params,
-MLX5E_PFLAG_TX_CQE_BASED_MODER);
-mlx5e_reset_tx_moderation(&new_params, mode);
-}
-/* If DIM state hasn't changed, it's possible to modify interrupt
-* moderation parameters on the fly, even if the channels are open.
-*/
-if (!reset_rx && !reset_tx && test_bit(MLX5E_STATE_OPENED, &priv->state)) {
-if (!coal->use_adaptive_rx_coalesce)
-mlx5e_set_priv_channels_rx_coalesce(priv, coal);
-if (!coal->use_adaptive_tx_coalesce)
-mlx5e_set_priv_channels_tx_coalesce(priv, coal);
-reset = false;
+mlx5e_channels_tx_change_dim(&priv->channels, false);
+mlx5e_reset_tx_moderation(tx_moder, new_params.tx_moder_use_cqe_mode,
+tx_dim_enabled);
+mlx5e_set_priv_channels_tx_coalesce(priv, tx_moder);
+} else if (!tx_dim_enabled) {
+tx_moder->usec = coal->tx_coalesce_usecs;
+tx_moder->pkts = coal->tx_max_coalesced_frames;
+mlx5e_set_priv_channels_tx_coalesce(priv, tx_moder);
}
-err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, reset);
+/* DIM enable/disable Rx and Tx channels */
+err = mlx5e_channels_rx_change_dim(&priv->channels, rx_dim_enabled);
+if (err)
+goto state_unlock;
+err = mlx5e_channels_tx_change_dim(&priv->channels, tx_dim_enabled);
+if (err)
+goto state_unlock;
+err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, false);
+state_unlock:
mutex_unlock(&priv->state_lock);
return err;
}
@@ -739,6 +809,88 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
return mlx5e_ethtool_set_coalesce(priv, coal, kernel_coal, extack);
}
static int mlx5e_ethtool_set_per_queue_coalesce(struct mlx5e_priv *priv, u32 queue,
struct ethtool_coalesce *coal)
{
struct mlx5_core_dev *mdev = priv->mdev;
bool rx_dim_enabled, tx_dim_enabled;
struct mlx5e_channels *chs;
struct mlx5e_channel *c;
int err = 0;
int tc;
if (!MLX5_CAP_GEN(mdev, cq_moderation))
return -EOPNOTSUPP;
if (coal->tx_coalesce_usecs > MLX5E_MAX_COAL_TIME ||
coal->rx_coalesce_usecs > MLX5E_MAX_COAL_TIME) {
netdev_info(priv->netdev, "%s: maximum coalesce time supported is %lu usecs\n",
__func__, MLX5E_MAX_COAL_TIME);
return -ERANGE;
}
if (coal->tx_max_coalesced_frames > MLX5E_MAX_COAL_FRAMES ||
coal->rx_max_coalesced_frames > MLX5E_MAX_COAL_FRAMES) {
netdev_info(priv->netdev, "%s: maximum coalesced frames supported is %lu\n",
__func__, MLX5E_MAX_COAL_FRAMES);
return -ERANGE;
}
rx_dim_enabled = !!coal->use_adaptive_rx_coalesce;
tx_dim_enabled = !!coal->use_adaptive_tx_coalesce;
mutex_lock(&priv->state_lock);
chs = &priv->channels;
if (chs->num <= queue) {
mutex_unlock(&priv->state_lock);
return -EINVAL;
}
c = chs->c[queue];
err = mlx5e_dim_rx_change(&c->rq, rx_dim_enabled);
if (err)
goto state_unlock;
for (tc = 0; tc < c->num_tc; tc++) {
err = mlx5e_dim_tx_change(&c->sq[tc], tx_dim_enabled);
if (err)
goto state_unlock;
}
if (!rx_dim_enabled) {
c->rx_cq_moder.usec = coal->rx_coalesce_usecs;
c->rx_cq_moder.pkts = coal->rx_max_coalesced_frames;
mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq,
coal->rx_coalesce_usecs,
coal->rx_max_coalesced_frames);
}
if (!tx_dim_enabled) {
c->tx_cq_moder.usec = coal->tx_coalesce_usecs;
c->tx_cq_moder.pkts = coal->tx_max_coalesced_frames;
for (tc = 0; tc < c->num_tc; tc++)
mlx5_core_modify_cq_moderation(mdev, &c->sq[tc].cq.mcq,
coal->tx_coalesce_usecs,
coal->tx_max_coalesced_frames);
}
state_unlock:
mutex_unlock(&priv->state_lock);
return err;
}
int mlx5e_set_per_queue_coalesce(struct net_device *dev, u32 queue,
struct ethtool_coalesce *coal)
{
struct mlx5e_priv *priv = netdev_priv(dev);
return mlx5e_ethtool_set_per_queue_coalesce(priv, queue, coal);
}
static void ptys2ethtool_supported_link(struct mlx5_core_dev *mdev,
unsigned long *supported_modes,
u32 eth_proto_cap)
@@ -1914,7 +2066,7 @@ static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable,
if (enable && !MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe))
return -EOPNOTSUPP;
-cq_period_mode = cqe_mode_to_period_mode(enable);
+cq_period_mode = mlx5e_dim_cq_period_mode(enable);
current_cq_period_mode = is_rx_cq ?
priv->channels.params.rx_cq_moderation.cq_period_mode :
@@ -1924,12 +2076,22 @@
return 0;
new_params = priv->channels.params;
-if (is_rx_cq)
-mlx5e_set_rx_cq_mode_params(&new_params, cq_period_mode);
-else
-mlx5e_set_tx_cq_mode_params(&new_params, cq_period_mode);
+if (is_rx_cq) {
+mlx5e_reset_rx_channels_moderation(&priv->channels, cq_period_mode,
+false, true);
+mlx5e_channels_rx_toggle_dim(&priv->channels);
+MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
+cq_period_mode);
+} else {
+mlx5e_reset_tx_channels_moderation(&priv->channels, cq_period_mode,
+false, true);
+mlx5e_channels_tx_toggle_dim(&priv->channels);
+MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
+cq_period_mode);
+}
-return mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true);
+/* Update pflags of existing channels without resetting them */
+return mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, false);
}
static int set_pflag_tx_cqe_based_moder(struct net_device *netdev, bool enable)
@@ -2454,6 +2616,8 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.set_channels = mlx5e_set_channels,
.get_coalesce = mlx5e_get_coalesce,
.set_coalesce = mlx5e_set_coalesce,
.get_per_queue_coalesce = mlx5e_get_per_queue_coalesce,
.set_per_queue_coalesce = mlx5e_set_per_queue_coalesce,
.get_link_ksettings = mlx5e_get_link_ksettings,
.set_link_ksettings = mlx5e_set_link_ksettings,
.get_rxfh_key_size = mlx5e_get_rxfh_key_size,
...
@@ -30,6 +30,7 @@
* SOFTWARE.
*/
#include <linux/dim.h>
#include <net/tc_act/tc_gact.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
@@ -43,6 +44,7 @@
#include <net/xdp_sock_drv.h>
#include "eswitch.h"
#include "en.h"
#include "en/dim.h"
#include "en/txrx.h" #include "en/txrx.h"
#include "en_tc.h" #include "en_tc.h"
#include "en_rep.h" #include "en_rep.h"
...@@ -960,17 +962,6 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params, ...@@ -960,17 +962,6 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
} }
} }
-INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work);
-switch (params->rx_cq_moderation.cq_period_mode) {
-case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
-rq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
-break;
-case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
-default:
-rq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
-}
return 0;
err_destroy_page_pool:
@@ -1020,6 +1011,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
mlx5e_free_wqe_alloc_info(rq);
}
kvfree(rq->dim);
xdp_rxq_info_unreg(&rq->xdp_rxq);
page_pool_destroy(rq->page_pool);
mlx5_wq_destroy(&rq->wq_ctrl);
@@ -1300,8 +1292,21 @@ int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
if (MLX5_CAP_ETH(mdev, cqe_checksum_full))
__set_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state);
-if (params->rx_dim_enabled)
-__set_bit(MLX5E_RQ_STATE_DIM, &rq->state);
+if (rq->channel && !params->rx_dim_enabled) {
+rq->channel->rx_cq_moder = params->rx_cq_moderation;
+} else if (rq->channel) {
+u8 cq_period_mode;
+cq_period_mode = params->rx_moder_use_cqe_mode ?
+DIM_CQ_PERIOD_MODE_START_FROM_CQE :
+DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+mlx5e_reset_rx_moderation(&rq->channel->rx_cq_moder, cq_period_mode,
+params->rx_dim_enabled);
+err = mlx5e_dim_rx_change(rq, params->rx_dim_enabled);
+if (err)
+goto err_destroy_rq;
+}
/* We disable csum_complete when XDP is enabled since
* XDP programs might manipulate packets which will render
@@ -1347,7 +1352,8 @@ void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
void mlx5e_close_rq(struct mlx5e_rq *rq)
{
-cancel_work_sync(&rq->dim.work);
+if (rq->dim)
+cancel_work_sync(&rq->dim->work);
cancel_work_sync(&rq->recover_work);
mlx5e_destroy_rq(rq);
mlx5e_free_rx_descs(rq);
@@ -1623,9 +1629,6 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
if (err)
goto err_sq_wq_destroy;
-INIT_WORK(&sq->dim.work, mlx5e_tx_dim_work);
-sq->dim.mode = params->tx_cq_moderation.cq_period_mode;
return 0;
err_sq_wq_destroy:
@@ -1636,6 +1639,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
{
kvfree(sq->dim);
mlx5e_free_txqsq_db(sq);
mlx5_wq_destroy(&sq->wq_ctrl);
}
@@ -1791,11 +1795,27 @@ int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
if (tx_rate)
mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate);
-if (params->tx_dim_enabled)
-sq->state |= BIT(MLX5E_SQ_STATE_DIM);
+if (sq->channel && !params->tx_dim_enabled) {
+sq->channel->tx_cq_moder = params->tx_cq_moderation;
+} else if (sq->channel) {
+u8 cq_period_mode;
+cq_period_mode = params->tx_moder_use_cqe_mode ?
+DIM_CQ_PERIOD_MODE_START_FROM_CQE :
+DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+mlx5e_reset_tx_moderation(&sq->channel->tx_cq_moder,
+cq_period_mode,
+params->tx_dim_enabled);
+err = mlx5e_dim_tx_change(sq, params->tx_dim_enabled);
+if (err)
+goto err_destroy_sq;
+}
return 0;
+err_destroy_sq:
+mlx5e_destroy_sq(c->mdev, sq->sqn);
err_free_txqsq:
mlx5e_free_txqsq(sq);
@@ -1847,7 +1867,8 @@ void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
struct mlx5_core_dev *mdev = sq->mdev;
struct mlx5_rate_limit rl = {0};
-cancel_work_sync(&sq->dim.work);
+if (sq->dim)
+cancel_work_sync(&sq->dim->work);
cancel_work_sync(&sq->recover_work);
mlx5e_destroy_sq(mdev, sq->sqn);
if (sq->rate_limit) {
@@ -1866,6 +1887,49 @@ void mlx5e_tx_err_cqe_work(struct work_struct *recover_work)
mlx5e_reporter_tx_err_cqe(sq);
}
static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
{
return (struct dim_cq_moder) {
.cq_period_mode = cq_period_mode,
.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS,
.usec = cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE ?
MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE :
MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC,
};
}
bool mlx5e_reset_tx_moderation(struct dim_cq_moder *cq_moder, u8 cq_period_mode,
bool dim_enabled)
{
bool reset_needed = cq_moder->cq_period_mode != cq_period_mode;
if (dim_enabled)
*cq_moder = net_dim_get_def_tx_moderation(cq_period_mode);
else
*cq_moder = mlx5e_get_def_tx_moderation(cq_period_mode);
return reset_needed;
}
bool mlx5e_reset_tx_channels_moderation(struct mlx5e_channels *chs, u8 cq_period_mode,
bool dim_enabled, bool keep_dim_state)
{
bool reset = false;
int i, tc;
for (i = 0; i < chs->num; i++) {
for (tc = 0; tc < mlx5e_get_dcb_num_tc(&chs->params); tc++) {
if (keep_dim_state)
dim_enabled = !!chs->c[i]->sq[tc].dim;
reset |= mlx5e_reset_tx_moderation(&chs->c[i]->tx_cq_moder,
cq_period_mode, dim_enabled);
}
}
return reset;
}
static int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
struct mlx5e_sq_param *param, struct mlx5e_icosq *sq,
work_func_t recover_work_func)
@@ -2089,7 +2153,8 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
(__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
-MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode);
+MLX5_SET(cqc, cqc, cq_period_mode, mlx5e_cq_period_mode(param->cq_period_mode));
MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
@@ -2127,8 +2192,10 @@ int mlx5e_open_cq(struct mlx5_core_dev *mdev, struct dim_cq_moder moder,
if (err)
goto err_free_cq;
-if (MLX5_CAP_GEN(mdev, cq_moderation))
-mlx5_core_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts);
+if (MLX5_CAP_GEN(mdev, cq_moderation) &&
+MLX5_CAP_GEN(mdev, cq_period_mode_modify))
+mlx5e_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts,
+mlx5e_cq_period_mode(moder.cq_period_mode));
return 0;
err_free_cq:
@@ -2143,6 +2210,40 @@ void mlx5e_close_cq(struct mlx5e_cq *cq)
mlx5e_free_cq(cq);
}
int mlx5e_modify_cq_period_mode(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
u8 cq_period_mode)
{
u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {};
void *cqc;
MLX5_SET(modify_cq_in, in, cqn, cq->cqn);
cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
MLX5_SET(cqc, cqc, cq_period_mode, mlx5e_cq_period_mode(cq_period_mode));
MLX5_SET(modify_cq_in, in,
modify_field_select_resize_field_select.modify_field_select.modify_field_select,
MLX5_CQ_MODIFY_PERIOD_MODE);
return mlx5_core_modify_cq(dev, cq, in, sizeof(in));
}
int mlx5e_modify_cq_moderation(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
u16 cq_period, u16 cq_max_count, u8 cq_period_mode)
{
u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {};
void *cqc;
MLX5_SET(modify_cq_in, in, cqn, cq->cqn);
cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
MLX5_SET(cqc, cqc, cq_period, cq_period);
MLX5_SET(cqc, cqc, cq_max_count, cq_max_count);
MLX5_SET(cqc, cqc, cq_period_mode, cq_period_mode);
MLX5_SET(modify_cq_in, in,
modify_field_select_resize_field_select.modify_field_select.modify_field_select,
MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT | MLX5_CQ_MODIFY_PERIOD_MODE);
return mlx5_core_modify_cq(dev, cq, in, sizeof(in));
}
static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct mlx5e_create_cq_param *ccp,
@@ -3960,6 +4061,47 @@ static int set_feature_rx_all(struct net_device *netdev, bool enable)
return mlx5_set_port_fcs(mdev, !enable);
}
static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
{
return (struct dim_cq_moder) {
.cq_period_mode = cq_period_mode,
.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS,
.usec = cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE ?
MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE :
MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC,
};
}
bool mlx5e_reset_rx_moderation(struct dim_cq_moder *cq_moder, u8 cq_period_mode,
bool dim_enabled)
{
bool reset_needed = cq_moder->cq_period_mode != cq_period_mode;
if (dim_enabled)
*cq_moder = net_dim_get_def_rx_moderation(cq_period_mode);
else
*cq_moder = mlx5e_get_def_rx_moderation(cq_period_mode);
return reset_needed;
}
bool mlx5e_reset_rx_channels_moderation(struct mlx5e_channels *chs, u8 cq_period_mode,
bool dim_enabled, bool keep_dim_state)
{
bool reset = false;
int i;
for (i = 0; i < chs->num; i++) {
if (keep_dim_state)
dim_enabled = !!chs->c[i]->rq.dim;
reset |= mlx5e_reset_rx_moderation(&chs->c[i]->rx_cq_moder,
cq_period_mode, dim_enabled);
}
return reset;
}
static int mlx5e_set_rx_port_ts(struct mlx5_core_dev *mdev, bool enable)
{
u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {};
@@ -5024,7 +5166,6 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
{
struct mlx5e_params *params = &priv->channels.params;
struct mlx5_core_dev *mdev = priv->mdev;
-u8 rx_cq_period_mode;
params->sw_mtu = mtu;
params->hard_mtu = MLX5E_ETH_HARD_MTU;
@@ -5058,13 +5199,16 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
params->packet_merge.timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
/* CQ moderation params */
-rx_cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
-MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
-MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
-params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
-params->tx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
-mlx5e_set_rx_cq_mode_params(params, rx_cq_period_mode);
-mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
+params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation) &&
+MLX5_CAP_GEN(mdev, cq_period_mode_modify);
+params->tx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation) &&
+MLX5_CAP_GEN(mdev, cq_period_mode_modify);
+params->rx_moder_use_cqe_mode = !!MLX5_CAP_GEN(mdev, cq_period_start_from_cqe);
+params->tx_moder_use_cqe_mode = false;
+mlx5e_reset_rx_moderation(&params->rx_cq_moderation, params->rx_moder_use_cqe_mode,
+params->rx_dim_enabled);
+mlx5e_reset_tx_moderation(&params->tx_cq_moderation, params->tx_moder_use_cqe_mode,
+params->tx_dim_enabled);
/* TX inline */
mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
...
@@ -30,6 +30,7 @@
* SOFTWARE.
*/
#include <linux/dim.h>
#include <linux/debugfs.h>
#include <linux/mlx5/fs.h>
#include <net/switchdev.h>
@@ -40,6 +41,7 @@
#include "eswitch.h"
#include "en.h"
#include "en/dim.h"
#include "en_rep.h" #include "en_rep.h"
#include "en/params.h" #include "en/params.h"
#include "en/txrx.h" #include "en/txrx.h"
...@@ -426,6 +428,8 @@ static const struct ethtool_ops mlx5e_rep_ethtool_ops = { ...@@ -426,6 +428,8 @@ static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
.set_channels = mlx5e_rep_set_channels, .set_channels = mlx5e_rep_set_channels,
.get_coalesce = mlx5e_rep_get_coalesce, .get_coalesce = mlx5e_rep_get_coalesce,
.set_coalesce = mlx5e_rep_set_coalesce, .set_coalesce = mlx5e_rep_set_coalesce,
.get_per_queue_coalesce = mlx5e_get_per_queue_coalesce,
.set_per_queue_coalesce = mlx5e_set_per_queue_coalesce,
.get_rxfh_key_size = mlx5e_rep_get_rxfh_key_size,
.get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
};
@@ -836,10 +840,6 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_params *params;
-u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
-MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
-MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
params = &priv->channels.params;
params->num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS;
@@ -867,7 +867,7 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
/* CQ moderation params */
params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
-mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
+params->rx_moder_use_cqe_mode = !!MLX5_CAP_GEN(mdev, cq_period_start_from_cqe);
params->mqprio.num_tc = 1;
if (rep->vport != MLX5_VPORT_UPLINK)
...
@@ -55,7 +55,7 @@ static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq)
return;
dim_update_sample(sq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
-net_dim(&sq->dim, dim_sample);
+net_dim(sq->dim, dim_sample);
}
static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq)
@@ -67,7 +67,7 @@ static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq)
return;
dim_update_sample(rq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
-net_dim(&rq->dim, dim_sample);
+net_dim(rq->dim, dim_sample);
}
void mlx5e_trigger_irq(struct mlx5e_icosq *sq)
...
@@ -95,9 +95,10 @@
};
enum {
-MLX5_CQ_MODIFY_PERIOD = 1 << 0,
-MLX5_CQ_MODIFY_COUNT = 1 << 1,
-MLX5_CQ_MODIFY_OVERRUN = 1 << 2,
+MLX5_CQ_MODIFY_PERIOD = BIT(0),
+MLX5_CQ_MODIFY_COUNT = BIT(1),
+MLX5_CQ_MODIFY_OVERRUN = BIT(2),
+MLX5_CQ_MODIFY_PERIOD_MODE = BIT(4),
};
enum {
...
@@ -1686,7 +1686,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 cq_oi[0x1];
u8 cq_resize[0x1];
u8 cq_moderation[0x1];
-u8 reserved_at_223[0x3];
+u8 cq_period_mode_modify[0x1];
+u8 reserved_at_224[0x2];
u8 cq_eq_remap[0x1];
u8 pg[0x1];
u8 block_lb_mc[0x1];
@@ -4385,10 +4386,10 @@ enum {
MLX5_CQC_ST_FIRED = 0xa,
};
-enum {
+enum mlx5_cq_period_mode {
MLX5_CQ_PERIOD_MODE_START_FROM_EQE = 0x0,
MLX5_CQ_PERIOD_MODE_START_FROM_CQE = 0x1,
-MLX5_CQ_PERIOD_NUM_MODES
+MLX5_CQ_PERIOD_NUM_MODES,
};
struct mlx5_ifc_cqc_bits {
...