Commit 73281b78 authored by Tariq Toukan, committed by Saeed Mahameed

net/mlx5e: Derive Striding RQ size from MTU

In Striding RQ, each WQE serves multiple packets
(hence the name Multi-Packet WQE, MPWQE).
The size of an MPWQE is constant (currently 256KB).

Upon a ringparam set operation, we calculate the number of
MPWQEs per RQ. For this, we first need to determine the
number of packets that can reside within a single MPWQE.
In this patch we use the actual MTU size instead of ETH_DATA_LEN
for this calculation.

This implies that a change in MTU might require a change
in Striding RQ ring size.

In addition, this obsoletes some WQEs-to-packets translation
functions and helps delete ~60 LOC.
Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent 472a1e44
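
Below is a minimal userspace sketch of the new derivation, for illustration only: it assumes MLX5_MPWRQ_LOG_WQE_SZ = 18 (the 256KB MPWQE) and approximates the SW-to-HW MTU conversion as sw_mtu + 14; the real MLX5E_SW2HW_MTU() also accounts for VLAN and FCS overhead. The helper names mirror the patch, but this program is not driver code.

/*
 * Minimal sketch of the new Striding RQ sizing -- not driver code.
 * Assumes MLX5_MPWRQ_LOG_WQE_SZ = 18 (the 256KB MPWQE) and
 * approximates the SW-to-HW MTU conversion as sw_mtu + 14.
 */
#include <stdio.h>

#define MPWRQ_LOG_WQE_SZ	18	/* log2(256KB) per MPWQE */
#define MIN_LOG_RQ_SIZE_MPW	2	/* at least 4 MPWQEs per RQ */
#define HW_MTU_OVERHEAD		14	/* ETH_HLEN; an approximation */

/* log2 rounded up, like the kernel's order_base_2() */
static unsigned int order_base_2(unsigned int x)
{
	unsigned int order = 0;

	while ((1u << order) < x)
		order++;
	return order;
}

/* log2 of how many MTU-sized packets fit in one 256KB MPWQE */
static unsigned int log_pkts_per_wqe(unsigned int sw_mtu)
{
	return MPWRQ_LOG_WQE_SZ - order_base_2(sw_mtu + HW_MTU_OVERHEAD);
}

/* Translate the requested ring size (log2 of frames) into MPWQEs */
static unsigned int log_rq_size(unsigned int log_rq_mtu_frames,
				unsigned int sw_mtu)
{
	unsigned int ppw = log_pkts_per_wqe(sw_mtu);

	if (log_rq_mtu_frames < ppw + MIN_LOG_RQ_SIZE_MPW)
		return MIN_LOG_RQ_SIZE_MPW;
	return log_rq_mtu_frames - ppw;
}

int main(void)
{
	/* MTU 1500 rounds up to 2KB slots: 128 packets per MPWQE, so a
	 * 1024-frame ring (log 10) needs only 2^3 = 8 MPWQEs. */
	printf("MTU 1500: log_rq_size = %u\n", log_rq_size(10, 1500));
	/* MTU 9000 rounds up to 16KB slots: 16 packets per MPWQE, so
	 * the same 1024-frame ring needs 2^6 = 64 MPWQEs. */
	printf("MTU 9000: log_rq_size = %u\n", log_rq_size(10, 9000));
	return 0;
}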
@@ -63,18 +63,6 @@
 #define MLX5E_MAX_DSCP          64
 #define MLX5E_MAX_NUM_TC	8
 
-#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE                0x6
-#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE                0xa
-#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE                0xd
-
-#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE                0x1
-#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE                0xa
-#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE                0xd
-
-#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW            0x2
-#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW            0x3
-#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW            0x6
-
 #define MLX5_RX_HEADROOM NET_SKB_PAD
 #define MLX5_SKB_FRAG_SZ(len)	(SKB_DATA_ALIGN(len) +	\
 				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
@@ -95,9 +83,27 @@
 #define MLX5_MPWRQ_PAGES_PER_WQE	BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
 
 #define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2)
-#define MLX5E_REQUIRED_MTTS(wqes)		\
-	(wqes * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8))
-#define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) - 1 <= U16_MAX)
+#define MLX5E_REQUIRED_WQE_MTTS		(ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8))
+#define MLX5E_REQUIRED_MTTS(wqes)	(wqes * MLX5E_REQUIRED_WQE_MTTS)
+#define MLX5E_MAX_RQ_NUM_MTTS	\
+	((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
+#define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024))
+#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW	\
+		(ilog2(MLX5E_MAX_RQ_NUM_MTTS / MLX5E_REQUIRED_WQE_MTTS))
+#define MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW \
+	(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW + \
+	 (MLX5_MPWRQ_LOG_WQE_SZ - MLX5E_ORDER2_MAX_PACKET_MTU))
+
+#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE                0x6
+#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE                0xa
+#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE                0xd
+
+#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE                0x1
+#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE                0xa
+#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE min_t(u8, 0xd, \
+					       MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW)
+
+#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW            0x2
 
 #define MLX5_UMR_ALIGN				(2048)
 #define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD	(256)
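
A quick sanity check of the new cap, assuming 4KB pages (PAGE_SHIFT = 12), so the 256KB MPWQE spans MLX5_MPWRQ_PAGES_PER_WQE = 64 pages: MLX5E_REQUIRED_WQE_MTTS = ALIGN(64, 8) = 64, and MLX5E_MAX_RQ_NUM_MTTS = 2 * 2^16 = 2^17 keeps MLX5_MTT_OCTW(num_mtts) = ALIGN(num_mtts, 8) / 2 within the bound the old MLX5E_VALID_NUM_MTTS() check enforced. That gives MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW = ilog2(2^17 / 64) = 11, and with the 10KB worst-case packet (MLX5E_ORDER2_MAX_PACKET_MTU = order_base_2(10 * 1024) = 14), MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW = 11 + (18 - 14) = 15, so the ethtool maximum stays clamped at min(0xd, 15) = 2^13 frames.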
@@ -155,26 +161,6 @@ static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
 	}
 }
 
-static inline int mlx5_min_log_rq_size(int wq_type)
-{
-	switch (wq_type) {
-	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
-		return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;
-	default:
-		return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
-	}
-}
-
-static inline int mlx5_max_log_rq_size(int wq_type)
-{
-	switch (wq_type) {
-	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
-		return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW;
-	default:
-		return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
-	}
-}
-
 static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
 {
 	return is_kdump_kernel() ?
@@ -233,7 +219,7 @@ enum mlx5e_priv_flag {
 struct mlx5e_params {
 	u8  log_sq_size;
 	u8  rq_wq_type;
-	u8  log_rq_size;
+	u8  log_rq_mtu_frames;
 	u16 num_channels;
 	u8  num_tc;
 	bool rx_cqe_compress_def;
@@ -849,11 +835,6 @@ void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
 void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
 void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
 
-u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
-				   struct mlx5e_params *params);
-u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
-				   struct mlx5e_params *params);
-
 void mlx5e_update_stats(struct mlx5e_priv *priv);
 
 int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
...
@@ -220,60 +220,12 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
 	mlx5e_ethtool_get_ethtool_stats(priv, stats, data);
 }
 
-static u32 mlx5e_rx_wqes_to_packets(struct mlx5e_priv *priv, int rq_wq_type,
-				    int num_wqe)
-{
-	int packets_per_wqe;
-	int stride_size;
-	int num_strides;
-	int wqe_size;
-
-	if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
-		return num_wqe;
-
-	stride_size = 1 << mlx5e_mpwqe_get_log_stride_size(priv->mdev, &priv->channels.params);
-	num_strides = 1 << mlx5e_mpwqe_get_log_num_strides(priv->mdev, &priv->channels.params);
-	wqe_size = stride_size * num_strides;
-
-	packets_per_wqe = wqe_size /
-			  ALIGN(ETH_DATA_LEN, stride_size);
-	return (1 << (order_base_2(num_wqe * packets_per_wqe) - 1));
-}
-
-static u32 mlx5e_packets_to_rx_wqes(struct mlx5e_priv *priv, int rq_wq_type,
-				    int num_packets)
-{
-	int packets_per_wqe;
-	int stride_size;
-	int num_strides;
-	int wqe_size;
-	int num_wqes;
-
-	if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
-		return num_packets;
-
-	stride_size = 1 << mlx5e_mpwqe_get_log_stride_size(priv->mdev, &priv->channels.params);
-	num_strides = 1 << mlx5e_mpwqe_get_log_num_strides(priv->mdev, &priv->channels.params);
-	wqe_size = stride_size * num_strides;
-
-	num_packets = (1 << order_base_2(num_packets));
-
-	packets_per_wqe = wqe_size /
-			  ALIGN(ETH_DATA_LEN, stride_size);
-	num_wqes = DIV_ROUND_UP(num_packets, packets_per_wqe);
-	return 1 << (order_base_2(num_wqes));
-}
-
 void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
 				 struct ethtool_ringparam *param)
 {
-	int rq_wq_type = priv->channels.params.rq_wq_type;
-
-	param->rx_max_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
-							 1 << mlx5_max_log_rq_size(rq_wq_type));
+	param->rx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
 	param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
-	param->rx_pending     = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
-							 1 << priv->channels.params.log_rq_size);
+	param->rx_pending     = 1 << priv->channels.params.log_rq_mtu_frames;
 	param->tx_pending     = 1 << priv->channels.params.log_sq_size;
 }
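
With the translation helpers gone, ring sizes are reported to ethtool in MTU-sized frames for both WQ types, so ethtool -g should show an RX maximum of 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE = 8192 entries whether or not Striding RQ is active.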
@@ -288,13 +240,9 @@ static void mlx5e_get_ringparam(struct net_device *dev,
 int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
 				struct ethtool_ringparam *param)
 {
-	int rq_wq_type = priv->channels.params.rq_wq_type;
 	struct mlx5e_channels new_channels = {};
-	u32 rx_pending_wqes;
-	u32 min_rq_size;
 	u8 log_rq_size;
 	u8 log_sq_size;
-	u32 num_mtts;
 	int err = 0;
 
 	if (param->rx_jumbo_pending) {
@@ -308,23 +256,10 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
 		return -EINVAL;
 	}
 
-	min_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
-					       1 << mlx5_min_log_rq_size(rq_wq_type));
-	rx_pending_wqes = mlx5e_packets_to_rx_wqes(priv, rq_wq_type,
-						   param->rx_pending);
-
-	if (param->rx_pending < min_rq_size) {
+	if (param->rx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)) {
 		netdev_info(priv->netdev, "%s: rx_pending (%d) < min (%d)\n",
 			    __func__, param->rx_pending,
-			    min_rq_size);
-		return -EINVAL;
-	}
-
-	num_mtts = MLX5E_REQUIRED_MTTS(rx_pending_wqes);
-	if (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
-	    !MLX5E_VALID_NUM_MTTS(num_mtts)) {
-		netdev_info(priv->netdev, "%s: rx_pending (%d) request can't be satisfied, try to reduce.\n",
-			    __func__, param->rx_pending);
+			    1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE);
 		return -EINVAL;
 	}
@@ -335,17 +270,17 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
 		return -EINVAL;
 	}
 
-	log_rq_size = order_base_2(rx_pending_wqes);
+	log_rq_size = order_base_2(param->rx_pending);
 	log_sq_size = order_base_2(param->tx_pending);
 
-	if (log_rq_size == priv->channels.params.log_rq_size &&
+	if (log_rq_size == priv->channels.params.log_rq_mtu_frames &&
 	    log_sq_size == priv->channels.params.log_sq_size)
 		return 0;
 
 	mutex_lock(&priv->state_lock);
 
 	new_channels.params = priv->channels.params;
-	new_channels.params.log_rq_size = log_rq_size;
+	new_channels.params.log_rq_mtu_frames = log_rq_size;
 	new_channels.params.log_sq_size = log_sq_size;
 
 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
...
@@ -78,15 +78,38 @@ bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
 		MLX5_CAP_ETH(mdev, reg_umr_sq);
 }
 
-u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
-				   struct mlx5e_params *params)
+static u32 mlx5e_mpwqe_get_linear_frag_sz(struct mlx5e_params *params)
+{
+	u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+
+	return hw_mtu;
+}
+
+static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params)
+{
+	u32 linear_frag_sz = mlx5e_mpwqe_get_linear_frag_sz(params);
+
+	return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz);
+}
+
+static u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params)
+{
+	if (params->log_rq_mtu_frames <
+	    mlx5e_mpwqe_log_pkts_per_wqe(params) + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW)
+		return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;
+
+	return params->log_rq_mtu_frames - mlx5e_mpwqe_log_pkts_per_wqe(params);
+}
+
+static u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
+					  struct mlx5e_params *params)
 {
 	return MLX5E_MPWQE_STRIDE_SZ(mdev,
 		MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
 }
 
-u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
-				   struct mlx5e_params *params)
+static u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
+					  struct mlx5e_params *params)
 {
 	return MLX5_MPWRQ_LOG_WQE_SZ -
 		mlx5e_mpwqe_get_log_stride_size(mdev, params);
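
Note that mlx5e_mpwqe_get_log_rq_size() clamps at the low end, so small requests can be over-provisioned: by the math in the sketch above, at MTU 1500 an MPWQE holds 2^7 = 128 frames, so a requested 64-frame ring (log_rq_mtu_frames = 6) is raised to the 4-WQE minimum, which actually buffers 4 * 128 = 512 frames.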
@@ -109,17 +132,13 @@ void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
 			       struct mlx5e_params *params)
 {
 	params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+	params->log_rq_mtu_frames = is_kdump_kernel() ?
+		MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
+		MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
 
 	switch (params->rq_wq_type) {
 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
-		params->log_rq_size = is_kdump_kernel() ?
-			MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW :
-			MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
 		break;
 	default: /* MLX5_WQ_TYPE_LINKED_LIST */
-		params->log_rq_size = is_kdump_kernel() ?
-			MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
-			MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
-
 		/* Extra room needed for build_skb */
 		params->lro_wqe_sz -= mlx5e_get_rq_headroom(params) +
 			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
@@ -127,7 +146,7 @@ void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
 
 	mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
 		       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
-		       BIT(params->log_rq_size),
+		       BIT(params->log_rq_mtu_frames),
 		       BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params)),
 		       MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
 }
@@ -351,9 +370,6 @@ static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
 	u32 *in;
 	int err;
 
-	if (!MLX5E_VALID_NUM_MTTS(npages))
-		return -EINVAL;
-
 	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in)
 		return -ENOMEM;
@@ -1872,14 +1888,15 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
 		MLX5_SET(wq, wq, log_wqe_stride_size,
 			 mlx5e_mpwqe_get_log_stride_size(mdev, params) - 6);
 		MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
+		MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params));
 		break;
 	default: /* MLX5_WQ_TYPE_LINKED_LIST */
 		MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
+		MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
 	}
 
 	MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
 	MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
-	MLX5_SET(wq, wq, log_wq_sz, params->log_rq_size);
 	MLX5_SET(wq, wq, pd, mdev->mlx5e_res.pdn);
 	MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
 	MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
@@ -1939,16 +1956,17 @@ static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
 				    struct mlx5e_params *params,
 				    struct mlx5e_cq_param *param)
 {
+	struct mlx5_core_dev *mdev = priv->mdev;
 	void *cqc = param->cqc;
 	u8 log_cq_size;
 
 	switch (params->rq_wq_type) {
 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
-		log_cq_size = params->log_rq_size +
-			mlx5e_mpwqe_get_log_num_strides(priv->mdev, params);
+		log_cq_size = mlx5e_mpwqe_get_log_rq_size(params) +
+			mlx5e_mpwqe_get_log_num_strides(mdev, params);
 		break;
 	default: /* MLX5_WQ_TYPE_LINKED_LIST */
-		log_cq_size = params->log_rq_size;
+		log_cq_size = params->log_rq_mtu_frames;
 	}
 
 	MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
@@ -3421,11 +3439,20 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
 	mutex_lock(&priv->state_lock);
 
 	params = &priv->channels.params;
-	reset = !params->lro_en &&
-		(params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
 
+	reset = !params->lro_en;
 	reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state);
 
+	new_channels.params = *params;
+	new_channels.params.sw_mtu = new_mtu;
+
+	if (params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST) {
+		u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params);
+		u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params);
+
+		reset = reset && (ppw_old != ppw_new);
+	}
+
 	if (!reset) {
 		params->sw_mtu = new_mtu;
 		mlx5e_set_dev_port_mtu(priv);
@@ -3433,8 +3460,6 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
 		goto out;
 	}
 
-	new_channels.params = *params;
-	new_channels.params.sw_mtu = new_mtu;
 	err = mlx5e_open_channels(priv, &new_channels);
 	if (err)
 		goto out;
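
The reset rule from the mlx5e_change_mtu() hunk above, continuing the earlier sketch (the LRO and MLX5E_STATE_OPENED conditions from the patch are omitted here):

/*
 * Continuation of the earlier sketch: an MTU change forces a channel
 * reset on Striding RQ only if it changes log2(packets per MPWQE).
 */
static int mtu_change_needs_reset(unsigned int old_mtu, unsigned int new_mtu)
{
	return log_pkts_per_wqe(old_mtu) != log_pkts_per_wqe(new_mtu);
}
/* e.g. 1500 -> 1600 stays in 2KB slots (order 11): no reset;
 *      1500 -> 4000 moves to 4KB slots (order 12): reset. */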
...
@@ -880,7 +880,7 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
 	params->hard_mtu    = MLX5E_ETH_HARD_MTU;
 	params->log_sq_size = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
 	params->rq_wq_type  = MLX5_WQ_TYPE_LINKED_LIST;
-	params->log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
+	params->log_rq_mtu_frames = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
 
 	params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
 	mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
...
@@ -61,7 +61,7 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
 	mlx5e_init_rq_type_params(mdev, params);
 
 	/* RQ size in ipoib by default is 512 */
-	params->log_rq_size = is_kdump_kernel() ?
+	params->log_rq_mtu_frames = is_kdump_kernel() ?
 		MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
 		MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE;
...
@@ -290,7 +290,7 @@ static void mlx5i_pkey_init(struct mlx5_core_dev *mdev,
 	netdev->ethtool_ops = &mlx5i_pkey_ethtool_ops;
 
 	/* Use dummy rqs */
-	priv->channels.params.log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
+	priv->channels.params.log_rq_mtu_frames = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
 }
 
 /* Called directly before IPoIB netdevice is destroyed to cleanup SW structs */
...