Commit 79873fb6 authored by David S. Miller

Merge branch 'mlx5-fixes'

Saeed Mahameed says:

====================
Mellanox mlx5e fixes for 4.11-rc1

This series includes some important bug fixes for the mlx5e driver.

Three misc fixes:
From Mohamad, a compilation fix for s390 systems.
From me, a fix for driver unload when switchdev mode is on.
From Tariq, a HW LRO frag size optimization for the case where build_skb
is not used (striding RQ mode).

Three CQE compression related fixes:
Two fixes from Tariq and me, to correctly set up CQE compression
parameters on driver load and on arbitrary user modifications.
The last patch fixes a critical issue, originally reported by Tom,
where the driver reported csum errors, or even hit page reference
issues, when CQE compression is enabled and highly active.

For your convenience, this series was generated on top of the net-next branch:
005c3490 ('Revert "ath10k: Search SMBIOS for OEM board file extension"')

for -stable:
net/mlx5e: Register/unregister vport representors on interface (for kernel >= 4.9)
net/mlx5e: Do not reduce LRO WQE size when not using build_skb (for kernel >= 4.9)
net/mlx5e: Fix broken CQE compression initialization (for kernel >= 4.9)
net/mlx5e: Update MPWQE stride size when modifying CQE compress state (for kernel >= 4.7)
net/mlx5e: Fix wrong CQE decompression (for kernel >= 4.7)
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents eee2faab 36154be4
@@ -816,6 +816,7 @@ int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
 void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
 				 u8 cq_period_mode);
+void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type);
 
 static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
 				      struct mlx5_wqe_ctrl_seg *ctrl, int bf_sz)
@@ -1487,6 +1487,7 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
 	mlx5e_modify_rx_cqe_compression_locked(priv, enable);
 	priv->params.rx_cqe_compress_def = enable;
+	mlx5e_set_rq_type_params(priv, priv->params.rq_wq_type);
 
 	return 0;
 }
@@ -79,9 +79,10 @@ static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
 		MLX5_CAP_ETH(mdev, reg_umr_sq);
 }
 
-static void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type)
+void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type)
 {
 	priv->params.rq_wq_type = rq_type;
+	priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
 	switch (priv->params.rq_wq_type) {
 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
 		priv->params.log_rq_size = is_kdump_kernel() ?
@@ -98,6 +99,10 @@ static void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type)
 		priv->params.log_rq_size = is_kdump_kernel() ?
 			MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
 			MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
+
+		/* Extra room needed for build_skb */
+		priv->params.lro_wqe_sz -= MLX5_RX_HEADROOM +
+			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 	}
 
 	priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
 					    BIT(priv->params.log_rq_size));
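
For context on the hunk above: the default LRO WQE size is now applied for both RQ types, and the build_skb overhead (RX headroom plus the skb_shared_info tail) is subtracted only in the linked-list case, where the SKB is built directly on top of the receive buffer. A minimal standalone sketch of that computation (not part of the patch); the constants below are assumptions standing in for MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ, MLX5_RX_HEADROOM and SKB_DATA_ALIGN(sizeof(struct skb_shared_info)):

#include <stdio.h>

/* Assumed stand-ins for the driver constants; the real values live in en.h. */
#define DEFAULT_LRO_WQE_SZ (64 * 1024)
#define RX_HEADROOM 256
#define SHINFO_OVERHEAD 320

/* Same shape as the hunk: start from the default for every RQ type and
 * subtract the build_skb overhead only for the linked-list (non-striding) RQ. */
static unsigned int lro_wqe_sz(int striding_rq)
{
	unsigned int sz = DEFAULT_LRO_WQE_SZ;

	if (!striding_rq)
		sz -= RX_HEADROOM + SHINFO_OVERHEAD; /* extra room needed for build_skb */
	return sz;
}

int main(void)
{
	printf("striding RQ:    %u bytes\n", lro_wqe_sz(1)); /* keeps the full default */
	printf("linked-list RQ: %u bytes\n", lro_wqe_sz(0)); /* reduced for build_skb */
	return 0;
}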
@@ -3521,6 +3526,9 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
 			cqe_compress_heuristic(link_speed, pci_bw);
 	}
 
+	MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS,
+			priv->params.rx_cqe_compress_def);
+
 	mlx5e_set_rq_priv_params(priv);
 	if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
 		priv->params.lro_en = true;
@@ -3547,16 +3555,9 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
 	mlx5e_build_default_indir_rqt(mdev, priv->params.indirection_rqt,
 				      MLX5E_INDIR_RQT_SIZE, profile->max_nch(mdev));
 
-	priv->params.lro_wqe_sz =
-		MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ -
-		/* Extra room needed for build_skb */
-		MLX5_RX_HEADROOM -
-		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-
 	/* Initialize pflags */
 	MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER,
 			priv->params.rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
-	MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS, priv->params.rx_cqe_compress_def);
 
 	mutex_init(&priv->state_lock);
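
Taken with the previous hunk, the point of this change is ordering: the MLX5E_PFLAG_RX_CQE_COMPRESS default is now set before mlx5e_set_rq_priv_params() runs, because the RQ/MPWQE parameters derived there depend on the compression state; previously the pflag was only initialized afterwards, so those parameters were computed from a stale value. A tiny standalone sketch of that ordering bug pattern (not the driver code; field names and stride sizes are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the driver's parameter block. */
struct params {
	bool cqe_compress;   /* the pflag default                      */
	int mpwqe_stride_sz; /* derived value that depends on the flag */
};

/* Plays the role of mlx5e_set_rq_priv_params(): derive from the flag. */
static void set_rq_params(struct params *p)
{
	p->mpwqe_stride_sz = p->cqe_compress ? 128 : 64; /* assumed sizes */
}

int main(void)
{
	struct params p = { 0 };

	/* Old ordering: derive first, set the flag later -> stale stride size. */
	set_rq_params(&p);
	p.cqe_compress = true;
	printf("old ordering: stride = %d\n", p.mpwqe_stride_sz); /* 64 */

	/* New ordering (this series): set the flag, then derive. */
	p.cqe_compress = true;
	set_rq_params(&p);
	printf("new ordering: stride = %d\n", p.mpwqe_stride_sz); /* 128 */
	return 0;
}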
@@ -3970,6 +3971,19 @@ static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev)
 	}
 }
 
+static void mlx5e_unregister_vport_rep(struct mlx5_core_dev *mdev)
+{
+	struct mlx5_eswitch *esw = mdev->priv.eswitch;
+	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
+	int vport;
+
+	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
+		return;
+
+	for (vport = 1; vport < total_vfs; vport++)
+		mlx5_eswitch_unregister_vport_rep(esw, vport);
+}
+
 void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -4016,6 +4030,7 @@ static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv)
 		return err;
 	}
 
+	mlx5e_register_vport_rep(mdev);
 	return 0;
 }
@@ -4027,6 +4042,7 @@ static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
 	if (!netif_device_present(netdev))
 		return;
 
+	mlx5e_unregister_vport_rep(mdev);
 	mlx5e_detach_netdev(mdev, netdev);
 	mlx5e_destroy_mdev_resources(mdev);
 }
@@ -4045,8 +4061,6 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
 	if (err)
 		return NULL;
 
-	mlx5e_register_vport_rep(mdev);
-
 	if (MLX5_CAP_GEN(mdev, vport_group_manager))
 		ppriv = &esw->offloads.vport_reps[0];
@@ -4098,13 +4112,7 @@ void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv)
 
 static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
 {
-	struct mlx5_eswitch *esw = mdev->priv.eswitch;
-	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
 	struct mlx5e_priv *priv = vpriv;
-	int vport;
-
-	for (vport = 1; vport < total_vfs; vport++)
-		mlx5_eswitch_unregister_vport_rep(esw, vport);
 
 	unregister_netdev(priv->netdev);
 	mlx5e_detach(mdev, vpriv);
@@ -30,6 +30,7 @@
  * SOFTWARE.
  */
 
+#include <linux/prefetch.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <linux/tcp.h>
@@ -93,19 +94,18 @@ static inline void mlx5e_cqes_update_owner(struct mlx5e_cq *cq, u32 cqcc, int n)
 static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
 					struct mlx5e_cq *cq, u32 cqcc)
 {
-	u16 wqe_cnt_step;
-
 	cq->title.byte_cnt = cq->mini_arr[cq->mini_arr_idx].byte_cnt;
 	cq->title.check_sum = cq->mini_arr[cq->mini_arr_idx].checksum;
 	cq->title.op_own &= 0xf0;
 	cq->title.op_own |= 0x01 & (cqcc >> cq->wq.log_sz);
 	cq->title.wqe_counter = cpu_to_be16(cq->decmprs_wqe_counter);
 
-	wqe_cnt_step =
-		rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
-			mpwrq_get_cqe_consumed_strides(&cq->title) : 1;
-	cq->decmprs_wqe_counter =
-		(cq->decmprs_wqe_counter + wqe_cnt_step) & rq->wq.sz_m1;
+	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
+		cq->decmprs_wqe_counter +=
+			mpwrq_get_cqe_consumed_strides(&cq->title);
+	else
+		cq->decmprs_wqe_counter =
+			(cq->decmprs_wqe_counter + 1) & rq->wq.sz_m1;
 }
 
 static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
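
What the hunk above fixes: for the striding RQ, the decompressed wqe_counter accumulates the strides consumed by the title CQE and must not be wrapped with rq->wq.sz_m1 (that mask describes the WQE ring, not strides), while the linked-list RQ still advances one WQE at a time modulo the ring size. A standalone sketch of the two update rules (not the driver code; the ring mask below is an assumption):

#include <stdint.h>
#include <stdio.h>

#define WQ_SZ_M1 0x3ff /* assumed WQE ring mask (ring of 1024 entries) */

/* Striding RQ: accumulate the strides the title CQE consumed. */
static uint16_t next_counter_striding(uint16_t counter, uint16_t consumed_strides)
{
	return counter + consumed_strides;
}

/* Linked-list RQ: one WQE per CQE, wrapped by the ring mask. */
static uint16_t next_counter_linked_list(uint16_t counter)
{
	return (counter + 1) & WQ_SZ_M1;
}

int main(void)
{
	/* With the old code, a striding-RQ counter of 1020 plus 8 consumed
	 * strides was masked to (1028 & 0x3ff) = 4 instead of 1028, which the
	 * cover letter ties to csum errors and page reference issues. */
	printf("striding:    %u\n", next_counter_striding(1020, 8));
	printf("linked list: %u\n", next_counter_linked_list(1020));
	return 0;
}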
@@ -171,6 +171,7 @@ void mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val)
 		mlx5e_close_locked(priv->netdev);
 
 	MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS, val);
+	mlx5e_set_rq_type_params(priv, priv->params.rq_wq_type);
 
 	if (was_opened)
 		mlx5e_open_locked(priv->netdev);
@@ -30,6 +30,7 @@
  * SOFTWARE.
  */
 
+#include <linux/prefetch.h>
 #include <linux/ip.h>
 #include <linux/udp.h>
 #include <net/udp.h>