Commit f12ed9c0 authored by David S. Miller

Merge tag 'mlx5-updates-2022-11-12' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2022-11-12

Misc updates to mlx5 driver

1) Support enhanced CQE compression on ConnectX-6 Dx,
   reducing IRQ rate, CPU utilization, and latency.

2) Connection tracking: Optimize the pre_ct table lookup for rules
   installed on chain 0.

3) Implement ethtool get_link_ext_stats for PHY down events

4) Expose device vhca_id to debugfs

5) Misc cleanups and trivial changes
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 6970ef27 e07c4924
@@ -840,7 +840,7 @@ static const struct devlink_trap_group mlx5_trap_groups_arr[] = {
 	DEVLINK_TRAP_GROUP_GENERIC(L2_DROPS, 0),
 };
 
-static int mlx5_devlink_traps_register(struct devlink *devlink)
+int mlx5_devlink_traps_register(struct devlink *devlink)
 {
 	struct mlx5_core_dev *core_dev = devlink_priv(devlink);
 	int err;
@@ -862,7 +862,7 @@ static int mlx5_devlink_traps_register(struct devlink *devlink)
 	return err;
 }
 
-static void mlx5_devlink_traps_unregister(struct devlink *devlink)
+void mlx5_devlink_traps_unregister(struct devlink *devlink)
 {
 	devl_traps_unregister(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr));
 	devl_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
@@ -889,17 +889,11 @@ int mlx5_devlink_register(struct devlink *devlink)
 	if (err)
 		goto max_uc_list_err;
 
-	err = mlx5_devlink_traps_register(devlink);
-	if (err)
-		goto traps_reg_err;
-
 	if (!mlx5_core_is_mp_slave(dev))
 		devlink_set_features(devlink, DEVLINK_F_RELOAD);
 
 	return 0;
 
-traps_reg_err:
-	mlx5_devlink_max_uc_list_param_unregister(devlink);
 max_uc_list_err:
 	mlx5_devlink_auxdev_params_unregister(devlink);
 auxdev_reg_err:
@@ -910,7 +904,6 @@ int mlx5_devlink_register(struct devlink *devlink)
 
 void mlx5_devlink_unregister(struct devlink *devlink)
 {
-	mlx5_devlink_traps_unregister(devlink);
 	mlx5_devlink_max_uc_list_param_unregister(devlink);
 	mlx5_devlink_auxdev_params_unregister(devlink);
 	devlink_params_unregister(devlink, mlx5_devlink_params,
......
@@ -30,6 +30,8 @@ void mlx5_devlink_trap_report(struct mlx5_core_dev *dev, int trap_id, struct sk_
 int mlx5_devlink_trap_get_num_active(struct mlx5_core_dev *dev);
 int mlx5_devlink_traps_get_action(struct mlx5_core_dev *dev, int trap_id,
 				  enum devlink_trap_action *action);
+int mlx5_devlink_traps_register(struct devlink *devlink);
+void mlx5_devlink_traps_unregister(struct devlink *devlink);
 
 struct devlink *mlx5_devlink_alloc(struct device *dev);
 void mlx5_devlink_free(struct devlink *devlink);
......
@@ -344,6 +344,7 @@ enum {
 	MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */
 	MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, /* set when mini_cqe_resp_stride_index cap is used */
 	MLX5E_RQ_STATE_SHAMPO, /* set when SHAMPO cap is used */
+	MLX5E_RQ_STATE_MINI_CQE_ENHANCED, /* set when enhanced mini_cqe_cap is used */
 };
 
 struct mlx5e_cq {
@@ -370,6 +371,7 @@ struct mlx5e_cq_decomp {
 	u8                         mini_arr_idx;
 	u16                        left;
 	u16                        wqe_counter;
+	bool                       last_cqe_title;
 } ____cacheline_aligned_in_smp;
 
 enum mlx5e_dma_map_type {
......
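For context on the two fields added above: with CQE compression the device writes one full "title" CQE followed by compressed CQEs that pack several 8-byte mini CQEs, and the driver rebuilds full CQEs by merging each mini entry into the cached title. A minimal userspace sketch of that decompression idea, with illustrative layouts (not the exact mlx5 hardware format):

#include <stdint.h>
#include <stdio.h>

/* Illustrative layouts only -- not the real mlx5 CQE format. */
struct title_cqe { uint32_t shared_flags; uint16_t wqe_counter; };
struct mini_cqe  { uint32_t byte_cnt; uint16_t stride_idx; };

/* Rebuild one full CQE view: shared fields come from the title,
 * per-packet fields from the mini entry. */
static void decompress_one(const struct title_cqe *title,
			   const struct mini_cqe *mini)
{
	printf("flags=%#x bytes=%u stride=%u\n",
	       title->shared_flags, mini->byte_cnt, mini->stride_idx);
}

int main(void)
{
	struct title_cqe title = { .shared_flags = 0x11, .wqe_counter = 7 };
	struct mini_cqe block[3] = { { 1500, 0 }, { 64, 1 }, { 9000, 2 } };

	for (size_t i = 0; i < 3; i++)
		decompress_one(&title, &block[i]);
	return 0;
}

In the enhanced layout the title of a block may only arrive with the next poll cycle, which is what the new last_cqe_title flag tracks.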
@@ -607,14 +607,6 @@ void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
 	params->log_rq_mtu_frames = is_kdump_kernel() ?
 		MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
 		MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
-
-	mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
-		       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
-		       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
-		       BIT(mlx5e_mpwqe_get_log_rq_size(mdev, params, NULL)) :
-		       BIT(params->log_rq_mtu_frames),
-		       BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL)),
-		       MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
 }
 
 void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
@@ -852,6 +844,10 @@ static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
 	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
 		MLX5_SET(cqc, cqc, mini_cqe_res_format, hw_stridx ?
 			 MLX5_CQE_FORMAT_CSUM_STRIDX : MLX5_CQE_FORMAT_CSUM);
+		MLX5_SET(cqc, cqc, cqe_compression_layout,
+			 MLX5_CAP_GEN(mdev, enhanced_cqe_compression) ?
+			 MLX5_CQE_COMPRESS_LAYOUT_ENHANCED :
+			 MLX5_CQE_COMPRESS_LAYOUT_BASIC);
 		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
 	}
......
@@ -154,4 +154,18 @@ int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
 u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
 int mlx5e_validate_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
 
+static inline void mlx5e_params_print_info(struct mlx5_core_dev *mdev,
+					   struct mlx5e_params *params)
+{
+	mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d %s)\n",
+		       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
+		       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
+		       BIT(mlx5e_mpwqe_get_log_rq_size(mdev, params, NULL)) :
+		       BIT(params->log_rq_mtu_frames),
+		       BIT(mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL)),
+		       MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS),
+		       MLX5_CAP_GEN(mdev, enhanced_cqe_compression) ?
+		       "enhanced" : "basic");
+};
+
 #endif /* __MLX5_EN_PARAMS_H__ */
@@ -690,7 +690,6 @@ static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
 
 	err = mapping_find(uplink_priv->tunnel_mapping, tun_id, &key);
 	if (err) {
-		WARN_ON_ONCE(true);
 		netdev_dbg(priv->netdev,
 			   "Couldn't find tunnel for tun_id: %d, err: %d\n",
 			   tun_id, err);
......
@@ -1774,35 +1774,42 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
 /* We translate the tc filter with CT action to the following HW model:
  *
  * +---------------------+
  * + ft prio (tc chain)  +
  * + original match      +
  * +---------------------+
  *      | set chain miss mapping
  *      | set fte_id
  *      | set tunnel_id
  *      | do decap
- *      v
- * +---------------------+
- * + pre_ct/pre_ct_nat   +  if matches      +----------------------+
- * + zone+nat match      +----------------->+ post_act (see below) +
- * +---------------------+  set zone        +----------------------+
- *      | set zone
+ *      |
+ *      +-------------+
+ *      | Chain 0     |
+ *      | optimization|
+ *      |             v
+ *      |     +---------------------+
+ *      |     + pre_ct/pre_ct_nat   +  if matches      +----------------------+
+ *      |     + zone+nat match      +----------------->+ post_act (see below) +
+ *      |     +---------------------+  set zone        +----------------------+
+ *      |             |
+ *      +-------------+ set zone
+ *      |
  *      v
  * +--------------------+
  * + CT (nat or no nat) +
  * + tuple + zone match +
  * +--------------------+
  *      | set mark
  *      | set labels_id
  *      | set established
  *      | set zone_restore
  *      | do nat (if needed)
  *      v
  * +--------------+
  * + post_act     + original filter actions
  * + fte_id match +------------------------>
  * +--------------+
+ *
  */
 static struct mlx5_flow_handle *
 __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
@@ -1818,6 +1825,7 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
 	struct mlx5_ct_flow *ct_flow;
 	int chain_mapping = 0, err;
 	struct mlx5_ct_ft *ft;
+	u16 zone;
 
 	ct_flow = kzalloc(sizeof(*ct_flow), GFP_KERNEL);
 	if (!ct_flow) {
@@ -1884,6 +1892,25 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
 		}
 	}
 
+	/* Change original rule point to ct table
+	 * Chain 0 sets the zone and jumps to ct table
+	 * Other chains jump to pre_ct table to align with act_ct cached logic
+	 */
+	pre_ct_attr->dest_chain = 0;
+	if (!attr->chain) {
+		zone = ft->zone & MLX5_CT_ZONE_MASK;
+		err = mlx5e_tc_match_to_reg_set(priv->mdev, pre_mod_acts, ct_priv->ns_type,
+						ZONE_TO_REG, zone);
+		if (err) {
+			ct_dbg("Failed to set zone register mapping");
+			goto err_mapping;
+		}
+
+		pre_ct_attr->dest_ft = nat ? ct_priv->ct_nat : ct_priv->ct;
+	} else {
+		pre_ct_attr->dest_ft = nat ? ft->pre_ct_nat.ft : ft->pre_ct.ft;
+	}
+
 	mod_hdr = mlx5_modify_header_alloc(priv->mdev, ct_priv->ns_type,
 					   pre_mod_acts->num_actions,
 					   pre_mod_acts->actions);
@@ -1893,10 +1920,6 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
 		goto err_mapping;
 	}
 	pre_ct_attr->modify_hdr = mod_hdr;
 
-	/* Change original rule point to ct table */
-	pre_ct_attr->dest_chain = 0;
-	pre_ct_attr->dest_ft = nat ? ft->pre_ct_nat.ft : ft->pre_ct.ft;
-
 	ct_flow->pre_ct_rule = mlx5_tc_rule_insert(priv, orig_spec,
 						   pre_ct_attr);
 	if (IS_ERR(ct_flow->pre_ct_rule)) {
......
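A compact restatement of the chain-0 optimization above, as a sketch with simplified stand-in types (not the driver's real structures): rules installed on chain 0 know their zone at insert time, so the zone register is written up front and the pre_ct hop is skipped; rules on other chains keep the pre_ct lookup.

#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-ins for the driver's attribute/table types. */
struct table { const char *name; };
struct rule_attr { uint32_t chain; };

/* Pick the destination table the way the diff above does: chain 0
 * jumps straight to the CT table (zone already set via register);
 * other chains still go through pre_ct so the zone can be matched. */
static struct table *pick_dest(const struct rule_attr *attr, bool nat,
			       struct table *ct, struct table *ct_nat,
			       struct table *pre_ct, struct table *pre_ct_nat)
{
	if (attr->chain == 0)
		return nat ? ct_nat : ct;   /* chain-0 shortcut */
	return nat ? pre_ct_nat : pre_ct;   /* zone matched in pre_ct */
}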
@@ -125,10 +125,8 @@ mlx5e_get_ktls_tx_priv_ctx(struct tls_context *tls_ctx)
 /* struct for callback API management */
 struct mlx5e_async_ctx {
 	struct mlx5_async_work context;
-	struct mlx5_async_ctx async_ctx;
-	struct work_struct work;
+	struct mlx5_async_ctx *async_ctx;
 	struct mlx5e_ktls_offload_context_tx *priv_tx;
-	struct completion complete;
 	int err;
 	union {
 		u32 out_create[MLX5_ST_SZ_DW(create_tis_out)];
@@ -136,34 +134,33 @@ struct mlx5e_async_ctx {
 	};
 };
 
-static struct mlx5e_async_ctx *mlx5e_bulk_async_init(struct mlx5_core_dev *mdev, int n)
+struct mlx5e_bulk_async_ctx {
+	struct mlx5_async_ctx async_ctx;
+	DECLARE_FLEX_ARRAY(struct mlx5e_async_ctx, arr);
+};
+
+static struct mlx5e_bulk_async_ctx *mlx5e_bulk_async_init(struct mlx5_core_dev *mdev, int n)
 {
-	struct mlx5e_async_ctx *bulk_async;
+	struct mlx5e_bulk_async_ctx *bulk_async;
+	int sz;
 	int i;
 
-	bulk_async = kvcalloc(n, sizeof(struct mlx5e_async_ctx), GFP_KERNEL);
+	sz = struct_size(bulk_async, arr, n);
+	bulk_async = kvzalloc(sz, GFP_KERNEL);
 	if (!bulk_async)
 		return NULL;
 
-	for (i = 0; i < n; i++) {
-		struct mlx5e_async_ctx *async = &bulk_async[i];
+	mlx5_cmd_init_async_ctx(mdev, &bulk_async->async_ctx);
 
-		mlx5_cmd_init_async_ctx(mdev, &async->async_ctx);
-		init_completion(&async->complete);
-	}
+	for (i = 0; i < n; i++)
+		bulk_async->arr[i].async_ctx = &bulk_async->async_ctx;
 
 	return bulk_async;
 }
 
-static void mlx5e_bulk_async_cleanup(struct mlx5e_async_ctx *bulk_async, int n)
+static void mlx5e_bulk_async_cleanup(struct mlx5e_bulk_async_ctx *bulk_async)
 {
-	int i;
-
-	for (i = 0; i < n; i++) {
-		struct mlx5e_async_ctx *async = &bulk_async[i];
-
-		mlx5_cmd_cleanup_async_ctx(&async->async_ctx);
-	}
+	mlx5_cmd_cleanup_async_ctx(&bulk_async->async_ctx);
 	kvfree(bulk_async);
 }
@@ -176,12 +173,10 @@ static void create_tis_callback(int status, struct mlx5_async_work *context)
 	if (status) {
 		async->err = status;
 		priv_tx->create_err = 1;
-		goto out;
+		return;
 	}
 
 	priv_tx->tisn = MLX5_GET(create_tis_out, async->out_create, tisn);
-out:
-	complete(&async->complete);
 }
 
 static void destroy_tis_callback(int status, struct mlx5_async_work *context)
@@ -190,7 +185,6 @@ static void destroy_tis_callback(int status, struct mlx5_async_work *context)
 		container_of(context, struct mlx5e_async_ctx, context);
 	struct mlx5e_ktls_offload_context_tx *priv_tx = async->priv_tx;
 
-	complete(&async->complete);
 	kfree(priv_tx);
 }
 
@@ -214,7 +208,7 @@ mlx5e_tls_priv_tx_init(struct mlx5_core_dev *mdev, struct mlx5e_tls_sw_stats *sw
 		goto err_out;
 	} else {
 		async->priv_tx = priv_tx;
-		err = mlx5e_ktls_create_tis_cb(mdev, &async->async_ctx,
+		err = mlx5e_ktls_create_tis_cb(mdev, async->async_ctx,
 					       async->out_create, sizeof(async->out_create),
 					       create_tis_callback, &async->context);
 		if (err)
@@ -232,13 +226,12 @@ static void mlx5e_tls_priv_tx_cleanup(struct mlx5e_ktls_offload_context_tx *priv
 				     struct mlx5e_async_ctx *async)
 {
 	if (priv_tx->create_err) {
-		complete(&async->complete);
 		kfree(priv_tx);
 		return;
 	}
 	async->priv_tx = priv_tx;
 	mlx5e_ktls_destroy_tis_cb(priv_tx->mdev, priv_tx->tisn,
-				  &async->async_ctx,
+				  async->async_ctx,
 				  async->out_destroy, sizeof(async->out_destroy),
 				  destroy_tis_callback, &async->context);
 }
@@ -247,7 +240,7 @@ static void mlx5e_tls_priv_tx_list_cleanup(struct mlx5_core_dev *mdev,
 					   struct list_head *list, int size)
 {
 	struct mlx5e_ktls_offload_context_tx *obj, *n;
-	struct mlx5e_async_ctx *bulk_async;
+	struct mlx5e_bulk_async_ctx *bulk_async;
 	int i;
 
 	bulk_async = mlx5e_bulk_async_init(mdev, size);
@@ -256,16 +249,11 @@ static void mlx5e_tls_priv_tx_list_cleanup(struct mlx5_core_dev *mdev,
 
 	i = 0;
 	list_for_each_entry_safe(obj, n, list, list_node) {
-		mlx5e_tls_priv_tx_cleanup(obj, &bulk_async[i]);
+		mlx5e_tls_priv_tx_cleanup(obj, &bulk_async->arr[i]);
 		i++;
 	}
 
-	for (i = 0; i < size; i++) {
-		struct mlx5e_async_ctx *async = &bulk_async[i];
-
-		wait_for_completion(&async->complete);
-	}
-	mlx5e_bulk_async_cleanup(bulk_async, size);
+	mlx5e_bulk_async_cleanup(bulk_async);
 }
 
 /* Recycling pool API */
@@ -291,7 +279,7 @@ static void create_work(struct work_struct *work)
 	struct mlx5e_tls_tx_pool *pool =
 		container_of(work, struct mlx5e_tls_tx_pool, create_work);
 	struct mlx5e_ktls_offload_context_tx *obj;
-	struct mlx5e_async_ctx *bulk_async;
+	struct mlx5e_bulk_async_ctx *bulk_async;
 	LIST_HEAD(local_list);
 	int i, j, err = 0;
@@ -300,7 +288,7 @@ static void create_work(struct work_struct *work)
 		return;
 
 	for (i = 0; i < MLX5E_TLS_TX_POOL_BULK; i++) {
-		obj = mlx5e_tls_priv_tx_init(pool->mdev, pool->sw_stats, &bulk_async[i]);
+		obj = mlx5e_tls_priv_tx_init(pool->mdev, pool->sw_stats, &bulk_async->arr[i]);
 		if (IS_ERR(obj)) {
 			err = PTR_ERR(obj);
 			break;
@@ -309,14 +297,13 @@ static void create_work(struct work_struct *work)
 	}
 
 	for (j = 0; j < i; j++) {
-		struct mlx5e_async_ctx *async = &bulk_async[j];
+		struct mlx5e_async_ctx *async = &bulk_async->arr[j];
 
-		wait_for_completion(&async->complete);
 		if (!err && async->err)
 			err = async->err;
 	}
 	atomic64_add(i, &pool->sw_stats->tx_tls_pool_alloc);
-	mlx5e_bulk_async_cleanup(bulk_async, MLX5E_TLS_TX_POOL_BULK);
+	mlx5e_bulk_async_cleanup(bulk_async);
 	if (err)
 		goto err_out;
......
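The kTLS refactor above replaces one completion per TIS command with a single mlx5_async_ctx shared by the whole bulk, so teardown waits once inside mlx5_cmd_cleanup_async_ctx() instead of N times. A userspace analog of the single-allocation flexible-array pattern (struct_size()/DECLARE_FLEX_ARRAY in the kernel; all names here are hypothetical):

#include <stdlib.h>
#include <stddef.h>

struct shared { int refcount; };

struct item {
	struct shared *owner;	/* each entry points back at the shared ctx */
	int err;
};

/* One allocation: shared header plus n trailing entries, the userspace
 * equivalent of kvzalloc(struct_size(b, arr, n), GFP_KERNEL). */
struct bulk {
	struct shared shared;
	struct item arr[];	/* flexible array member */
};

static struct bulk *bulk_init(size_t n)
{
	struct bulk *b = calloc(1, sizeof(*b) + n * sizeof(b->arr[0]));

	if (!b)
		return NULL;
	for (size_t i = 0; i < n; i++)
		b->arr[i].owner = &b->shared;
	return b;
}

int main(void)
{
	struct bulk *b = bulk_init(4);

	free(b);	/* one free tears down header and entries together */
	return 0;
}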
@@ -57,7 +57,6 @@ struct mlx5e_arfs_tables {
 	struct arfs_table arfs_tables[ARFS_NUM_TYPES];
 	/* Protect aRFS rules list */
 	spinlock_t                     arfs_lock;
-	struct list_head               rules;
 	int                            last_filter_id;
 	struct workqueue_struct        *wq;
 };
@@ -376,7 +375,6 @@ int mlx5e_arfs_create_tables(struct mlx5e_flow_steering *fs,
 		return -ENOMEM;
 
 	spin_lock_init(&arfs->arfs_lock);
-	INIT_LIST_HEAD(&arfs->rules);
 	arfs->wq = create_singlethread_workqueue("mlx5e_arfs");
 	if (!arfs->wq)
 		goto err;
......
@@ -2463,4 +2463,5 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
 	.get_eth_mac_stats = mlx5e_get_eth_mac_stats,
 	.get_eth_ctrl_stats = mlx5e_get_eth_ctrl_stats,
 	.get_rmon_stats    = mlx5e_get_rmon_stats,
+	.get_link_ext_stats = mlx5e_get_link_ext_stats
 };
@@ -1205,6 +1205,13 @@ int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
 	    MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index))
 		__set_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state);
 
+	/* For enhanced CQE compression packet processing: decompress
+	 * the session according to the enhanced layout.
+	 */
+	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) &&
+	    MLX5_CAP_GEN(mdev, enhanced_cqe_compression))
+		__set_bit(MLX5E_RQ_STATE_MINI_CQE_ENHANCED, &rq->state);
+
 	return 0;
 
 err_destroy_rq:
@@ -1895,6 +1902,7 @@ static int mlx5e_alloc_cq_common(struct mlx5e_priv *priv,
 		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
 
 		cqe->op_own = 0xf1;
+		cqe->validity_iteration_count = 0xff;
 	}
 
 	cq->mdev = mdev;
@@ -3061,7 +3069,10 @@ int mlx5e_open_locked(struct net_device *netdev)
 	if (err)
 		goto err_clear_state_opened_flag;
 
-	priv->profile->update_rx(priv);
+	err = priv->profile->update_rx(priv);
+	if (err)
+		goto err_close_channels;
+
 	mlx5e_selq_apply(&priv->selq);
 	mlx5e_activate_priv_channels(priv);
 	mlx5e_apply_traps(priv, true);
@@ -3071,6 +3082,8 @@ int mlx5e_open_locked(struct net_device *netdev)
 		mlx5e_queue_update_stats(priv);
 	return 0;
 
+err_close_channels:
+	mlx5e_close_channels(&priv->channels);
 err_clear_state_opened_flag:
 	clear_bit(MLX5E_STATE_OPENED, &priv->state);
 	mlx5e_selq_cancel(&priv->selq);
@@ -5947,6 +5960,7 @@ static int mlx5e_probe(struct auxiliary_device *adev,
 	mlx5e_dcbnl_init_app(priv);
 	mlx5_uplink_netdev_set(mdev, netdev);
+	mlx5e_params_print_info(mdev, &priv->channels.params);
 	return 0;
 
 err_resume:
......
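The update_rx() changes above (and the matching ipoib hunks later in this series) turn a previously ignored return value into a properly unwound error path. A minimal self-contained sketch of the kernel's goto-unwind idiom used here, with hypothetical resource names:

#include <stdlib.h>

/* Hypothetical acquire/release pair, for illustration only. */
static void *acquire(const char *what) { (void)what; return malloc(1); }
static void release(void *r) { free(r); }

static int open_device(void)
{
	void *channels, *rx;

	channels = acquire("channels");
	if (!channels)
		goto err_out;

	rx = acquire("rx");
	if (!rx)
		goto err_close_channels;	/* unwind in reverse order */

	return 0;	/* success: both resources stay held */

err_close_channels:
	release(channels);
err_out:
	return -1;
}

int main(void)
{
	return open_device() ? 1 : 0;
}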
@@ -89,6 +89,25 @@ static inline void mlx5e_read_cqe_slot(struct mlx5_cqwq *wq,
 	memcpy(data, mlx5_cqwq_get_wqe(wq, ci), sizeof(struct mlx5_cqe64));
 }
 
+static void mlx5e_read_enhanced_title_slot(struct mlx5e_rq *rq,
+					   struct mlx5_cqe64 *cqe)
+{
+	struct mlx5e_cq_decomp *cqd = &rq->cqd;
+	struct mlx5_cqe64 *title = &cqd->title;
+
+	memcpy(title, cqe, sizeof(struct mlx5_cqe64));
+
+	if (likely(test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state)))
+		return;
+
+	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
+		cqd->wqe_counter = mpwrq_get_cqe_stride_index(title) +
+			mpwrq_get_cqe_consumed_strides(title);
+	else
+		cqd->wqe_counter =
+			mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, be16_to_cpu(title->wqe_counter) + 1);
+}
+
 static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq,
 					 struct mlx5_cqwq *wq,
 					 u32 cqcc)
@@ -175,6 +194,38 @@ static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
 	cqd->title.rss_hash_result = 0;
 }
 
+static u32 mlx5e_decompress_enhanced_cqe(struct mlx5e_rq *rq,
+					 struct mlx5_cqwq *wq,
+					 struct mlx5_cqe64 *cqe,
+					 int budget_rem)
+{
+	struct mlx5e_cq_decomp *cqd = &rq->cqd;
+	u32 cqcc, left;
+	u32 i;
+
+	left = get_cqe_enhanced_num_mini_cqes(cqe);
+	/* Here we avoid breaking the cqe compression session in the middle
+	 * in case budget is not sufficient to handle all of it. In this case
+	 * we return work_done == budget_rem to give 'busy' napi indication.
+	 */
+	if (unlikely(left > budget_rem))
+		return budget_rem;
+
+	cqcc = wq->cc;
+	cqd->mini_arr_idx = 0;
+	memcpy(cqd->mini_arr, cqe, sizeof(struct mlx5_cqe64));
+	for (i = 0; i < left; i++, cqd->mini_arr_idx++, cqcc++) {
+		mlx5e_decompress_cqe_no_hash(rq, wq, cqcc);
+		INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
+				mlx5e_handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq_shampo,
+				rq, &cqd->title);
+	}
+	wq->cc = cqcc;
+	rq->stats->cqe_compress_pkts += left;
+
+	return left;
+}
+
 static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
 					     struct mlx5_cqwq *wq,
 					     int update_owner_only,
@@ -220,7 +271,7 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
 			rq, &cqd->title);
 	cqd->mini_arr_idx++;
 
-	return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem) - 1;
+	return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem);
 }
 
 static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq, struct page *page)
@@ -2211,45 +2262,102 @@ static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cq
 	mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
 }
 
-int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
+static int mlx5e_rx_cq_process_enhanced_cqe_comp(struct mlx5e_rq *rq,
+						 struct mlx5_cqwq *cqwq,
+						 int budget_rem)
 {
-	struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
-	struct mlx5_cqwq *cqwq = &cq->wq;
-	struct mlx5_cqe64 *cqe;
+	struct mlx5_cqe64 *cqe, *title_cqe = NULL;
+	struct mlx5e_cq_decomp *cqd = &rq->cqd;
 	int work_done = 0;
 
-	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
-		return 0;
+	cqe = mlx5_cqwq_get_cqe_enahnced_comp(cqwq);
+	if (!cqe)
+		return work_done;
 
-	if (rq->cqd.left) {
-		work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget);
-		if (work_done >= budget)
-			goto out;
+	if (cqd->last_cqe_title &&
+	    (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED)) {
+		rq->stats->cqe_compress_blks++;
+		cqd->last_cqe_title = false;
 	}
 
-	cqe = mlx5_cqwq_get_cqe(cqwq);
-	if (!cqe) {
-		if (unlikely(work_done))
-			goto out;
-		return 0;
+	do {
+		if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
+			if (title_cqe) {
+				mlx5e_read_enhanced_title_slot(rq, title_cqe);
+				title_cqe = NULL;
+				rq->stats->cqe_compress_blks++;
+			}
+			work_done +=
+				mlx5e_decompress_enhanced_cqe(rq, cqwq, cqe,
+							      budget_rem - work_done);
+			continue;
+		}
+		title_cqe = cqe;
+		mlx5_cqwq_pop(cqwq);
+
+		INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
+				mlx5e_handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq_shampo,
+				rq, cqe);
+		work_done++;
+	} while (work_done < budget_rem &&
+		 (cqe = mlx5_cqwq_get_cqe_enahnced_comp(cqwq)));
+
+	/* last cqe might be title on next poll bulk */
+	if (title_cqe) {
+		mlx5e_read_enhanced_title_slot(rq, title_cqe);
+		cqd->last_cqe_title = true;
 	}
 
-	do {
+	return work_done;
+}
+
+static int mlx5e_rx_cq_process_basic_cqe_comp(struct mlx5e_rq *rq,
+					      struct mlx5_cqwq *cqwq,
+					      int budget_rem)
+{
+	struct mlx5_cqe64 *cqe;
+	int work_done = 0;
+
+	if (rq->cqd.left)
+		work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget_rem);
+
+	while (work_done < budget_rem && (cqe = mlx5_cqwq_get_cqe(cqwq))) {
 		if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
 			work_done +=
 				mlx5e_decompress_cqes_start(rq, cqwq,
-							    budget - work_done);
+							    budget_rem - work_done);
 			continue;
 		}
 
 		mlx5_cqwq_pop(cqwq);
 
 		INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
 				mlx5e_handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq_shampo,
 				rq, cqe);
-	} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));
+		work_done++;
+	}
+
+	return work_done;
+}
+
+int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
+{
+	struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
+	struct mlx5_cqwq *cqwq = &cq->wq;
+	int work_done;
+
+	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
+		return 0;
+
+	if (test_bit(MLX5E_RQ_STATE_MINI_CQE_ENHANCED, &rq->state))
+		work_done = mlx5e_rx_cq_process_enhanced_cqe_comp(rq, cqwq,
+								  budget);
+	else
+		work_done = mlx5e_rx_cq_process_basic_cqe_comp(rq, cqwq,
+							       budget);
+
+	if (work_done == 0)
+		return 0;
 
-out:
 	if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state) && rq->hw_gro_data->skb)
 		mlx5e_shampo_flush_skb(rq, NULL, false);
......
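One policy in mlx5e_decompress_enhanced_cqe() above deserves a note: when a compressed block carries more mini CQEs than the remaining NAPI budget, the whole remaining budget is reported as consumed, so napi_poll stays "busy" and the block is retried intact on the next poll rather than being split mid-session. The shape of that policy in isolation, as a hedged standalone sketch:

/* Process whole batches only: a batch that does not fit in the
 * remaining budget is deferred by claiming the budget was exhausted. */
static int process_batch(int batch_len, int budget_rem)
{
	if (batch_len > budget_rem)
		return budget_rem;	/* signal "busy", keep batch intact */

	/* ... handle all batch_len entries here ... */
	return batch_len;
}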
@@ -1241,6 +1241,23 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
 	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
 }
 
+void mlx5e_get_link_ext_stats(struct net_device *dev,
+			      struct ethtool_link_ext_stats *stats)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	u32 out[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
+	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
+	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+
+	MLX5_SET(ppcnt_reg, in, local_port, 1);
+	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
+	mlx5_core_access_reg(priv->mdev, in, sz, out,
+			     MLX5_ST_SZ_BYTES(ppcnt_reg), MLX5_REG_PPCNT, 0, 0);
+
+	stats->link_down_events = MLX5_GET(ppcnt_reg, out,
+					   counter_set.phys_layer_cntrs.link_down_events);
+}
+
 static int fec_num_lanes(struct mlx5_core_dev *dev)
 {
 	u32 out[MLX5_ST_SZ_DW(pmlp_reg)] = {};
......
@@ -126,6 +126,8 @@ void mlx5e_stats_eth_ctrl_get(struct mlx5e_priv *priv,
 void mlx5e_stats_rmon_get(struct mlx5e_priv *priv,
 			  struct ethtool_rmon_stats *rmon,
 			  const struct ethtool_rmon_hist_range **ranges);
+void mlx5e_get_link_ext_stats(struct net_device *dev,
+			      struct ethtool_link_ext_stats *stats);
 
 /* Concrete NIC Stats */
......
@@ -1060,12 +1060,9 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
 		 hash_hairpin_info(peer_id, match_prio));
 	mutex_unlock(&tc->hairpin_tbl_lock);
 
-	params.log_data_size = 16;
-	params.log_data_size = min_t(u8, params.log_data_size,
-				     MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
-	params.log_data_size = max_t(u8, params.log_data_size,
-				     MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));
+	params.log_data_size = clamp_t(u8, 16,
+				       MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz),
+				       MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
 
 	params.log_num_packets = params.log_data_size -
 				 MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
 	params.log_num_packets = min_t(u8, params.log_num_packets,
......
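The hairpin hunk above folds a min_t()/max_t() pair into a single clamp_t(type, val, lo, hi) call; both forms bound the default of 16 into the device's [log_min, log_max] capability window. A quick userspace equivalence check, with the kernel macro definitions inlined so the sketch is self-contained:

#include <assert.h>

#define min_t(t, a, b)		((t)(a) < (t)(b) ? (t)(a) : (t)(b))
#define max_t(t, a, b)		((t)(a) > (t)(b) ? (t)(a) : (t)(b))
#define clamp_t(t, val, lo, hi)	min_t(t, max_t(t, val, lo), hi)

int main(void)
{
	/* old form: bound 16 from above, then from below */
	unsigned char old = max_t(unsigned char,
				  min_t(unsigned char, 16, 20), 5);
	/* new form: one call */
	unsigned char new = clamp_t(unsigned char, 16, 5, 20);

	assert(old == new && new == 16);
	return 0;
}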
@@ -1722,7 +1722,7 @@ void mlx5_esw_bridge_fdb_update_used(struct net_device *dev, u16 vport_num, u16
 	entry = mlx5_esw_bridge_fdb_lookup(bridge, fdb_info->addr, fdb_info->vid);
 	if (!entry) {
 		esw_debug(br_offloads->esw->dev,
-			  "FDB entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
+			  "FDB update entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
 			  fdb_info->addr, fdb_info->vid, vport_num);
 		return;
 	}
@@ -1775,9 +1775,9 @@ void mlx5_esw_bridge_fdb_remove(struct net_device *dev, u16 vport_num, u16 esw_o
 	bridge = port->bridge;
 	entry = mlx5_esw_bridge_fdb_lookup(bridge, fdb_info->addr, fdb_info->vid);
 	if (!entry) {
-		esw_warn(esw->dev,
-			 "FDB entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
-			 fdb_info->addr, fdb_info->vid, vport_num);
+		esw_debug(esw->dev,
+			  "FDB remove entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
+			  fdb_info->addr, fdb_info->vid, vport_num);
 		return;
 	}
......
@@ -561,12 +561,17 @@ static int mlx5i_open(struct net_device *netdev)
 	if (err)
 		goto err_remove_fs_underlay_qp;
 
-	epriv->profile->update_rx(epriv);
+	err = epriv->profile->update_rx(epriv);
+	if (err)
+		goto err_close_channels;
+
 	mlx5e_activate_priv_channels(epriv);
 
 	mutex_unlock(&epriv->state_lock);
 	return 0;
 
+err_close_channels:
+	mlx5e_close_channels(&epriv->channels);
 err_remove_fs_underlay_qp:
 	mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qpn);
 err_reset_qp:
......
@@ -221,12 +221,16 @@ static int mlx5i_pkey_open(struct net_device *netdev)
 		mlx5_core_warn(mdev, "opening child channels failed, %d\n", err);
 		goto err_clear_state_opened_flag;
 	}
-	epriv->profile->update_rx(epriv);
+
+	err = epriv->profile->update_rx(epriv);
+	if (err)
+		goto err_close_channels;
+
 	mlx5e_activate_priv_channels(epriv);
 	mutex_unlock(&epriv->state_lock);
 
 	return 0;
 
+err_close_channels:
+	mlx5e_close_channels(&epriv->channels);
 err_clear_state_opened_flag:
 	mlx5e_destroy_tis(mdev, epriv->tisn[0][0]);
 err_remove_rx_uderlay_qp:
......
@@ -1306,8 +1306,15 @@ static int mlx5_load(struct mlx5_core_dev *dev)
 
 	mlx5_sf_dev_table_create(dev);
 
+	err = mlx5_devlink_traps_register(priv_to_devlink(dev));
+	if (err)
+		goto err_traps_reg;
+
 	return 0;
 
+err_traps_reg:
+	mlx5_sf_dev_table_destroy(dev);
+	mlx5_sriov_detach(dev);
+
 err_sriov:
 	mlx5_lag_remove_mdev(dev);
 	mlx5_ec_cleanup(dev);
@@ -1336,6 +1343,7 @@ static int mlx5_load(struct mlx5_core_dev *dev)
 
 static void mlx5_unload(struct mlx5_core_dev *dev)
 {
+	mlx5_devlink_traps_unregister(priv_to_devlink(dev));
 	mlx5_sf_dev_table_destroy(dev);
 	mlx5_sriov_detach(dev);
 	mlx5_eswitch_disable(dev->priv.eswitch);
@@ -1580,6 +1588,16 @@ static int mlx5_hca_caps_alloc(struct mlx5_core_dev *dev)
 	return -ENOMEM;
 }
 
+static int vhca_id_show(struct seq_file *file, void *priv)
+{
+	struct mlx5_core_dev *dev = file->private;
+
+	seq_printf(file, "0x%x\n", MLX5_CAP_GEN(dev, vhca_id));
+	return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(vhca_id);
+
 int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
 {
 	struct mlx5_priv *priv = &dev->priv;
@@ -1604,6 +1622,7 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
 	priv->numa_node = dev_to_node(mlx5_core_dma_dev(dev));
 	priv->dbg.dbg_root = debugfs_create_dir(dev_name(dev->device),
 						mlx5_debugfs_root);
+	debugfs_create_file("vhca_id", 0400, priv->dbg.dbg_root, dev, &vhca_id_fops);
 	INIT_LIST_HEAD(&priv->traps);
 
 	err = mlx5_tout_init(dev);
......
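In the vhca_id hunk above, DEFINE_SHOW_ATTRIBUTE(vhca_id) generates vhca_id_open() and the vhca_id_fops passed to debugfs_create_file(), so the value becomes readable from the device's directory under /sys/kernel/debug/mlx5/. A minimal out-of-tree module sketch of the same seq_file pattern, with hypothetical "demo" names:

#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *demo_dir;

static int demo_show(struct seq_file *file, void *priv)
{
	seq_printf(file, "0x%x\n", 0x1234);	/* stand-in for vhca_id */
	return 0;
}
/* Generates demo_open() and demo_fops from demo_show(). */
DEFINE_SHOW_ATTRIBUTE(demo);

static int __init demo_init(void)
{
	demo_dir = debugfs_create_dir("demo", NULL);
	debugfs_create_file("value", 0400, demo_dir, NULL, &demo_fops);
	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove(demo_dir);	/* removes the dir and its files */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");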
@@ -292,7 +292,7 @@ int mlx5dr_table_destroy(struct mlx5dr_table *tbl)
 	mlx5dr_dbg_tbl_del(tbl);
 	ret = dr_table_destroy_sw_owned_tbl(tbl);
 	if (ret)
-		mlx5dr_err(tbl->dmn, "Failed to destoy sw owned table\n");
+		mlx5dr_err(tbl->dmn, "Failed to destroy sw owned table\n");
 
 	dr_table_uninit(tbl);
......
@@ -243,6 +243,23 @@ static inline struct mlx5_cqe64 *mlx5_cqwq_get_cqe(struct mlx5_cqwq *wq)
 	return cqe;
 }
 
+static inline
+struct mlx5_cqe64 *mlx5_cqwq_get_cqe_enahnced_comp(struct mlx5_cqwq *wq)
+{
+	u8 sw_validity_iteration_count = mlx5_cqwq_get_wrap_cnt(wq) & 0xff;
+	u32 ci = mlx5_cqwq_get_ci(wq);
+	struct mlx5_cqe64 *cqe;
+
+	cqe = mlx5_cqwq_get_wqe(wq, ci);
+	if (cqe->validity_iteration_count != sw_validity_iteration_count)
+		return NULL;
+
+	/* ensure cqe content is read after cqe ownership bit/validity byte */
+	dma_rmb();
+
+	return cqe;
+}
+
 static inline u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
 {
 	return (u32)wq->fbc.sz_m1 + 1;
......
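The new mlx5_cqwq_get_cqe_enahnced_comp() helper above replaces the classic ownership-bit test: the device stamps each CQE with the low 8 bits of the iteration it was written on (and mlx5e_alloc_cq_common() now initializes that byte to 0xff so a fresh ring reads as empty), while the consumer compares the stamp against its own wrap count. A userspace model of that handshake, with illustrative names:

#include <stdint.h>
#include <stdbool.h>

#define QLEN 256	/* power-of-two ring, like the CQ */

struct entry { uint8_t validity; int data; };

/* Consumer side: an entry is new iff its stamped iteration matches
 * the iteration the consumer expects for this pass over the ring. */
static bool entry_is_valid(const struct entry *ring, uint32_t ci)
{
	uint8_t expected = (uint8_t)((ci / QLEN) & 0xff);	/* wrap count */

	return ring[ci % QLEN].validity == expected;
}

/* Producer side: publish slot 'pi' stamped with the current wrap. */
static void entry_publish(struct entry *ring, uint32_t pi, int data)
{
	ring[pi % QLEN].data = data;
	/* real hardware/driver pairs this store with a write barrier,
	 * mirrored by the dma_rmb() on the consumer side above */
	ring[pi % QLEN].validity = (uint8_t)((pi / QLEN) & 0xff);
}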
@@ -882,6 +882,12 @@ static inline u8 get_cqe_opcode(struct mlx5_cqe64 *cqe)
 	return cqe->op_own >> 4;
 }
 
+static inline u8 get_cqe_enhanced_num_mini_cqes(struct mlx5_cqe64 *cqe)
+{
+	/* num_of_mini_cqes is zero based */
+	return get_cqe_opcode(cqe) + 1;
+}
+
 static inline u8 get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
 {
 	return (cqe->lro.tcppsh_abort_dupack >> 6) & 1;
......