Commit cb560795 authored by Paolo Abeni

Merge branch 'mlx5-misc-fixes-2024-10-15'

Tariq Toukan says:

====================
mlx5 misc fixes 2024-10-15

This patchset provides misc bug fixes from the team to the mlx5 core and
Eth drivers.

Series generated against:
commit 174714f0 ("selftests: drivers: net: fix name not defined")
====================

Link: https://patch.msgid.link/20241015093208.197603-1-tariqt@nvidia.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parents 7decd1f5 4dbc1d1a
@@ -1765,6 +1765,10 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
 	}
 }
 
+#define MLX5_MAX_MANAGE_PAGES_CMD_ENT 1
+#define MLX5_CMD_MASK ((1UL << (cmd->vars.max_reg_cmds + \
+			       MLX5_MAX_MANAGE_PAGES_CMD_ENT)) - 1)
+
 static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
 {
 	struct mlx5_cmd *cmd = &dev->cmd;
@@ -1776,7 +1780,7 @@ static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
 	/* wait for pending handlers to complete */
 	mlx5_eq_synchronize_cmd_irq(dev);
 	spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
-	vector = ~dev->cmd.vars.bitmask & ((1ul << (1 << dev->cmd.vars.log_sz)) - 1);
+	vector = ~dev->cmd.vars.bitmask & MLX5_CMD_MASK;
 	if (!vector)
 		goto no_trig;
@@ -2361,7 +2365,7 @@ int mlx5_cmd_enable(struct mlx5_core_dev *dev)
 	cmd->state = MLX5_CMDIF_STATE_DOWN;
 	cmd->vars.max_reg_cmds = (1 << cmd->vars.log_sz) - 1;
-	cmd->vars.bitmask = (1UL << cmd->vars.max_reg_cmds) - 1;
+	cmd->vars.bitmask = MLX5_CMD_MASK;
 
 	sema_init(&cmd->vars.sem, cmd->vars.max_reg_cmds);
 	sema_init(&cmd->vars.pages_sem, 1);
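For context on the mask above: max_reg_cmds is (1 << log_sz) - 1, i.e. all command entries except the one reserved for the manage-pages command, so the old initialization of cmd->vars.bitmask left the manage-pages bit out while the trigger path masked with all (1 << log_sz) bits. The sketch below is standalone userspace C, not driver code; log_sz = 5 is an arbitrary example and a 64-bit unsigned long is assumed. It just prints the three masks to show where they disagreed:

#include <stdio.h>

#define MLX5_MAX_MANAGE_PAGES_CMD_ENT 1

int main(void)
{
	unsigned long log_sz = 5;				/* example: 32 command entries */
	unsigned long max_reg_cmds = (1UL << log_sz) - 1;	/* 31 regular command slots */

	/* Old bitmask initialization: regular slots only. */
	unsigned long old_init = (1UL << max_reg_cmds) - 1;
	/* Old trigger-path mask: every entry, including the manage-pages slot. */
	unsigned long old_trigger = (1UL << (1UL << log_sz)) - 1;
	/* Unified MLX5_CMD_MASK: regular slots plus the manage-pages entry. */
	unsigned long unified = (1UL << (max_reg_cmds + MLX5_MAX_MANAGE_PAGES_CMD_ENT)) - 1;

	printf("old init mask:    %#lx\n", old_init);	/* 0x7fffffff */
	printf("old trigger mask: %#lx\n", old_trigger);	/* 0xffffffff */
	printf("unified mask:     %#lx\n", unified);	/* 0xffffffff */
	return 0;
}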
@@ -6509,6 +6509,8 @@ static void _mlx5e_remove(struct auxiliary_device *adev)
 	mlx5e_dcbnl_delete_app(priv);
 	unregister_netdev(priv->netdev);
 	_mlx5e_suspend(adev, false);
-	priv->profile->cleanup(priv);
+	/* Avoid cleanup if profile rollback failed. */
+	if (priv->profile)
+		priv->profile->cleanup(priv);
 	mlx5e_destroy_netdev(priv);
 	mlx5e_devlink_port_unregister(mlx5e_dev);
@@ -1061,6 +1061,12 @@ int mlx5_comp_eqn_get(struct mlx5_core_dev *dev, u16 vecidx, int *eqn)
 	struct mlx5_eq_comp *eq;
 	int ret = 0;
 
+	if (vecidx >= table->max_comp_eqs) {
+		mlx5_core_dbg(dev, "Requested vector index %u should be less than %u",
+			      vecidx, table->max_comp_eqs);
+		return -EINVAL;
+	}
+
 	mutex_lock(&table->comp_lock);
 	eq = xa_load(&table->comp_eqs, vecidx);
 	if (eq) {
@@ -1489,7 +1489,7 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
 	}
 
 	if (err)
-		goto abort;
+		goto err_esw_enable;
 
 	esw->fdb_table.flags |= MLX5_ESW_FDB_CREATED;
@@ -1503,7 +1503,8 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
 	return 0;
 
-abort:
+err_esw_enable:
+	mlx5_eq_notifier_unregister(esw->dev, &esw->nb);
 	mlx5_esw_acls_ns_cleanup(esw);
 	return err;
 }
@@ -691,7 +691,6 @@ static int hws_bwc_matcher_move(struct mlx5hws_bwc_matcher *bwc_matcher)
 static int
 hws_bwc_matcher_rehash_size(struct mlx5hws_bwc_matcher *bwc_matcher)
 {
-	u32 num_of_rules;
 	int ret;
 
 	/* If the current matcher size is already at its max size, we can't
@@ -705,8 +704,7 @@ hws_bwc_matcher_rehash_size(struct mlx5hws_bwc_matcher *bwc_matcher)
 	 * Need to check again if we really need rehash.
 	 * If the reason for rehash was size, but not any more - skip rehash.
 	 */
-	num_of_rules = __atomic_load_n(&bwc_matcher->num_of_rules, __ATOMIC_RELAXED);
-	if (!hws_bwc_matcher_rehash_size_needed(bwc_matcher, num_of_rules))
+	if (!hws_bwc_matcher_rehash_size_needed(bwc_matcher, bwc_matcher->num_of_rules))
 		return 0;
 
 	/* Now we're done all the checking - do the rehash:
@@ -46,6 +46,7 @@ struct mlx5hws_context {
 	struct mlx5hws_send_engine *send_queue;
 	size_t queues;
 	struct mutex *bwc_send_queue_locks; /* protect BWC queues */
+	struct lock_class_key *bwc_lock_class_keys;
 	struct list_head tbl_list;
 	struct mlx5hws_context_debug_info debug_info;
 	struct xarray peer_ctx_xa;
@@ -1925,7 +1925,7 @@ mlx5hws_definer_calc_layout(struct mlx5hws_context *ctx,
 	ret = hws_definer_conv_match_params_to_hl(ctx, mt, match_hl);
 	if (ret) {
 		mlx5hws_err(ctx, "Failed to convert items to header layout\n");
-		goto free_fc;
+		goto free_match_hl;
 	}
 
 	/* Find the match definer layout for header layout match union */
@@ -1946,7 +1946,7 @@ mlx5hws_definer_calc_layout(struct mlx5hws_context *ctx,
 free_fc:
 	kfree(mt->fc);
-
+free_match_hl:
 	kfree(match_hl);
 	return ret;
 }
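The definer fix above is an instance of the usual staged-cleanup idiom: an error label should release only what had been set up before the failing step, so the early conversion failure now jumps past kfree(mt->fc) to a later label that frees only the header-layout buffer. A minimal standalone sketch of that idiom, in plain userspace C with illustrative names rather than driver code:

#include <stdio.h>
#include <stdlib.h>

struct layout {
	int *hl;	/* allocated first */
	int *fc;	/* allocated later */
};

static int layout_calc(struct layout *l, int fail_early)
{
	l->hl = calloc(64, sizeof(*l->hl));
	if (!l->hl)
		return -1;

	if (fail_early)
		goto free_hl;		/* l->fc does not exist yet: skip free_fc */

	l->fc = calloc(16, sizeof(*l->fc));
	if (!l->fc)
		goto free_hl;

	if (/* pretend a later step failed */ 1)
		goto free_fc;		/* now both allocations must be released */

	return 0;

free_fc:
	free(l->fc);
free_hl:
	free(l->hl);
	return -1;
}

int main(void)
{
	struct layout l = { 0 };

	printf("early failure: %d\n", layout_calc(&l, 1));
	printf("late failure:  %d\n", layout_calc(&l, 0));
	return 0;
}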
@@ -941,14 +941,18 @@ static void __hws_send_queues_close(struct mlx5hws_context *ctx, u16 queues)
 static void hws_send_queues_bwc_locks_destroy(struct mlx5hws_context *ctx)
 {
-	int bwc_queues = ctx->queues - 1;
+	int bwc_queues = mlx5hws_bwc_queues(ctx);
 	int i;
 
 	if (!mlx5hws_context_bwc_supported(ctx))
 		return;
 
-	for (i = 0; i < bwc_queues; i++)
+	for (i = 0; i < bwc_queues; i++) {
 		mutex_destroy(&ctx->bwc_send_queue_locks[i]);
+		lockdep_unregister_key(ctx->bwc_lock_class_keys + i);
+	}
+
+	kfree(ctx->bwc_lock_class_keys);
 	kfree(ctx->bwc_send_queue_locks);
 }
@@ -977,10 +981,22 @@ static int hws_bwc_send_queues_init(struct mlx5hws_context *ctx)
 	if (!ctx->bwc_send_queue_locks)
 		return -ENOMEM;
 
-	for (i = 0; i < bwc_queues; i++)
+	ctx->bwc_lock_class_keys = kcalloc(bwc_queues,
+					   sizeof(*ctx->bwc_lock_class_keys),
+					   GFP_KERNEL);
+	if (!ctx->bwc_lock_class_keys)
+		goto err_lock_class_keys;
+
+	for (i = 0; i < bwc_queues; i++) {
 		mutex_init(&ctx->bwc_send_queue_locks[i]);
+		lockdep_register_key(ctx->bwc_lock_class_keys + i);
+	}
 
 	return 0;
+
+err_lock_class_keys:
+	kfree(ctx->bwc_send_queue_locks);
+	return -ENOMEM;
 }
 
 int mlx5hws_send_queues_open(struct mlx5hws_context *ctx,
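The send-queue hunks pair each per-queue mutex with its own dynamically registered lock class key, size both arrays by the same mlx5hws_bwc_queues() count, and tear them down symmetrically, with an unwind label for the partial-allocation case. lockdep keys exist only in the kernel, so the standalone sketch below mirrors just that structural pattern using pthread mutexes and attribute objects as stand-ins; all names here are illustrative, not driver API:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct bwc_ctx {
	pthread_mutex_t *queue_locks;
	pthread_mutexattr_t *lock_attrs;	/* stand-in for the per-lock class keys */
	unsigned int num_bwc_queues;
};

static int bwc_locks_init(struct bwc_ctx *ctx, unsigned int bwc_queues)
{
	unsigned int i;

	ctx->queue_locks = calloc(bwc_queues, sizeof(*ctx->queue_locks));
	if (!ctx->queue_locks)
		return -1;

	ctx->lock_attrs = calloc(bwc_queues, sizeof(*ctx->lock_attrs));
	if (!ctx->lock_attrs)
		goto err_free_locks;		/* unwind only the first array */

	for (i = 0; i < bwc_queues; i++) {
		pthread_mutexattr_init(&ctx->lock_attrs[i]);
		pthread_mutex_init(&ctx->queue_locks[i], &ctx->lock_attrs[i]);
	}
	ctx->num_bwc_queues = bwc_queues;
	return 0;

err_free_locks:
	free(ctx->queue_locks);
	return -1;
}

static void bwc_locks_destroy(struct bwc_ctx *ctx)
{
	unsigned int i;

	/* Tear down in one pass: destroy each lock together with its attr. */
	for (i = 0; i < ctx->num_bwc_queues; i++) {
		pthread_mutex_destroy(&ctx->queue_locks[i]);
		pthread_mutexattr_destroy(&ctx->lock_attrs[i]);
	}
	free(ctx->lock_attrs);
	free(ctx->queue_locks);
}

int main(void)
{
	struct bwc_ctx ctx = { 0 };

	if (bwc_locks_init(&ctx, 4))
		return 1;
	puts("4 queue locks initialized");
	bwc_locks_destroy(&ctx);
	return 0;
}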