Commit 1f8a7bee authored by Yuval Avnery, committed by Saeed Mahameed

net/mlx5: Add EQ enable/disable API

Previously, the EQ joined the IRQ notifier chain on creation. This forced
the caller to be ready to handle events before creating the EQ through the
eq_create_generic interface.

To let the caller control when the created EQ is attached to its IRQ, add
an enable/disable API.
Signed-off-by: Yuval Avnery <yuvalav@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent 81bfa206
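Before the diff, a minimal usage sketch of the new create/enable/disable/destroy sequence. It assumes a driver that already holds a struct mlx5_core_dev pointer; the handler my_eq_handler, the wrapper functions, and the .mask/.nent values are illustrative only. mlx5_eq_enable() and mlx5_eq_disable() are the calls added by this patch, while mlx5_eq_create_generic() and mlx5_eq_destroy_generic() already exist.

#include <linux/err.h>
#include <linux/notifier.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eq.h>

/* Hypothetical IRQ notifier callback; a real one would poll the EQ here. */
static int my_eq_handler(struct notifier_block *nb, unsigned long action,
                         void *data)
{
        return NOTIFY_OK;
}

static struct notifier_block my_nb = { .notifier_call = my_eq_handler };

/* Create first, then enable only once the caller is ready to handle events. */
static int my_create_eq(struct mlx5_core_dev *mdev, struct mlx5_eq **out)
{
        struct mlx5_eq_param param = {
                .irq_index = 0,
                .mask = 1ull << MLX5_EVENT_TYPE_CMD,    /* illustrative mask */
                .nent = 64,                             /* illustrative depth */
        };
        struct mlx5_eq *eq;
        int err;

        eq = mlx5_eq_create_generic(mdev, &param);      /* EQ exists, not yet armed */
        if (IS_ERR(eq))
                return PTR_ERR(eq);

        err = mlx5_eq_enable(mdev, eq, &my_nb);         /* attach notifier, arm EQ */
        if (err) {
                mlx5_eq_destroy_generic(mdev, eq);
                return err;
        }

        *out = eq;
        return 0;
}

/* Teardown mirrors the ODP EQ hunk below: disable before destroy. */
static void my_destroy_eq(struct mlx5_core_dev *mdev, struct mlx5_eq *eq)
{
        mlx5_eq_disable(mdev, eq, &my_nb);
        mlx5_eq_destroy_generic(mdev, eq);
}

Note the teardown order: disable detaches the notifier from the IRQ before the EQ is destroyed, matching mlx5_ib_destroy_pf_eq and destroy_async_eqs in the diff.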
@@ -1560,15 +1560,21 @@ mlx5_ib_create_pf_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
                 .irq_index = 0,
                 .mask = 1 << MLX5_EVENT_TYPE_PAGE_FAULT,
                 .nent = MLX5_IB_NUM_PF_EQE,
-                .nb = &eq->irq_nb,
         };
         eq->core = mlx5_eq_create_generic(dev->mdev, &param);
         if (IS_ERR(eq->core)) {
                 err = PTR_ERR(eq->core);
                 goto err_wq;
         }
+        err = mlx5_eq_enable(dev->mdev, eq->core, &eq->irq_nb);
+        if (err) {
+                mlx5_ib_err(dev, "failed to enable odp EQ %d\n", err);
+                goto err_eq;
+        }
 
         return 0;
+err_eq:
+        mlx5_eq_destroy_generic(dev->mdev, eq->core);
 err_wq:
         destroy_workqueue(eq->wq);
 err_mempool:
@@ -1581,6 +1587,7 @@ mlx5_ib_destroy_pf_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
 {
         int err;
 
+        mlx5_eq_disable(dev->mdev, eq->core, &eq->irq_nb);
         err = mlx5_eq_destroy_generic(dev->mdev, eq->core);
         cancel_work_sync(&eq->work);
         destroy_workqueue(eq->wq);
...
@@ -304,27 +304,14 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
         eq->irqn = pci_irq_vector(dev->pdev, vecidx);
         eq->dev = dev;
         eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET;
-        eq->irq_nb = param->nb;
-
-        err = mlx5_irq_attach_nb(dev->priv.eq_table->irq_table, vecidx,
-                                 param->nb);
-        if (err)
-                goto err_eq;
 
         err = mlx5_debug_eq_add(dev, eq);
         if (err)
-                goto err_detach;
-
-        /* EQs are created in ARMED state
-         */
-        eq_update_ci(eq, 1);
+                goto err_eq;
 
         kvfree(in);
         return 0;
 
-err_detach:
-        mlx5_irq_detach_nb(dev->priv.eq_table->irq_table, vecidx, eq->irq_nb);
 err_eq:
         mlx5_cmd_destroy_eq(dev, eq->eqn);
@@ -336,17 +323,49 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
         return err;
 }
 
+/**
+ * mlx5_eq_enable - Enable EQ for receiving EQEs
+ * @dev - Device which owns the eq
+ * @eq - EQ to enable
+ * @nb - notifier call block
+ * mlx5_eq_enable - must be called after EQ is created in device.
+ */
+int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
+                   struct notifier_block *nb)
+{
+        struct mlx5_eq_table *eq_table = dev->priv.eq_table;
+        int err;
+
+        err = mlx5_irq_attach_nb(eq_table->irq_table, eq->vecidx, nb);
+        if (!err)
+                eq_update_ci(eq, 1);
+
+        return err;
+}
+EXPORT_SYMBOL(mlx5_eq_enable);
+
+/**
+ * mlx5_eq_disable - Disable EQ for receiving EQEs
+ * @dev - Device which owns the eq
+ * @eq - EQ to disable
+ * @nb - notifier call block
+ * mlx5_eq_disable - must be called before EQ is destroyed.
+ */
+void mlx5_eq_disable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
+                     struct notifier_block *nb)
+{
+        struct mlx5_eq_table *eq_table = dev->priv.eq_table;
+
+        mlx5_irq_detach_nb(eq_table->irq_table, eq->vecidx, nb);
+}
+EXPORT_SYMBOL(mlx5_eq_disable);
+
 static int destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
 {
         int err;
 
         mlx5_debug_eq_remove(dev, eq);
-        err = mlx5_irq_detach_nb(dev->priv.eq_table->irq_table,
-                                 eq->vecidx, eq->irq_nb);
-        if (err)
-                mlx5_core_warn(eq->dev, "eq failed to detach from irq. err %d",
-                               err);
+
         err = mlx5_cmd_destroy_eq(dev, eq->eqn);
         if (err)
                 mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
@@ -544,14 +563,17 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
                 .irq_index = 0,
                 .mask = 1ull << MLX5_EVENT_TYPE_CMD,
                 .nent = MLX5_NUM_CMD_EQE,
-                .nb = &table->cmd_eq.irq_nb,
         };
         err = create_async_eq(dev, &table->cmd_eq.core, &param);
         if (err) {
                 mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
                 goto err0;
         }
+        err = mlx5_eq_enable(dev, &table->cmd_eq.core, &table->cmd_eq.irq_nb);
+        if (err) {
+                mlx5_core_warn(dev, "failed to enable cmd EQ %d\n", err);
+                goto err1;
+        }
 
         mlx5_cmd_use_events(dev);
 
         table->async_eq.irq_nb.notifier_call = mlx5_eq_async_int;
@@ -559,12 +581,17 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
                 .irq_index = 0,
                 .mask = gather_async_events_mask(dev),
                 .nent = MLX5_NUM_ASYNC_EQE,
-                .nb = &table->async_eq.irq_nb,
         };
         err = create_async_eq(dev, &table->async_eq.core, &param);
         if (err) {
                 mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
-                goto err1;
+                goto err2;
+        }
+
+        err = mlx5_eq_enable(dev, &table->async_eq.core,
+                             &table->async_eq.irq_nb);
+        if (err) {
+                mlx5_core_warn(dev, "failed to enable async EQ %d\n", err);
+                goto err3;
         }
 
         table->pages_eq.irq_nb.notifier_call = mlx5_eq_async_int;
@@ -572,21 +599,31 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
                 .irq_index = 0,
                 .mask = 1 << MLX5_EVENT_TYPE_PAGE_REQUEST,
                 .nent = /* TODO: sriov max_vf + */ 1,
-                .nb = &table->pages_eq.irq_nb,
         };
         err = create_async_eq(dev, &table->pages_eq.core, &param);
         if (err) {
                 mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
-                goto err2;
+                goto err4;
+        }
+
+        err = mlx5_eq_enable(dev, &table->pages_eq.core,
+                             &table->pages_eq.irq_nb);
+        if (err) {
+                mlx5_core_warn(dev, "failed to enable pages EQ %d\n", err);
+                goto err5;
         }
 
         return err;
 
-err2:
+err5:
+        destroy_async_eq(dev, &table->pages_eq.core);
+err4:
+        mlx5_eq_disable(dev, &table->async_eq.core, &table->async_eq.irq_nb);
+err3:
         destroy_async_eq(dev, &table->async_eq.core);
-err1:
+err2:
         mlx5_cmd_use_polling(dev);
+        mlx5_eq_disable(dev, &table->cmd_eq.core, &table->cmd_eq.irq_nb);
+err1:
         destroy_async_eq(dev, &table->cmd_eq.core);
 err0:
         mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
@@ -598,11 +635,13 @@ static void destroy_async_eqs(struct mlx5_core_dev *dev)
         struct mlx5_eq_table *table = dev->priv.eq_table;
         int err;
 
+        mlx5_eq_disable(dev, &table->pages_eq.core, &table->pages_eq.irq_nb);
         err = destroy_async_eq(dev, &table->pages_eq.core);
         if (err)
                 mlx5_core_err(dev, "failed to destroy pages eq, err(%d)\n",
                               err);
 
+        mlx5_eq_disable(dev, &table->async_eq.core, &table->async_eq.irq_nb);
         err = destroy_async_eq(dev, &table->async_eq.core);
         if (err)
                 mlx5_core_err(dev, "failed to destroy async eq, err(%d)\n",
@@ -610,6 +649,7 @@ static void destroy_async_eqs(struct mlx5_core_dev *dev)
         mlx5_cmd_use_polling(dev);
 
+        mlx5_eq_disable(dev, &table->cmd_eq.core, &table->cmd_eq.irq_nb);
         err = destroy_async_eq(dev, &table->cmd_eq.core);
         if (err)
                 mlx5_core_err(dev, "failed to destroy command eq, err(%d)\n",
@@ -711,6 +751,7 @@ static void destroy_comp_eqs(struct mlx5_core_dev *dev)
         list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
                 list_del(&eq->list);
+                mlx5_eq_disable(dev, &eq->core, &eq->irq_nb);
                 if (destroy_unmap_eq(dev, &eq->core))
                         mlx5_core_warn(dev, "failed to destroy comp EQ 0x%x\n",
                                        eq->core.eqn);
@@ -752,13 +793,19 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
                         .irq_index = vecidx,
                         .mask = 0,
                         .nent = nent,
-                        .nb = &eq->irq_nb,
                 };
                 err = create_map_eq(dev, &eq->core, &param);
                 if (err) {
                         kfree(eq);
                         goto clean;
                 }
+                err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb);
+                if (err) {
+                        destroy_unmap_eq(dev, &eq->core);
+                        kfree(eq);
+                        goto clean;
+                }
 
                 mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->core.eqn);
                 /* add tail, to keep the list ordered, for mlx5_vector2eqn to work */
                 list_add_tail(&eq->list, &table->comp_eqs_list);
...
@@ -33,7 +33,6 @@ struct mlx5_eq {
         u8 eqn;
         int nent;
         struct mlx5_rsc_debug *dbg;
-        struct notifier_block *irq_nb; /* For destroy only */
 };
 
 struct mlx5_eq_async {
...
@@ -16,13 +16,16 @@ struct mlx5_eq_param {
         u8 irq_index;
         int nent;
         u64 mask;
-        struct notifier_block *nb;
 };
 
 struct mlx5_eq *
 mlx5_eq_create_generic(struct mlx5_core_dev *dev, struct mlx5_eq_param *param);
 int
 mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
+int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
+                   struct notifier_block *nb);
+void mlx5_eq_disable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
+                     struct notifier_block *nb);
 struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc);
 void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm);
...
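Taken together, the header changes above mean a consumer now keeps its notifier_block next to the generic EQ and hands it to mlx5_eq_enable()/mlx5_eq_disable(), instead of storing it in mlx5_eq_param. Below is a sketch of such a consumer-side IRQ handler. The wrapper struct and handler name (my_eq, my_eq_int) are invented for illustration; the polling loop uses the mlx5_eq_get_eqe()/mlx5_eq_update_ci() helpers declared in the hunk above, while real users (for example the ODP page-fault EQ) add their own batching and work queuing.

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eq.h>

/* Hypothetical wrapper pairing a generic EQ with the notifier block that is
 * now passed to mlx5_eq_enable()/mlx5_eq_disable() rather than mlx5_eq_param. */
struct my_eq {
        struct mlx5_eq *core;
        struct notifier_block irq_nb;
};

/* IRQ-level notifier: runs when the vector fires; drains the EQ and re-arms it. */
static int my_eq_int(struct notifier_block *nb, unsigned long action, void *data)
{
        struct my_eq *eq = container_of(nb, struct my_eq, irq_nb);
        struct mlx5_eqe *eqe;
        u32 cc = 0;

        while ((eqe = mlx5_eq_get_eqe(eq->core, cc))) {
                /* dispatch on eqe->type here */
                cc++;
        }
        mlx5_eq_update_ci(eq->core, cc, true);  /* consume 'cc' EQEs and re-arm */

        return NOTIFY_OK;
}

The irq_nb embedded here is what would be passed to mlx5_eq_enable() once the core EQ has been created.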