Commit d6b2785c authored by Moshe Shemesh, committed by Saeed Mahameed

net/mlx5: Cleanup IRQs in case of unload failure

When mlx5_stop_eqs fails to destroy any of the EQs, it returns with an error.
In such a failure flow the function returns without releasing the remaining
EQs' irqs, and pci_free_irq_vectors then fails.
Fix this by only warning on destroy EQ failure and continuing to release the
other EQs and their irqs.

It fixes the following kernel trace:
kernel: kernel BUG at drivers/pci/msi.c:352!
...
...
kernel: Call Trace:
kernel: pci_disable_msix+0xd3/0x100
kernel: pci_free_irq_vectors+0xe/0x20
kernel: mlx5_load_one.isra.17+0x9f5/0xec0 [mlx5_core]

Fixes: e126ba97 ("mlx5: Add driver for Mellanox Connect-IB adapters")
Signed-off-by: Moshe Shemesh <moshe@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent 139ed6c6
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -775,7 +775,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
 	return err;
 }
 
-int mlx5_stop_eqs(struct mlx5_core_dev *dev)
+void mlx5_stop_eqs(struct mlx5_core_dev *dev)
 {
 	struct mlx5_eq_table *table = &dev->priv.eq_table;
 	int err;
@@ -784,22 +784,28 @@ int mlx5_stop_eqs(struct mlx5_core_dev *dev)
 	if (MLX5_CAP_GEN(dev, pg)) {
 		err = mlx5_destroy_unmap_eq(dev, &table->pfault_eq);
 		if (err)
-			return err;
+			mlx5_core_err(dev, "failed to destroy page fault eq, err(%d)\n",
+				      err);
 	}
 #endif
 
 	err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
 	if (err)
-		return err;
+		mlx5_core_err(dev, "failed to destroy pages eq, err(%d)\n",
+			      err);
 
-	mlx5_destroy_unmap_eq(dev, &table->async_eq);
+	err = mlx5_destroy_unmap_eq(dev, &table->async_eq);
+	if (err)
+		mlx5_core_err(dev, "failed to destroy async eq, err(%d)\n",
+			      err);
 	mlx5_cmd_use_polling(dev);
 
 	err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
-	if (err)
+	if (err) {
+		mlx5_core_err(dev, "failed to destroy command eq, err(%d)\n",
+			      err);
 		mlx5_cmd_use_events(dev);
-
-	return err;
+	}
 }
 
 int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -1049,7 +1049,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
 		       enum mlx5_eq_type type);
 int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
 int mlx5_start_eqs(struct mlx5_core_dev *dev);
-int mlx5_stop_eqs(struct mlx5_core_dev *dev);
+void mlx5_stop_eqs(struct mlx5_core_dev *dev);
 int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
 		    unsigned int *irqn);
 int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);