Commit e8abebb3 authored by Shay Drory, committed by Saeed Mahameed

net/mlx5: Extend mlx5_irq_request to request IRQ from the kernel

Extend mlx5_irq_request so that IRQs will be requested upon EQ creation,
and not on driver boot.
Signed-off-by: Shay Drory <shayd@nvidia.com>
Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 2de61538
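
With this change, IRQs are no longer wired up for every vector at driver probe time; instead each EQ requests its vector from the kernel when the EQ is created and hands it back when the EQ is destroyed. A minimal sketch of such a caller is shown below, assuming this commit's mlx5_irq_request()/mlx5_irq_release() API; struct my_eq and the create/destroy helpers are illustrative and not part of the commit.

struct my_eq {			/* illustrative container, not from the commit */
	struct mlx5_irq *irq;
	/* ... EQ fields ... */
};

static int create_my_eq(struct mlx5_core_dev *dev, struct my_eq *eq,
			int vecidx, struct cpumask *affinity)
{
	struct mlx5_irq *irq;

	/* Request the vector only now, at EQ creation time. */
	irq = mlx5_irq_request(dev, vecidx, affinity);
	if (IS_ERR(irq))
		return PTR_ERR(irq);

	eq->irq = irq;
	/* ... create the EQ and attach its notifier to the IRQ ... */
	return 0;
}

static void destroy_my_eq(struct mlx5_core_dev *dev, struct my_eq *eq)
{
	/* ... detach the notifier and destroy the EQ ... */
	mlx5_irq_release(eq->irq);
}
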
@@ -17,7 +17,6 @@ struct mlx5_irq {
struct atomic_notifier_head nh;
cpumask_var_t mask;
char name[MLX5_MAX_IRQ_NAME];
spinlock_t lock; /* protects affinity assignment */
struct kref kref;
int irqn;
};
@@ -60,7 +59,7 @@ int mlx5_irq_get_num_comp(struct mlx5_irq_table *table)
static struct mlx5_irq *mlx5_irq_get(struct mlx5_core_dev *dev, int vecidx)
{
struct mlx5_irq_table *irq_table = dev->priv.irq_table;
struct mlx5_irq_table *irq_table = mlx5_irq_table_get(dev);
return &irq_table->irq[vecidx];
}
@@ -192,37 +191,7 @@ int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb)
return atomic_notifier_chain_unregister(&irq->nh, nb);
}
void mlx5_irq_release(struct mlx5_irq *irq)
{
synchronize_irq(irq->irqn);
irq_put(irq);
}
struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, int vecidx,
struct cpumask *affinity)
{
struct mlx5_irq_table *table = mlx5_irq_table_get(dev);
struct mlx5_irq *irq = &table->irq[vecidx];
int err;
err = kref_get_unless_zero(&irq->kref);
if (!err)
return ERR_PTR(-ENOENT);
spin_lock(&irq->lock);
if (!cpumask_empty(irq->mask)) {
/* already configured */
spin_unlock(&irq->lock);
return irq;
}
cpumask_copy(irq->mask, affinity);
irq_set_affinity_hint(irq->irqn, irq->mask);
spin_unlock(&irq->lock);
return irq;
}
static irqreturn_t mlx5_irq_int_handler(int irq, void *nh)
static irqreturn_t irq_int_handler(int irq, void *nh)
{
atomic_notifier_call_chain(nh, 0, NULL);
return IRQ_HANDLED;
@@ -230,7 +199,7 @@ static irqreturn_t mlx5_irq_int_handler(int irq, void *nh)
static void irq_set_name(char *name, int vecidx)
{
if (vecidx == 0) {
if (!vecidx) {
snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_async");
return;
}
@@ -239,41 +208,67 @@ static void irq_set_name(char *name, int vecidx)
vecidx - MLX5_IRQ_VEC_COMP_BASE);
}
static int request_irqs(struct mlx5_core_dev *dev, int nvec)
static int irq_request(struct mlx5_core_dev *dev, int i)
{
struct mlx5_irq *irq = mlx5_irq_get(dev, i);
char name[MLX5_MAX_IRQ_NAME];
int err;
int i;
for (i = 0; i < nvec; i++) {
struct mlx5_irq *irq = mlx5_irq_get(dev, i);
irq->irqn = pci_irq_vector(dev->pdev, i);
irq_set_name(name, i);
ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh);
snprintf(irq->name, MLX5_MAX_IRQ_NAME,
"%s@pci:%s", name, pci_name(dev->pdev));
err = request_irq(irq->irqn, mlx5_irq_int_handler, 0, irq->name,
&irq->nh);
if (err) {
mlx5_core_err(dev, "Failed to request irq\n");
goto err_request_irq;
}
if (!zalloc_cpumask_var(&irq->mask, GFP_KERNEL)) {
mlx5_core_warn(dev, "zalloc_cpumask_var failed\n");
err = -ENOMEM;
goto err_request_irq;
}
spin_lock_init(&irq->lock);
kref_init(&irq->kref);
irq->irqn = pci_irq_vector(dev->pdev, i);
irq_set_name(name, i);
ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh);
snprintf(irq->name, MLX5_MAX_IRQ_NAME,
"%s@pci:%s", name, pci_name(dev->pdev));
err = request_irq(irq->irqn, irq_int_handler, 0, irq->name,
&irq->nh);
if (err) {
mlx5_core_err(dev, "Failed to request irq. err = %d\n", err);
return err;
}
if (!zalloc_cpumask_var(&irq->mask, GFP_KERNEL)) {
mlx5_core_warn(dev, "zalloc_cpumask_var failed\n");
free_irq(irq->irqn, &irq->nh);
return -ENOMEM;
}
kref_init(&irq->kref);
return 0;
}
err_request_irq:
while (i--)
irq_put(mlx5_irq_get(dev, i));
/**
* mlx5_irq_release - release an IRQ back to the system.
* @irq: irq to be released.
*/
void mlx5_irq_release(struct mlx5_irq *irq)
{
synchronize_irq(irq->irqn);
irq_put(irq);
}
return err;
/**
* mlx5_irq_request - request an IRQ for mlx5 device.
* @dev: mlx5 device that is requesting the IRQ.
* @vecidx: vector index of the IRQ. This argument is ignored if affinity is
* provided.
* @affinity: cpumask requested for this IRQ.
*
* This function returns a pointer to the IRQ, or an ERR_PTR in case of error.
*/
struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, int vecidx,
struct cpumask *affinity)
{
struct mlx5_irq_table *table = mlx5_irq_table_get(dev);
struct mlx5_irq *irq = &table->irq[vecidx];
int ret;
ret = kref_get_unless_zero(&irq->kref);
if (ret)
return irq;
ret = irq_request(dev, vecidx);
if (ret)
return ERR_PTR(ret);
cpumask_copy(irq->mask, affinity);
irq_set_affinity_hint(irq->irqn, irq->mask);
return irq;
}
static void irq_clear_rmap(struct mlx5_core_dev *dev)
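
Together, the pair above gives a reference-counted, lazily requested IRQ: the first mlx5_irq_request() for a vector performs the actual request_irq(), a later request for the same vector only takes another reference (kref_get_unless_zero() succeeds) and returns the already-configured IRQ, and each user drops its reference with mlx5_irq_release(). A hedged usage sketch follows, with the calling context (dev, vecidx, affinity) assumed.

	struct mlx5_irq *irq, *irq2;

	/* First user of this vector: the kref is still zero, so irq_request()
	 * runs and the IRQ is requested from the kernel with the given
	 * affinity.
	 */
	irq = mlx5_irq_request(dev, vecidx, affinity);
	if (IS_ERR(irq))
		return PTR_ERR(irq);

	/* A second user of the same vector gets the same mlx5_irq back
	 * without another request_irq(); only the refcount goes up.
	 */
	irq2 = mlx5_irq_request(dev, vecidx, affinity);

	/* Each user releases its own reference; the kernel IRQ is only
	 * freed once the last reference is dropped.
	 */
	mlx5_irq_release(irq2);
	mlx5_irq_release(irq);
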
@@ -369,14 +364,8 @@ int mlx5_irq_table_create(struct mlx5_core_dev *dev)
if (err)
goto err_set_rmap;
err = request_irqs(dev, nvec);
if (err)
goto err_request_irqs;
return 0;
err_request_irqs:
irq_clear_rmap(dev);
err_set_rmap:
pci_free_irq_vectors(dev->pdev);
err_free_irq:
@@ -392,14 +381,11 @@ static void irq_table_clear_rmap(struct mlx5_irq_table *table)
void mlx5_irq_table_destroy(struct mlx5_core_dev *dev)
{
struct mlx5_irq_table *table = dev->priv.irq_table;
int i;
if (mlx5_core_is_sf(dev))
return;
irq_table_clear_rmap(table);
for (i = 0; i < table->nvec; i++)
irq_release(&mlx5_irq_get(dev, i)->kref);
pci_free_irq_vectors(dev->pdev);
kfree(table->irq);
}