Commit 2d0b41a3 authored by Shay Drory, committed by Saeed Mahameed

net/mlx5: Refcount mlx5_irq with integer

Currently, all access to mlx5 IRQs is done under a lock, so there is no
reason to keep a kref in struct mlx5_irq.
Switch it to an integer.
Signed-off-by: Shay Drory <shayd@nvidia.com>
Reviewed-by: Parav Pandit <parav@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 68fefb70
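
The pattern the diff below adopts is a plain integer reference count whose every manipulation happens under the pool mutex. A minimal user-space sketch of that pattern follows; it uses pthreads and hypothetical fake_pool/fake_irq names, not the driver's own structures, and is only an illustration under those assumptions:

/* Sketch: integer refcount protected by a mutex instead of a kref.
 * fake_pool/fake_irq are illustrative names, not from the mlx5 driver.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_pool {
	pthread_mutex_t lock;	/* serializes all refcount changes */
};

struct fake_irq {
	struct fake_pool *pool;
	int refcount;		/* only touched while holding pool->lock */
};

/* Called when the last reference is dropped. */
static void fake_irq_release(struct fake_irq *irq)
{
	free(irq);
}

/* Take a reference; caller must already hold pool->lock.
 * Returns 1 on success, 0 if the object has no references left.
 */
static int fake_irq_get_locked(struct fake_irq *irq)
{
	if (!irq->refcount)
		return 0;
	irq->refcount++;
	return 1;
}

static int fake_irq_get(struct fake_irq *irq)
{
	int ret;

	pthread_mutex_lock(&irq->pool->lock);
	ret = fake_irq_get_locked(irq);
	pthread_mutex_unlock(&irq->pool->lock);
	return ret;
}

static void fake_irq_put(struct fake_irq *irq)
{
	struct fake_pool *pool = irq->pool;	/* save before a possible free */

	pthread_mutex_lock(&pool->lock);
	if (!--irq->refcount)
		fake_irq_release(irq);
	pthread_mutex_unlock(&pool->lock);
}

int main(void)
{
	static struct fake_pool pool = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct fake_irq *irq = calloc(1, sizeof(*irq));

	irq->pool = &pool;
	irq->refcount = 1;		/* creation holds the first reference */
	printf("get: %d\n", fake_irq_get(irq));	/* prints 1 */
	fake_irq_put(irq);
	fake_irq_put(irq);		/* last put frees the object */
	return 0;
}

Unlike kref, nothing in this scheme enforces the locking rule by itself; in the patch below, the lockdep_assert_held() in the new irq_get_locked() is what documents and checks that callers hold the pool lock.
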
@@ -32,7 +32,7 @@ struct mlx5_irq {
 	cpumask_var_t mask;
 	char name[MLX5_MAX_IRQ_NAME];
 	struct mlx5_irq_pool *pool;
-	struct kref kref;
+	int refcount;
 	u32 index;
 	int irqn;
 };
@@ -138,9 +138,8 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
 	return ret;
 }
 
-static void irq_release(struct kref *kref)
+static void irq_release(struct mlx5_irq *irq)
 {
-	struct mlx5_irq *irq = container_of(kref, struct mlx5_irq, kref);
 	struct mlx5_irq_pool *pool = irq->pool;
 
 	xa_erase(&pool->irqs, irq->index);
@@ -159,10 +158,31 @@ static void irq_put(struct mlx5_irq *irq)
 	struct mlx5_irq_pool *pool = irq->pool;
 
 	mutex_lock(&pool->lock);
-	kref_put(&irq->kref, irq_release);
+	irq->refcount--;
+	if (!irq->refcount)
+		irq_release(irq);
 	mutex_unlock(&pool->lock);
 }
 
+static int irq_get_locked(struct mlx5_irq *irq)
+{
+	lockdep_assert_held(&irq->pool->lock);
+	if (WARN_ON_ONCE(!irq->refcount))
+		return 0;
+	irq->refcount++;
+	return 1;
+}
+
+static int irq_get(struct mlx5_irq *irq)
+{
+	int err;
+
+	mutex_lock(&irq->pool->lock);
+	err = irq_get_locked(irq);
+	mutex_unlock(&irq->pool->lock);
+	return err;
+}
+
 static irqreturn_t irq_int_handler(int irq, void *nh)
 {
 	atomic_notifier_call_chain(nh, 0, NULL);
@@ -214,7 +234,7 @@ static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
 		err = -ENOMEM;
 		goto err_cpumask;
 	}
-	kref_init(&irq->kref);
+	irq->refcount = 1;
 	irq->index = i;
 	err = xa_err(xa_store(&pool->irqs, irq->index, irq, GFP_KERNEL));
 	if (err) {
@@ -235,18 +255,18 @@ static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
 
 int mlx5_irq_attach_nb(struct mlx5_irq *irq, struct notifier_block *nb)
 {
-	int err;
+	int ret;
 
-	err = kref_get_unless_zero(&irq->kref);
-	if (WARN_ON_ONCE(!err))
+	ret = irq_get(irq);
+	if (!ret)
 		/* Something very bad happens here, we are enabling EQ
 		 * on non-existing IRQ.
 		 */
 		return -ENOENT;
-	err = atomic_notifier_chain_register(&irq->nh, nb);
-	if (err)
+	ret = atomic_notifier_chain_register(&irq->nh, nb);
+	if (ret)
 		irq_put(irq);
-	return err;
+	return ret;
 }
 
 int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb)
@@ -301,10 +321,9 @@ static struct mlx5_irq *irq_pool_find_least_loaded(struct mlx5_irq_pool *pool,
 	xa_for_each_range(&pool->irqs, index, iter, start, end) {
 		if (!cpumask_equal(iter->mask, affinity))
 			continue;
-		if (kref_read(&iter->kref) < pool->min_threshold)
+		if (iter->refcount < pool->min_threshold)
 			return iter;
-		if (!irq || kref_read(&iter->kref) <
-		    kref_read(&irq->kref))
+		if (!irq || iter->refcount < irq->refcount)
 			irq = iter;
 	}
 	return irq;
@@ -319,7 +338,7 @@ static struct mlx5_irq *irq_pool_request_affinity(struct mlx5_irq_pool *pool,
 	mutex_lock(&pool->lock);
 	least_loaded_irq = irq_pool_find_least_loaded(pool, affinity);
 	if (least_loaded_irq &&
-	    kref_read(&least_loaded_irq->kref) < pool->min_threshold)
+	    least_loaded_irq->refcount < pool->min_threshold)
 		goto out;
 	new_irq = irq_pool_create_irq(pool, affinity);
 	if (IS_ERR(new_irq)) {
@@ -337,11 +356,11 @@ static struct mlx5_irq *irq_pool_request_affinity(struct mlx5_irq_pool *pool,
 	least_loaded_irq = new_irq;
 	goto unlock;
 out:
-	kref_get(&least_loaded_irq->kref);
-	if (kref_read(&least_loaded_irq->kref) > pool->max_threshold)
+	irq_get_locked(least_loaded_irq);
+	if (least_loaded_irq->refcount > pool->max_threshold)
 		mlx5_core_dbg(pool->dev, "IRQ %u overloaded, pool_name: %s, %u EQs on this irq\n",
 			      least_loaded_irq->irqn, pool->name,
-			      kref_read(&least_loaded_irq->kref) / MLX5_EQ_REFS_PER_IRQ);
+			      least_loaded_irq->refcount / MLX5_EQ_REFS_PER_IRQ);
 unlock:
 	mutex_unlock(&pool->lock);
 	return least_loaded_irq;
@@ -357,7 +376,7 @@ irq_pool_request_vector(struct mlx5_irq_pool *pool, int vecidx,
 	mutex_lock(&pool->lock);
 	irq = xa_load(&pool->irqs, vecidx);
 	if (irq) {
-		kref_get(&irq->kref);
+		irq_get_locked(irq);
 		goto unlock;
 	}
 	irq = irq_request(pool, vecidx);
@@ -424,7 +443,7 @@ struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
 		return irq;
 	mlx5_core_dbg(dev, "irq %u mapped to cpu %*pbl, %u EQs on this irq\n",
 		      irq->irqn, cpumask_pr_args(affinity),
-		      kref_read(&irq->kref) / MLX5_EQ_REFS_PER_IRQ);
+		      irq->refcount / MLX5_EQ_REFS_PER_IRQ);
 	return irq;
 }
 
@@ -456,8 +475,12 @@ static void irq_pool_free(struct mlx5_irq_pool *pool)
 	struct mlx5_irq *irq;
 	unsigned long index;
 
+	/* There are cases in which we are destrying the irq_table before
+	 * freeing all the IRQs, fast teardown for example. Hence, free the irqs
+	 * which might not have been freed.
+	 */
 	xa_for_each(&pool->irqs, index, irq)
-		irq_release(&irq->kref);
+		irq_release(irq);
 	xa_destroy(&pool->irqs);
 	kvfree(pool);
 }