Commit ad2d3ef4 authored by Jason Gunthorpe

RDMA/mlx5: Lock access to ent->available_mrs/limit when doing queue_work

Accesses to these members need to be locked. There is no reason not to
hold a spinlock while calling queue_work(), so move the tests into a
helper and always call it under lock.

The helper should be called when available_mrs is adjusted.

Link: https://lore.kernel.org/r/20200310082238.239865-10-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent a1d8854a
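
Before the diff, a minimal userspace sketch of the pattern the commit message describes: the high/low water-mark test lives in a single helper that is only ever called with the lock held, and every adjustment of the available counter calls that helper before the lock is dropped. The names (cache_ent, adjust_locked, kick_worker) are illustrative only, not the mlx5 identifiers, and a pthread mutex plus a printf stand in for the spinlock and queue_work().

/*
 * Illustrative sketch only, not the mlx5 code: the counters are only read
 * under the lock, and the "should we kick the worker?" decision is made in
 * one place, by a helper that asserts/assumes the lock is held.
 */
#include <pthread.h>
#include <stdio.h>

struct cache_ent {
	pthread_mutex_t lock;
	unsigned int available;	/* protected by lock */
	unsigned int limit;	/* protected by lock */
};

/* Stand-in for queue_work(): in the kernel this schedules the async refill. */
static void kick_worker(struct cache_ent *ent)
{
	printf("worker queued (available=%u, limit=%u)\n",
	       ent->available, ent->limit);
}

/*
 * Must be called with ent->lock held. Hysteresis: kick the worker when we
 * drop below the low water mark (limit) or exceed the high water mark
 * (2 * limit).
 */
static void adjust_locked(struct cache_ent *ent)
{
	if (ent->available < ent->limit || ent->available > 2 * ent->limit)
		kick_worker(ent);
}

/* Every adjustment of 'available' calls the helper before dropping the lock. */
static void take_one(struct cache_ent *ent)
{
	pthread_mutex_lock(&ent->lock);
	if (ent->available)
		ent->available--;
	adjust_locked(ent);
	pthread_mutex_unlock(&ent->lock);
}

int main(void)
{
	struct cache_ent ent = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.available = 4,
		.limit = 4,
	};

	take_one(&ent);	/* drops below the low water mark, worker is kicked */
	return 0;
}

In the actual change below, the helper is queue_adjust_cache_locked(), the lock is ent->lock, and the work is queued on the cache workqueue with the same hysteresis band: below limit triggers a refill, above 2 * limit triggers a shrink.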
@@ -134,6 +134,10 @@ static void create_mkey_callback(int status, struct mlx5_async_work *context)
 	list_add_tail(&mr->list, &ent->head);
 	ent->available_mrs++;
 	ent->total_mrs++;
+	/*
+	 * Creating is always done in response to some demand, so do not call
+	 * queue_adjust_cache_locked().
+	 */
 	spin_unlock_irqrestore(&ent->lock, flags);
 
 	if (!completion_done(&ent->compl))
@@ -367,6 +371,20 @@ static int someone_adding(struct mlx5_mr_cache *cache)
 	return 0;
 }
 
+/*
+ * Check if the bucket is outside the high/low water mark and schedule an async
+ * update. The cache refill has hysteresis, once the low water mark is hit it is
+ * refilled up to the high mark.
+ */
+static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
+{
+	lockdep_assert_held(&ent->lock);
+
+	if (ent->available_mrs < ent->limit ||
+	    ent->available_mrs > 2 * ent->limit)
+		queue_work(ent->dev->cache.wq, &ent->work);
+}
+
 static void __cache_work_func(struct mlx5_cache_ent *ent)
 {
 	struct mlx5_ib_dev *dev = ent->dev;
@@ -462,9 +480,8 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
 					      list);
 			list_del(&mr->list);
 			ent->available_mrs--;
+			queue_adjust_cache_locked(ent);
 			spin_unlock_irq(&ent->lock);
-			if (ent->available_mrs < ent->limit)
-				queue_work(cache->wq, &ent->work);
 			return mr;
 		}
 	}
@@ -487,14 +504,12 @@ static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_cache_ent *req_ent)
 					      list);
 			list_del(&mr->list);
 			ent->available_mrs--;
+			queue_adjust_cache_locked(ent);
 			spin_unlock_irq(&ent->lock);
-			if (ent->available_mrs < ent->limit)
-				queue_work(dev->cache.wq, &ent->work);
 			break;
 		}
+		queue_adjust_cache_locked(ent);
 		spin_unlock_irq(&ent->lock);
-
-		queue_work(dev->cache.wq, &ent->work);
 	}
 
 	if (!mr)
@@ -516,7 +531,6 @@ static void detach_mr_from_cache(struct mlx5_ib_mr *mr)
 void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
 	struct mlx5_cache_ent *ent = mr->cache_ent;
-	int shrink = 0;
 
 	if (!ent)
 		return;
@@ -524,20 +538,14 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 	if (mlx5_mr_cache_invalidate(mr)) {
 		detach_mr_from_cache(mr);
 		destroy_mkey(dev, mr);
-		if (ent->available_mrs < ent->limit)
-			queue_work(dev->cache.wq, &ent->work);
 		return;
 	}
 
 	spin_lock_irq(&ent->lock);
 	list_add_tail(&mr->list, &ent->head);
 	ent->available_mrs++;
-	if (ent->available_mrs > 2 * ent->limit)
-		shrink = 1;
+	queue_adjust_cache_locked(ent);
 	spin_unlock_irq(&ent->lock);
-
-	if (shrink)
-		queue_work(dev->cache.wq, &ent->work);
 }
 
 static void clean_keys(struct mlx5_ib_dev *dev, int c)
@@ -653,7 +661,9 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
 			ent->limit = dev->mdev->profile->mr_cache[i].limit;
 		else
 			ent->limit = 0;
-		queue_work(cache->wq, &ent->work);
+		spin_lock_irq(&ent->lock);
+		queue_adjust_cache_locked(ent);
+		spin_unlock_irq(&ent->lock);
 	}
 
 	mlx5_mr_cache_debugfs_init(dev);