Commit 58d4d50e authored by Dragos Tatulea's avatar Dragos Tatulea Committed by Michael S. Tsirkin

vdpa/mlx5: Rename mr_mtx -> lock

Now that the mr resources have their own namespace in the
struct, give the lock a clearer name.
Signed-off-by: Dragos Tatulea <dtatulea@nvidia.com>
Reviewed-by: Cosmin Ratiu <cratiu@nvidia.com>
Acked-by: Eugenio Pérez <eperezma@redhat.com>
Message-Id: <20240830105838.2666587-7-dtatulea@nvidia.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
parent 5fc85679
@@ -87,7 +87,7 @@ struct mlx5_vdpa_mr_resources {
 	struct mlx5_vdpa_mr *mr[MLX5_VDPA_NUM_AS];
 	unsigned int group2asid[MLX5_VDPA_NUMVQ_GROUPS];
 	struct list_head mr_list_head;
-	struct mutex mr_mtx;
+	struct mutex lock;
 };
 
 struct mlx5_vdpa_dev {
......
@@ -666,9 +666,9 @@ static void _mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev,
 void mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev,
 		      struct mlx5_vdpa_mr *mr)
 {
-	mutex_lock(&mvdev->mres.mr_mtx);
+	mutex_lock(&mvdev->mres.lock);
 	_mlx5_vdpa_put_mr(mvdev, mr);
-	mutex_unlock(&mvdev->mres.mr_mtx);
+	mutex_unlock(&mvdev->mres.lock);
 }
 
 static void _mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev,
@@ -683,9 +683,9 @@ static void _mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev,
 void mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev,
 		      struct mlx5_vdpa_mr *mr)
 {
-	mutex_lock(&mvdev->mres.mr_mtx);
+	mutex_lock(&mvdev->mres.lock);
 	_mlx5_vdpa_get_mr(mvdev, mr);
-	mutex_unlock(&mvdev->mres.mr_mtx);
+	mutex_unlock(&mvdev->mres.lock);
 }
 
 void mlx5_vdpa_update_mr(struct mlx5_vdpa_dev *mvdev,
@@ -694,19 +694,19 @@ void mlx5_vdpa_update_mr(struct mlx5_vdpa_dev *mvdev,
 {
 	struct mlx5_vdpa_mr *old_mr = mvdev->mres.mr[asid];
 
-	mutex_lock(&mvdev->mres.mr_mtx);
+	mutex_lock(&mvdev->mres.lock);
 
 	_mlx5_vdpa_put_mr(mvdev, old_mr);
 	mvdev->mres.mr[asid] = new_mr;
 
-	mutex_unlock(&mvdev->mres.mr_mtx);
+	mutex_unlock(&mvdev->mres.lock);
 }
 
 static void mlx5_vdpa_show_mr_leaks(struct mlx5_vdpa_dev *mvdev)
 {
 	struct mlx5_vdpa_mr *mr;
 
-	mutex_lock(&mvdev->mres.mr_mtx);
+	mutex_lock(&mvdev->mres.lock);
 
 	list_for_each_entry(mr, &mvdev->mres.mr_list_head, mr_list) {
@@ -715,7 +715,7 @@ static void mlx5_vdpa_show_mr_leaks(struct mlx5_vdpa_dev *mvdev)
 			      mr, mr->mkey, refcount_read(&mr->refcount));
 	}
 
-	mutex_unlock(&mvdev->mres.mr_mtx);
+	mutex_unlock(&mvdev->mres.lock);
 }
@@ -782,9 +782,9 @@ struct mlx5_vdpa_mr *mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
 	if (!mr)
 		return ERR_PTR(-ENOMEM);
 
-	mutex_lock(&mvdev->mres.mr_mtx);
+	mutex_lock(&mvdev->mres.lock);
 	err = _mlx5_vdpa_create_mr(mvdev, mr, iotlb);
-	mutex_unlock(&mvdev->mres.mr_mtx);
+	mutex_unlock(&mvdev->mres.lock);
 
 	if (err)
 		goto out_err;
......
@@ -256,7 +256,7 @@ int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev)
 		mlx5_vdpa_warn(mvdev, "resources already allocated\n");
 		return -EINVAL;
 	}
-	mutex_init(&mvdev->mres.mr_mtx);
+	mutex_init(&mvdev->mres.lock);
 	res->uar = mlx5_get_uars_page(mdev);
 	if (IS_ERR(res->uar)) {
 		err = PTR_ERR(res->uar);
...@@ -301,7 +301,7 @@ int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev) ...@@ -301,7 +301,7 @@ int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev)
err_uctx: err_uctx:
mlx5_put_uars_page(mdev, res->uar); mlx5_put_uars_page(mdev, res->uar);
err_uars: err_uars:
mutex_destroy(&mvdev->mres.mr_mtx); mutex_destroy(&mvdev->mres.lock);
return err; return err;
} }
@@ -318,7 +318,7 @@ void mlx5_vdpa_free_resources(struct mlx5_vdpa_dev *mvdev)
 	dealloc_pd(mvdev, res->pdn, res->uid);
 	destroy_uctx(mvdev, res->uid);
 	mlx5_put_uars_page(mvdev->mdev, res->uar);
-	mutex_destroy(&mvdev->mres.mr_mtx);
+	mutex_destroy(&mvdev->mres.lock);
 	res->valid = false;
 }
......
@@ -3639,10 +3639,10 @@ static int mlx5_set_group_asid(struct vdpa_device *vdev, u32 group,
 	mvdev->mres.group2asid[group] = asid;
 
-	mutex_lock(&mvdev->mres.mr_mtx);
+	mutex_lock(&mvdev->mres.lock);
 	if (group == MLX5_VDPA_CVQ_GROUP && mvdev->mres.mr[asid])
 		err = mlx5_vdpa_update_cvq_iotlb(mvdev, mvdev->mres.mr[asid]->iotlb, asid);
-	mutex_unlock(&mvdev->mres.mr_mtx);
+	mutex_unlock(&mvdev->mres.lock);
 
 	return err;
 }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.