Commit 1c3d247e authored by Jason Gunthorpe

RDMA/mlx5: Remove mlx5_ib_mr->npages

This is the same value as ib_umem_num_pages(mr->umem), use that instead.

Link: https://lore.kernel.org/r/20201026131936.1335664-4-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent fc332570
...@@ -602,7 +602,6 @@ struct mlx5_ib_mr { ...@@ -602,7 +602,6 @@ struct mlx5_ib_mr {
struct mlx5_shared_mr_info *smr_info; struct mlx5_shared_mr_info *smr_info;
struct list_head list; struct list_head list;
struct mlx5_cache_ent *cache_ent; struct mlx5_cache_ent *cache_ent;
int npages;
struct mlx5_ib_dev *dev; struct mlx5_ib_dev *dev;
u32 out[MLX5_ST_SZ_DW(create_mkey_out)]; u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
struct mlx5_core_sig_ctx *sig; struct mlx5_core_sig_ctx *sig;
......
...@@ -1417,8 +1417,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, ...@@ -1417,8 +1417,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key); mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);
mr->umem = umem; mr->umem = umem;
mr->npages = npages; atomic_add(ib_umem_num_pages(mr->umem), &dev->mdev->priv.reg_pages);
atomic_add(mr->npages, &dev->mdev->priv.reg_pages);
set_mr_fields(dev, mr, length, access_flags); set_mr_fields(dev, mr, length, access_flags);
if (xlt_with_umr && !(access_flags & IB_ACCESS_ON_DEMAND)) { if (xlt_with_umr && !(access_flags & IB_ACCESS_ON_DEMAND)) {
...@@ -1551,8 +1550,8 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, ...@@ -1551,8 +1550,8 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
* used. * used.
*/ */
flags |= IB_MR_REREG_TRANS; flags |= IB_MR_REREG_TRANS;
atomic_sub(mr->npages, &dev->mdev->priv.reg_pages); atomic_sub(ib_umem_num_pages(mr->umem),
mr->npages = 0; &dev->mdev->priv.reg_pages);
ib_umem_release(mr->umem); ib_umem_release(mr->umem);
mr->umem = NULL; mr->umem = NULL;
...@@ -1560,8 +1559,8 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, ...@@ -1560,8 +1559,8 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
&npages, &page_shift, &ncont, &order); &npages, &page_shift, &ncont, &order);
if (err) if (err)
goto err; goto err;
mr->npages = ncont; atomic_add(ib_umem_num_pages(mr->umem),
atomic_add(mr->npages, &dev->mdev->priv.reg_pages); &dev->mdev->priv.reg_pages);
} }
if (!mlx5_ib_can_reconfig_with_umr(dev, mr->access_flags, if (!mlx5_ib_can_reconfig_with_umr(dev, mr->access_flags,
...@@ -1694,7 +1693,6 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) ...@@ -1694,7 +1693,6 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{ {
int npages = mr->npages;
struct ib_umem *umem = mr->umem; struct ib_umem *umem = mr->umem;
/* Stop all DMA */ /* Stop all DMA */
...@@ -1703,14 +1701,17 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) ...@@ -1703,14 +1701,17 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
else else
clean_mr(dev, mr); clean_mr(dev, mr);
if (umem) {
if (!is_odp_mr(mr))
atomic_sub(ib_umem_num_pages(umem),
&dev->mdev->priv.reg_pages);
ib_umem_release(umem);
}
if (mr->cache_ent) if (mr->cache_ent)
mlx5_mr_cache_free(dev, mr); mlx5_mr_cache_free(dev, mr);
else else
kfree(mr); kfree(mr);
ib_umem_release(umem);
atomic_sub(npages, &dev->mdev->priv.reg_pages);
} }
int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment