Commit fba0e448 authored by Jason Gunthorpe

RDMA/mlx5: Use odp instead of mr->umem in pagefault_mr

These are the same thing since mr always comes from odp->private. It is
confusing to reference the same memory via two names.

Link: https://lore.kernel.org/r/20190819111710.18440-13-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent a705f3e3
@@ -601,7 +601,7 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
 	start_idx = (io_virt - (mr->mmkey.iova & page_mask)) >> page_shift;
 	access_mask = ODP_READ_ALLOWED_BIT;
 
-	if (prefetch && !downgrade && !mr->umem->writable) {
+	if (prefetch && !downgrade && !odp->umem.writable) {
 		/* prefetch with write-access must
 		 * be supported by the MR
 		 */
@@ -609,7 +609,7 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
 		goto out;
 	}
 
-	if (mr->umem->writable && !downgrade)
+	if (odp->umem.writable && !downgrade)
 		access_mask |= ODP_WRITE_ALLOWED_BIT;
 
 	current_seq = READ_ONCE(odp->notifiers_seq);
@@ -619,8 +619,8 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
 	 */
 	smp_rmb();
 
-	ret = ib_umem_odp_map_dma_pages(to_ib_umem_odp(mr->umem), io_virt, size,
-					access_mask, current_seq);
+	ret = ib_umem_odp_map_dma_pages(odp, io_virt, size, access_mask,
+					current_seq);
 
 	if (ret < 0)
 		goto out;
@@ -628,8 +628,7 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
 	np = ret;
 
 	mutex_lock(&odp->umem_mutex);
-	if (!ib_umem_mmu_notifier_retry(to_ib_umem_odp(mr->umem),
-					current_seq)) {
+	if (!ib_umem_mmu_notifier_retry(odp, current_seq)) {
 		/*
 		 * No need to check whether the MTTs really belong to
 		 * this MR, since ib_umem_odp_map_dma_pages already
...
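
For context (not part of the commit): a minimal, self-contained sketch of the aliasing the commit message describes. The struct layouts below are simplified stand-ins, not the real kernel definitions; in the driver, struct ib_umem_odp embeds the ib_umem and its private field points back at the MR, so odp and mr->umem name the same memory.

/* Simplified stand-ins (assumption: not the real kernel structs) showing
 * why odp->umem and *mr->umem are the same object. */
#include <stdio.h>

struct ib_umem { int writable; };

struct ib_umem_odp {
	struct ib_umem umem;	/* embedded as the first member */
	void *private;		/* points back at the owning MR */
};

struct mlx5_ib_mr { struct ib_umem *umem; };

/* Analogue of to_ib_umem_odp(): recover the containing ib_umem_odp.
 * Valid here because umem is the first member of ib_umem_odp. */
static struct ib_umem_odp *to_odp(struct ib_umem *umem)
{
	return (struct ib_umem_odp *)umem;
}

int main(void)
{
	struct ib_umem_odp odp = { .umem = { .writable = 1 } };
	struct mlx5_ib_mr mr = { .umem = &odp.umem };

	odp.private = &mr;	/* mr always comes from odp->private */

	/* Both expressions read the same flag, which is why the patch can
	 * replace mr->umem->writable with odp->umem.writable. */
	printf("%d %d\n", mr.umem->writable, to_odp(mr.umem)->umem.writable);
	return 0;
}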