Commit 13859d5d authored by Leon Romanovsky, committed by Jason Gunthorpe

RDMA/mlx5: Embed into the code flow the ODP config option

Convert various places to more readable code, which embeds
CONFIG_INFINIBAND_ON_DEMAND_PAGING into the code flow.
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 8b4d5bc5
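
The whole patch follows the kernel's IS_ENABLED() idiom: rather than hiding code behind #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING, the option is tested with an ordinary if (), so the guarded code is always parsed and type-checked, and the compiler drops the branch as dead code when the option is off. A minimal sketch of the pattern, with a hypothetical CONFIG_FOO option and foo_setup() helper standing in for the real ones:

    /* Before: the block is invisible to the compiler when CONFIG_FOO=n. */
    #ifdef CONFIG_FOO
    	foo_setup(dev);
    #endif

    /*
     * After: IS_ENABLED(CONFIG_FOO) expands to a constant 0 or 1
     * (1 for both =y and =m), so the branch is optimized away when
     * the option is off while the code still gets build coverage.
     */
    if (IS_ENABLED(CONFIG_FOO))
    	foo_setup(dev);

Any identifier used inside such a branch must still be declared when the option is disabled; where the declarations themselves are conditional (e.g. struct ib_ucontext_per_mm in ib_umem_odp.h below), the #ifdef stays.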
drivers/infiniband/core/uverbs_cmd.c
@@ -234,14 +234,11 @@ static int ib_uverbs_get_context(struct uverbs_attr_bundle *attrs)
 	ucontext->closing = false;
 	ucontext->cleanup_retryable = false;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
 	mutex_init(&ucontext->per_mm_list_lock);
 	INIT_LIST_HEAD(&ucontext->per_mm_list);
 	if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
 		ucontext->invalidate_range = NULL;
-#endif
 	resp.num_comp_vectors = file->device->num_comp_vectors;
 	ret = get_unused_fd_flags(O_CLOEXEC);
...
drivers/infiniband/hw/mlx5/main.c
@@ -1763,9 +1763,9 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	if (err)
 		goto out_sys_pages;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range;
-#endif
+	if (ibdev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING)
+		context->ibucontext.invalidate_range =
+			&mlx5_ib_invalidate_range;
 	if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
 		err = mlx5_ib_devx_create(dev, true);
@@ -1897,12 +1897,10 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
 	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
 	struct mlx5_bfreg_info *bfregi;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
 	/* All umem's must be destroyed before destroying the ucontext. */
 	mutex_lock(&ibcontext->per_mm_list_lock);
 	WARN_ON(!list_empty(&ibcontext->per_mm_list));
 	mutex_unlock(&ibcontext->per_mm_list_lock);
-#endif
 	bfregi = &context->bfregi;
 	mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
@@ -5722,11 +5720,11 @@ static struct ib_counters *mlx5_ib_create_counters(struct ib_device *device,
 void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
 {
 	mlx5_ib_cleanup_multiport_master(dev);
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	cleanup_srcu_struct(&dev->mr_srcu);
-	drain_workqueue(dev->advise_mr_wq);
-	destroy_workqueue(dev->advise_mr_wq);
-#endif
+	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
+		cleanup_srcu_struct(&dev->mr_srcu);
+		drain_workqueue(dev->advise_mr_wq);
+		destroy_workqueue(dev->advise_mr_wq);
+	}
 	kfree(dev->port);
 }
@@ -5779,8 +5777,9 @@ int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
 	spin_lock_init(&dev->memic.memic_lock);
 	dev->memic.dev = mdev;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	dev->advise_mr_wq = alloc_ordered_workqueue("mlx5_ib_advise_mr_wq", 0);
+	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
+		dev->advise_mr_wq =
+			alloc_ordered_workqueue("mlx5_ib_advise_mr_wq", 0);
 		if (!dev->advise_mr_wq) {
 			err = -ENOMEM;
 			goto err_mp;
@@ -5791,7 +5790,7 @@ int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
 			destroy_workqueue(dev->advise_mr_wq);
 			goto err_mp;
 		}
-#endif
+	}
 	return 0;
 err_mp:
...
drivers/infiniband/hw/mlx5/mem.c
@@ -111,7 +111,6 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
 	*count = i;
 }
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
 static u64 umem_dma_to_mtt(dma_addr_t umem_dma)
 {
 	u64 mtt_entry = umem_dma & ODP_DMA_ADDR_MASK;
@@ -123,7 +122,6 @@ static u64 umem_dma_to_mtt(dma_addr_t umem_dma)
 	return mtt_entry;
 }
-#endif
 /*
  * Populate the given array with bus addresses from the umem.
@@ -151,7 +149,7 @@ void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
 	int len;
 	struct scatterlist *sg;
 	int entry;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+
 	if (umem->is_odp) {
 		WARN_ON(shift != 0);
 		WARN_ON(access_flags != (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE));
@@ -164,7 +162,6 @@ void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
 		}
 		return;
 	}
-#endif
 	i = 0;
 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
...
drivers/infiniband/hw/mlx5/mr.c
@@ -71,10 +71,9 @@ static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
 	int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	/* Wait until all page fault handlers using the mr complete. */
-	synchronize_srcu(&dev->mr_srcu);
-#endif
+	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
+		/* Wait until all page fault handlers using the mr complete. */
+		synchronize_srcu(&dev->mr_srcu);
 	return err;
 }
@@ -254,9 +253,8 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
 		mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
 	}
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	synchronize_srcu(&dev->mr_srcu);
-#endif
+	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
+		synchronize_srcu(&dev->mr_srcu);
 	list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
 		list_del(&mr->list);
@@ -1329,8 +1327,8 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
 		    start, virt_addr, length, access_flags);
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	if (!start && length == U64_MAX) {
+	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && !start &&
+	    length == U64_MAX) {
 		if (!(access_flags & IB_ACCESS_ON_DEMAND) ||
 		    !(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
 			return ERR_PTR(-EINVAL);
@@ -1340,7 +1338,6 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 			return ERR_CAST(mr);
 		return &mr->ibmr;
 	}
-#endif
 	err = mr_umem_get(pd, start, length, access_flags, &umem, &npages,
 			  &page_shift, &ncont, &order);
@@ -1401,9 +1398,9 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 		}
 	}
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	mr->live = 1;
-#endif
+	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
+		mr->live = 1;
+
 	return &mr->ibmr;
 error:
 	ib_umem_release(umem);
@@ -1518,9 +1515,8 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
 		}
 		mr->allocated_from_cache = 0;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-		mr->live = 1;
-#endif
+		if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
+			mr->live = 1;
 	} else {
 		/*
 		 * Send a UMR WQE
...
include/rdma/ib_umem_odp.h
@@ -83,6 +83,19 @@ static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem)
 	return container_of(umem, struct ib_umem_odp, umem);
 }
+/*
+ * The lower 2 bits of the DMA address signal the R/W permissions for
+ * the entry. To upgrade the permissions, provide the appropriate
+ * bitmask to the map_dma_pages function.
+ *
+ * Be aware that upgrading a mapped address might result in change of
+ * the DMA address for the page.
+ */
+#define ODP_READ_ALLOWED_BIT  (1<<0ULL)
+#define ODP_WRITE_ALLOWED_BIT (1<<1ULL)
+
+#define ODP_DMA_ADDR_MASK (~(ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT))
+
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
 struct ib_ucontext_per_mm {
@@ -107,19 +120,6 @@ struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm,
 				      unsigned long addr, size_t size);
 void ib_umem_odp_release(struct ib_umem_odp *umem_odp);
-/*
- * The lower 2 bits of the DMA address signal the R/W permissions for
- * the entry. To upgrade the permissions, provide the appropriate
- * bitmask to the map_dma_pages function.
- *
- * Be aware that upgrading a mapped address might result in change of
- * the DMA address for the page.
- */
-#define ODP_READ_ALLOWED_BIT  (1<<0ULL)
-#define ODP_WRITE_ALLOWED_BIT (1<<1ULL)
-
-#define ODP_DMA_ADDR_MASK (~(ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT))
-
 int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
 			      u64 bcnt, u64 access_mask,
 			      unsigned long current_seq);
...