Commit f27a0d50 authored by Jason Gunthorpe, committed by Doug Ledford

RDMA/umem: Use umem->owning_mm inside ODP

Since ODP had a single struct mmu_notifier located in the ucontext, it
could only handle a single MM at a time, and this prevented it from using
the new owning_mm system.

With the prior rework it is now simple to let ODP track multiple MMs per
ucontext, so finish the job: allocate the per_mm on a mm-by-mm basis and
free it when the last umem using it is dropped from the ucontext.

As a side effect the new saner locking removes the lockdep splat about
nesting the umem_rwsem between mmu_notifier_unregister and
ib_umem_odp_release.

It also makes ODP work with multiple processes, across fork, etc.
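
As an illustration of the per-mm allocation scheme described above, here is a
minimal sketch, not the literal code from this commit: the helper names
get_per_mm() and alloc_per_mm() and the exact locking are assumptions, while
the fields it touches (per_mm_list, per_mm_list_lock, mm, ucontext_list,
odp_mrs_count) are the ones added in the diff below.

/*
 * Hedged sketch only: look up the per_mm matching the umem's owning_mm
 * under per_mm_list_lock, allocating one on a miss.  alloc_per_mm() is a
 * hypothetical helper assumed to register the mmu_notifier for that mm.
 */
static struct ib_ucontext_per_mm *get_per_mm(struct ib_umem_odp *umem_odp)
{
        struct ib_ucontext *ctx = umem_odp->umem.context;
        struct ib_ucontext_per_mm *per_mm;

        mutex_lock(&ctx->per_mm_list_lock);
        list_for_each_entry(per_mm, &ctx->per_mm_list, ucontext_list) {
                /* One ucontext can now carry several per_mm entries,
                 * keyed by the mm_struct that owns each umem. */
                if (per_mm->mm == umem_odp->umem.owning_mm)
                        goto found;
        }

        per_mm = alloc_per_mm(ctx, umem_odp->umem.owning_mm);
found:
        if (!IS_ERR(per_mm))
                per_mm->odp_mrs_count++;
        mutex_unlock(&ctx->per_mm_list_lock);
        return per_mm;
}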
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent c9990ab3
@@ -124,12 +124,8 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
 	ucontext->cleanup_retryable = false;
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	ucontext->per_mm.umem_tree = RB_ROOT_CACHED;
-	init_rwsem(&ucontext->per_mm.umem_rwsem);
-	ucontext->per_mm.odp_mrs_count = 0;
-	INIT_LIST_HEAD(&ucontext->per_mm.no_private_counters);
-	ucontext->per_mm.context = ucontext;
+	mutex_init(&ucontext->per_mm_list_lock);
+	INIT_LIST_HEAD(&ucontext->per_mm_list);
 	if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
 		ucontext->invalidate_range = NULL;
@@ -1861,6 +1861,13 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
 	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
 	struct mlx5_bfreg_info *bfregi;
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+	/* All umem's must be destroyed before destroying the ucontext. */
+	mutex_lock(&ibcontext->per_mm_list_lock);
+	WARN_ON(!list_empty(&ibcontext->per_mm_list));
+	mutex_unlock(&ibcontext->per_mm_list_lock);
+#endif
 	if (context->devx_uid)
 		mlx5_ib_devx_destroy(dev, context);
@@ -393,7 +393,7 @@ static struct ib_umem_odp *implicit_mr_get_data(struct mlx5_ib_mr *mr,
 		if (nentries)
 			nentries++;
 	} else {
-		odp = ib_alloc_odp_umem(odp_mr->umem.context, addr,
+		odp = ib_alloc_odp_umem(odp_mr->per_mm, addr,
					MLX5_IMR_MTT_SIZE);
 		if (IS_ERR(odp)) {
 			mutex_unlock(&odp_mr->umem_mutex);
@@ -91,8 +91,26 @@ static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem)
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+struct ib_ucontext_per_mm {
+	struct ib_ucontext *context;
+	struct mm_struct *mm;
+	struct pid *tgid;
+	struct rb_root_cached umem_tree;
+	/* Protects umem_tree */
+	struct rw_semaphore umem_rwsem;
+	atomic_t notifier_count;
+	struct mmu_notifier mn;
+	/* A list of umems that don't have private mmu notifier counters yet. */
+	struct list_head no_private_counters;
+	unsigned int odp_mrs_count;
+	struct list_head ucontext_list;
+};
 int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access);
-struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext *context,
+struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm,
				       unsigned long addr, size_t size);
 void ib_umem_odp_release(struct ib_umem_odp *umem_odp);
@@ -1488,25 +1488,6 @@ struct ib_rdmacg_object {
 #endif
 };
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-struct ib_ucontext_per_mm {
-	struct ib_ucontext *context;
-	struct rb_root_cached umem_tree;
-	/*
-	 * Protects .umem_rbroot and tree, as well as odp_mrs_count and
-	 * mmu notifiers registration.
-	 */
-	struct rw_semaphore umem_rwsem;
-	struct mmu_notifier mn;
-	atomic_t notifier_count;
-	/* A list of umems that don't have private mmu notifier counters yet. */
-	struct list_head no_private_counters;
-	unsigned int odp_mrs_count;
-};
-#endif
 struct ib_ucontext {
 	struct ib_device *device;
 	struct ib_uverbs_file *ufile;
@@ -1523,7 +1504,8 @@ struct ib_ucontext {
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
 	void (*invalidate_range)(struct ib_umem_odp *umem_odp,
				 unsigned long start, unsigned long end);
-	struct ib_ucontext_per_mm per_mm;
+	struct mutex per_mm_list_lock;
+	struct list_head per_mm_list;
 #endif
 	struct ib_rdmacg_object cg_obj;
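
For completeness, the teardown direction implied by the commit message
("freed when the last umem is dropped") can be sketched as below. This is a
hedged illustration only: the helper name put_per_mm() and the exact
unregister/cleanup ordering are assumptions, not the commit's literal code;
the fields used come from the ib_ucontext_per_mm definition above.

/*
 * Hedged sketch: when the last umem belonging to a per_mm goes away, the
 * per_mm leaves the ucontext's list and its mmu_notifier is unregistered.
 */
static void put_per_mm(struct ib_umem_odp *umem_odp)
{
        struct ib_ucontext_per_mm *per_mm = umem_odp->per_mm;
        struct ib_ucontext *ctx = per_mm->context;
        bool need_free;

        mutex_lock(&ctx->per_mm_list_lock);
        need_free = --per_mm->odp_mrs_count == 0;
        if (need_free)
                list_del(&per_mm->ucontext_list);
        mutex_unlock(&ctx->per_mm_list_lock);

        if (need_free) {
                /* Unregistering outside the umem_rwsem/list lock is what
                 * removes the lockdep nesting splat mentioned in the
                 * commit message. */
                mmu_notifier_unregister(&per_mm->mn, per_mm->mm);
                put_pid(per_mm->tgid);
                kfree(per_mm);
        }
}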