Commit 597ecc5a authored by Jason Gunthorpe, committed by Doug Ledford

RDMA/umem: Get rid of struct ib_umem.odp_data

This no longer has any use; we can use container_of() to get to the
umem_odp, and a simple flag to indicate whether this is an ODP MR. Remove
the few remaining references to it.
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 41b4deea
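
The replacement relies on the embedded-structure idiom set up by the parent
commit: struct ib_umem_odp embeds a struct ib_umem, so a pointer to the inner
umem can be converted back to its containing umem_odp with container_of(),
and a one-bit is_odp flag records whether that conversion is legal. A minimal
sketch of the pattern (field names follow include/rdma/ib_umem_odp.h;
surrounding members are elided):

	struct ib_umem {
		u32 writable : 1;
		u32 hugetlb : 1;
		u32 is_odp : 1;	/* set only when embedded in an ib_umem_odp */
		/* ... */
	};

	struct ib_umem_odp {
		struct ib_umem umem;	/* embedded member used by container_of() */
		/* ... ODP-specific state: page_list, dma_list, umem_mutex, ... */
	};

	static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem)
	{
		/* Recover the containing ib_umem_odp; only valid if umem->is_odp. */
		return container_of(umem, struct ib_umem_odp, umem);
	}

Callers first test is_odp and only then downcast, which is exactly the shape
of the checks converted below.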
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -112,7 +112,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 		umem = kzalloc(sizeof(struct ib_umem_odp), GFP_KERNEL);
 		if (!umem)
 			return ERR_PTR(-ENOMEM);
-		umem->odp_data = to_ib_umem_odp(umem);
+		umem->is_odp = 1;
 	} else {
 		umem = kzalloc(sizeof(*umem), GFP_KERNEL);
 		if (!umem)
@@ -243,7 +243,7 @@ EXPORT_SYMBOL(ib_umem_get);
 static void __ib_umem_release_tail(struct ib_umem *umem)
 {
 	mmdrop(umem->owning_mm);
-	if (umem->odp_data)
+	if (umem->is_odp)
 		kfree(to_ib_umem_odp(umem));
 	else
 		kfree(umem);
@@ -268,7 +268,7 @@ void ib_umem_release(struct ib_umem *umem)
 {
 	struct ib_ucontext *context = umem->context;
 
-	if (umem->odp_data) {
+	if (umem->is_odp) {
 		ib_umem_odp_release(to_ib_umem_odp(umem));
 		__ib_umem_release_tail(umem);
 		return;
@@ -306,7 +306,7 @@ int ib_umem_page_count(struct ib_umem *umem)
 	int n;
 	struct scatterlist *sg;
 
-	if (umem->odp_data)
+	if (umem->is_odp)
 		return ib_umem_num_pages(umem);
 
 	n = 0;
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -291,6 +291,7 @@ struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext *context,
 	umem->address    = addr;
 	umem->page_shift = PAGE_SHIFT;
 	umem->writable   = 1;
+	umem->is_odp = 1;
 
 	mutex_init(&odp_data->umem_mutex);
 	init_completion(&odp_data->notifier_completion);
@@ -319,8 +320,6 @@ struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext *context,
 					 &context->no_private_counters);
 	up_write(&context->umem_rwsem);
 
-	umem->odp_data = odp_data;
-
 	return odp_data;
 
 out_page_list:
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -57,7 +57,7 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
 	int entry;
 	unsigned long page_shift = umem->page_shift;
 
-	if (umem->odp_data) {
+	if (umem->is_odp) {
 		*ncont = ib_umem_page_count(umem);
 		*count = *ncont << (page_shift - PAGE_SHIFT);
 		*shift = page_shift;
@@ -152,14 +152,13 @@ void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
 	struct scatterlist *sg;
 	int entry;
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	const bool odp = umem->odp_data != NULL;
-
-	if (odp) {
+	if (umem->is_odp) {
 		WARN_ON(shift != 0);
 		WARN_ON(access_flags != (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE));
 
 		for (i = 0; i < num_pages; ++i) {
-			dma_addr_t pa = umem->odp_data->dma_list[offset + i];
+			dma_addr_t pa =
+				to_ib_umem_odp(umem)->dma_list[offset + i];
 
 			pas[i] = cpu_to_be64(umem_dma_to_mtt(pa));
 		}
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -98,7 +98,7 @@ static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
 static void update_odp_mr(struct mlx5_ib_mr *mr)
 {
-	if (mr->umem->odp_data) {
+	if (mr->umem->is_odp) {
 		/*
 		 * This barrier prevents the compiler from moving the
 		 * setting of umem->odp_data->private to point to our
@@ -107,7 +107,7 @@ static void update_odp_mr(struct mlx5_ib_mr *mr)
 		 * handle invalidations.
 		 */
 		smp_wmb();
-		mr->umem->odp_data->private = mr;
+		to_ib_umem_odp(mr->umem)->private = mr;
 		/*
 		 * Make sure we will see the new
 		 * umem->odp_data->private value in the invalidation
@@ -1624,15 +1624,16 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 	struct ib_umem *umem = mr->umem;
 
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	if (umem && umem->odp_data) {
+	if (umem && umem->is_odp) {
+		struct ib_umem_odp *umem_odp = to_ib_umem_odp(umem);
+
 		/* Prevent new page faults from succeeding */
 		mr->live = 0;
 		/* Wait for all running page-fault handlers to finish. */
 		synchronize_srcu(&dev->mr_srcu);
 		/* Destroy all page mappings */
-		if (umem->odp_data->page_list)
-			mlx5_ib_invalidate_range(to_ib_umem_odp(umem),
-						 ib_umem_start(umem),
+		if (umem_odp->page_list)
+			mlx5_ib_invalidate_range(umem_odp, ib_umem_start(umem),
 						 ib_umem_end(umem));
 		else
 			mlx5_ib_free_implicit_mr(mr);
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -371,11 +371,12 @@ static struct ib_umem_odp *implicit_mr_get_data(struct mlx5_ib_mr *mr,
 	struct ib_ucontext *ctx = mr->ibmr.pd->uobject->context;
 	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.pd->device);
 	struct ib_umem_odp *odp, *result = NULL;
+	struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
 	u64 addr = io_virt & MLX5_IMR_MTT_MASK;
 	int nentries = 0, start_idx = 0, ret;
 	struct mlx5_ib_mr *mtt;
 
-	mutex_lock(&mr->umem->odp_data->umem_mutex);
+	mutex_lock(&odp_mr->umem_mutex);
 	odp = odp_lookup(ctx, addr, 1, mr);
 
 	mlx5_ib_dbg(dev, "io_virt:%llx bcnt:%zx addr:%llx odp:%p\n",
@@ -388,14 +389,14 @@ static struct ib_umem_odp *implicit_mr_get_data(struct mlx5_ib_mr *mr,
 	} else {
 		odp = ib_alloc_odp_umem(ctx, addr, MLX5_IMR_MTT_SIZE);
 		if (IS_ERR(odp)) {
-			mutex_unlock(&mr->umem->odp_data->umem_mutex);
+			mutex_unlock(&odp_mr->umem_mutex);
 			return ERR_CAST(odp);
 		}
 
 		mtt = implicit_mr_alloc(mr->ibmr.pd, &odp->umem, 0,
 					mr->access_flags);
 		if (IS_ERR(mtt)) {
-			mutex_unlock(&mr->umem->odp_data->umem_mutex);
+			mutex_unlock(&odp_mr->umem_mutex);
 			ib_umem_release(&odp->umem);
 			return ERR_CAST(mtt);
 		}
@@ -433,7 +434,7 @@ static struct ib_umem_odp *implicit_mr_get_data(struct mlx5_ib_mr *mr,
 		}
 	}
 
-	mutex_unlock(&mr->umem->odp_data->umem_mutex);
+	mutex_unlock(&odp_mr->umem_mutex);
 
 	return result;
 }
@@ -498,6 +499,7 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
 static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
 			u64 io_virt, size_t bcnt, u32 *bytes_mapped)
 {
+	struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
 	u64 access_mask = ODP_READ_ALLOWED_BIT;
 	int npages = 0, page_shift, np;
 	u64 start_idx, page_mask;
@@ -506,7 +508,7 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
 	size_t size;
 	int ret;
 
-	if (!mr->umem->odp_data->page_list) {
+	if (!odp_mr->page_list) {
 		odp = implicit_mr_get_data(mr, io_virt, bcnt);
 
 		if (IS_ERR(odp))
@@ -514,7 +516,7 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
 
 		mr = odp->private;
 	} else {
-		odp = mr->umem->odp_data;
+		odp = odp_mr;
 	}
 
 next_mr:
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -46,10 +46,10 @@ struct ib_umem {
 	size_t			length;
 	unsigned long		address;
 	int			page_shift;
-	int			writable;
-	int			hugetlb;
+	u32 writable : 1;
+	u32 hugetlb : 1;
+	u32 is_odp : 1;
 	struct work_struct	work;
-	struct ib_umem_odp	*odp_data;
 	struct sg_table		sg_head;
 	int			nmap;
 	int			npages;
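
With odp_data gone, only the flag tells the release path which object it is
freeing, so allocation and release must agree on the size. A condensed sketch
of the pairing in umem.c as changed above (the IB_ACCESS_ON_DEMAND condition
is an assumption not visible in the truncated hunk; error paths omitted):

	/* Allocation: an ODP umem is really the larger ib_umem_odp, and
	 * is_odp records that fact for the release path.
	 */
	if (access & IB_ACCESS_ON_DEMAND) {	/* assumed branch condition */
		umem = kzalloc(sizeof(struct ib_umem_odp), GFP_KERNEL);
		umem->is_odp = 1;
	} else {
		umem = kzalloc(sizeof(*umem), GFP_KERNEL);
	}

	/* Release: kfree() the same object that kzalloc() returned. */
	if (umem->is_odp)
		kfree(to_ib_umem_odp(umem));
	else
		kfree(umem);

container_of() works whatever the embedded member's offset is; freeing
through to_ib_umem_odp() simply makes the allocation/release pairing explicit
in the types.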