Commit d8668bb0 authored by Christoph Hellwig, committed by Jason Gunthorpe

memremap: pass a struct dev_pagemap to ->kill and ->cleanup

Passing the actual typed structure leads to more understandable code
than just passing the ref member.
Reported-by: Logan Gunthorpe <logang@deltatee.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Logan Gunthorpe <logang@deltatee.com>
Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Tested-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 1e240e8d
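
To make the new convention concrete before the hunks: a minimal sketch of
the pattern this commit enables (all my_* names are hypothetical, not part
of this commit). A driver embeds a struct dev_pagemap, so the typed callback
argument lets it recover its private state with container_of(), while
refcount operations still reach the percpu_ref through pgmap->ref.

	#include <linux/completion.h>
	#include <linux/memremap.h>
	#include <linux/percpu-refcount.h>

	/* Hypothetical driver state; mirrors the dev_dax and p2pdma
	 * patterns in the hunks below.  Not from this commit. */
	struct my_driver_pagemap {
		struct completion done;    /* signaled when the ref drops */
		struct percpu_ref ref;     /* pgmap->ref points here */
		struct dev_pagemap pgmap;  /* embedded, so container_of() works */
	};

	static void my_pagemap_kill(struct dev_pagemap *pgmap)
	{
		/* the ref member is still reachable via the typed pointer */
		percpu_ref_kill(pgmap->ref);
	}

	static void my_pagemap_cleanup(struct dev_pagemap *pgmap)
	{
		/* recover the containing structure from the typed argument */
		struct my_driver_pagemap *p =
			container_of(pgmap, struct my_driver_pagemap, pgmap);

		wait_for_completion(&p->done);
		percpu_ref_exit(&p->ref);
	}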
@@ -27,21 +27,21 @@ static void dev_dax_percpu_release(struct percpu_ref *ref)
 	complete(&dev_dax->cmp);
 }
 
-static void dev_dax_percpu_exit(struct percpu_ref *ref)
+static void dev_dax_percpu_exit(struct dev_pagemap *pgmap)
 {
-	struct dev_dax *dev_dax = ref_to_dev_dax(ref);
+	struct dev_dax *dev_dax = container_of(pgmap, struct dev_dax, pgmap);
 
 	dev_dbg(&dev_dax->dev, "%s\n", __func__);
 	wait_for_completion(&dev_dax->cmp);
-	percpu_ref_exit(ref);
+	percpu_ref_exit(pgmap->ref);
 }
 
-static void dev_dax_percpu_kill(struct percpu_ref *ref)
+static void dev_dax_percpu_kill(struct dev_pagemap *pgmap)
 {
-	struct dev_dax *dev_dax = ref_to_dev_dax(ref);
+	struct dev_dax *dev_dax = container_of(pgmap, struct dev_dax, pgmap);
 
 	dev_dbg(&dev_dax->dev, "%s\n", __func__);
-	percpu_ref_kill(ref);
+	percpu_ref_kill(pgmap->ref);
 }
 
 static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
...
@@ -303,24 +303,24 @@ static const struct attribute_group *pmem_attribute_groups[] = {
 	NULL,
 };
 
-static void pmem_pagemap_cleanup(struct percpu_ref *ref)
+static void pmem_pagemap_cleanup(struct dev_pagemap *pgmap)
 {
-	struct request_queue *q;
+	struct request_queue *q =
+		container_of(pgmap->ref, struct request_queue, q_usage_counter);
 
-	q = container_of(ref, typeof(*q), q_usage_counter);
 	blk_cleanup_queue(q);
 }
 
-static void pmem_release_queue(void *ref)
+static void pmem_release_queue(void *pgmap)
 {
-	pmem_pagemap_cleanup(ref);
+	pmem_pagemap_cleanup(pgmap);
 }
 
-static void pmem_pagemap_kill(struct percpu_ref *ref)
+static void pmem_pagemap_kill(struct dev_pagemap *pgmap)
 {
-	struct request_queue *q;
+	struct request_queue *q =
+		container_of(pgmap->ref, struct request_queue, q_usage_counter);
 
-	q = container_of(ref, typeof(*q), q_usage_counter);
 	blk_freeze_queue_start(q);
 }
...
@@ -435,7 +435,7 @@ static int pmem_attach_disk(struct device *dev,
 		memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
 	} else {
 		if (devm_add_action_or_reset(dev, pmem_release_queue,
-					&q->q_usage_counter))
+					&pmem->pgmap))
 			return -ENOMEM;
 		addr = devm_memremap(dev, pmem->phys_addr,
 				pmem->size, ARCH_MEMREMAP_PMEM);
...
@@ -91,14 +91,15 @@ static void pci_p2pdma_percpu_release(struct percpu_ref *ref)
 	complete(&p2p_pgmap->ref_done);
 }
 
-static void pci_p2pdma_percpu_kill(struct percpu_ref *ref)
+static void pci_p2pdma_percpu_kill(struct dev_pagemap *pgmap)
 {
-	percpu_ref_kill(ref);
+	percpu_ref_kill(pgmap->ref);
 }
 
-static void pci_p2pdma_percpu_cleanup(struct percpu_ref *ref)
+static void pci_p2pdma_percpu_cleanup(struct dev_pagemap *pgmap)
 {
-	struct p2pdma_pagemap *p2p_pgmap = to_p2p_pgmap(ref);
+	struct p2pdma_pagemap *p2p_pgmap =
+		container_of(pgmap, struct p2pdma_pagemap, pgmap);
 
 	wait_for_completion(&p2p_pgmap->ref_done);
 	percpu_ref_exit(&p2p_pgmap->ref);
...
@@ -74,12 +74,12 @@ struct dev_pagemap_ops {
 	/*
 	 * Transition the refcount in struct dev_pagemap to the dead state.
 	 */
-	void (*kill)(struct percpu_ref *ref);
+	void (*kill)(struct dev_pagemap *pgmap);
 
 	/*
 	 * Wait for refcount in struct dev_pagemap to be idle and reap it.
 	 */
-	void (*cleanup)(struct percpu_ref *ref);
+	void (*cleanup)(struct dev_pagemap *pgmap);
 };
 
 /**
...
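
Continuing the hypothetical sketch from above: wiring these ops into the
pagemap before calling devm_memremap_pages() would look roughly like this
(again, the my_* names are assumptions for illustration, not this commit's
code).

	static void my_percpu_release(struct percpu_ref *ref)
	{
		struct my_driver_pagemap *p =
			container_of(ref, struct my_driver_pagemap, ref);

		complete(&p->done);    /* lets my_pagemap_cleanup() finish */
	}

	static const struct dev_pagemap_ops my_pagemap_ops = {
		.kill    = my_pagemap_kill,
		.cleanup = my_pagemap_cleanup,
	};

	static int my_driver_probe(struct device *dev,
			struct my_driver_pagemap *p)
	{
		void *addr;

		init_completion(&p->done);
		if (percpu_ref_init(&p->ref, my_percpu_release, 0, GFP_KERNEL))
			return -ENOMEM;

		p->pgmap.ref = &p->ref;    /* refcount used by kill/cleanup */
		p->pgmap.ops = &my_pagemap_ops;
		addr = devm_memremap_pages(dev, &p->pgmap);
		return PTR_ERR_OR_ZERO(addr);
	}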
@@ -92,10 +92,10 @@ static void devm_memremap_pages_release(void *data)
 	unsigned long pfn;
 	int nid;
 
-	pgmap->ops->kill(pgmap->ref);
+	pgmap->ops->kill(pgmap);
 	for_each_device_pfn(pfn, pgmap)
 		put_page(pfn_to_page(pfn));
-	pgmap->ops->cleanup(pgmap->ref);
+	pgmap->ops->cleanup(pgmap);
 
 	/* pages are dead and unused, undo the arch mapping */
 	align_start = res->start & ~(SECTION_SIZE - 1);
...
@@ -294,8 +294,8 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 err_pfn_remap:
 	pgmap_array_delete(res);
 err_array:
-	pgmap->ops->kill(pgmap->ref);
-	pgmap->ops->cleanup(pgmap->ref);
+	pgmap->ops->kill(pgmap);
+	pgmap->ops->cleanup(pgmap);
 	return ERR_PTR(error);
 }
 EXPORT_SYMBOL_GPL(devm_memremap_pages);
...
@@ -1352,18 +1352,18 @@ static void hmm_devmem_ref_release(struct percpu_ref *ref)
 	complete(&devmem->completion);
 }
 
-static void hmm_devmem_ref_exit(struct percpu_ref *ref)
+static void hmm_devmem_ref_exit(struct dev_pagemap *pgmap)
 {
 	struct hmm_devmem *devmem;
 
-	devmem = container_of(ref, struct hmm_devmem, ref);
+	devmem = container_of(pgmap, struct hmm_devmem, pagemap);
 	wait_for_completion(&devmem->completion);
-	percpu_ref_exit(ref);
+	percpu_ref_exit(pgmap->ref);
 }
 
-static void hmm_devmem_ref_kill(struct percpu_ref *ref)
+static void hmm_devmem_ref_kill(struct dev_pagemap *pgmap)
 {
-	percpu_ref_kill(ref);
+	percpu_ref_kill(pgmap->ref);
 }
 
 static vm_fault_t hmm_devmem_fault(struct vm_area_struct *vma,
...
@@ -102,8 +102,8 @@ static void nfit_test_kill(void *_pgmap)
 	WARN_ON(!pgmap || !pgmap->ref || !pgmap->ops || !pgmap->ops->kill ||
 			!pgmap->ops->cleanup);
 
-	pgmap->ops->kill(pgmap->ref);
-	pgmap->ops->cleanup(pgmap->ref);
+	pgmap->ops->kill(pgmap);
+	pgmap->ops->cleanup(pgmap);
 }
 
 void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)