Commit fac555ac authored by Christoph Hellwig, committed by Jason Gunthorpe

mm/hmm: remove superfluous arguments from hmm_range_register

The start, end and page_shift values are all saved in the range structure,
so we might as well use that for argument passing.

Link: https://lore.kernel.org/r/20190806160554.14046-7-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 2cbeb419
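For illustration, a minimal caller-side sketch of the new convention, loosely modeled on the amdgpu and nouveau hunks below; the helper name and the start/npages parameters are hypothetical, and only struct hmm_range, its start/end/page_shift fields, and the two-argument hmm_range_register() come from this commit:

#include <linux/mm.h>
#include <linux/hmm.h>

/* Hypothetical caller: fill in the range geometry, then register it. */
static int example_register_range(struct hmm_range *range,
				  struct hmm_mirror *mirror,
				  unsigned long start,
				  unsigned long npages)
{
	/* The geometry now lives in the range structure itself ... */
	range->page_shift = PAGE_SHIFT;
	range->start = start;
	range->end = start + npages * PAGE_SIZE;

	/* ... so registration only needs the range and the mirror. */
	return hmm_range_register(range, mirror);
}

hmm_range_register() reads start, end and page_shift back out of *range and rejects ranges that are empty or not aligned to the page size implied by page_shift, as the mm/hmm.c hunk at the end of the diff shows.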
@@ -222,7 +222,7 @@ The usage pattern is::
       range.flags = ...;
       range.values = ...;
       range.pfn_shift = ...;
-      hmm_range_register(&range);
+      hmm_range_register(&range, mirror);
       /*
        * Just wait for range to be valid, safe to ignore return value as we
...
@@ -818,8 +818,11 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
 				0 : range->flags[HMM_PFN_WRITE];
 	range->pfn_flags_mask = 0;
 	range->pfns = pfns;
-	hmm_range_register(range, mirror, start,
-			   start + ttm->num_pages * PAGE_SIZE, PAGE_SHIFT);
+	range->page_shift = PAGE_SHIFT;
+	range->start = start;
+	range->end = start + ttm->num_pages * PAGE_SIZE;
+	hmm_range_register(range, mirror);
 	/*
 	 * Just wait for range to be valid, safe to ignore return value as we
...
@@ -492,9 +492,7 @@ nouveau_range_fault(struct nouveau_svmm *svmm, struct hmm_range *range)
 	range->default_flags = 0;
 	range->pfn_flags_mask = -1UL;
-	ret = hmm_range_register(range, &svmm->mirror,
-				 range->start, range->end,
-				 PAGE_SHIFT);
+	ret = hmm_range_register(range, &svmm->mirror);
 	if (ret) {
 		up_read(&svmm->mm->mmap_sem);
 		return (int)ret;
@@ -682,6 +680,7 @@ nouveau_svm_fault(struct nvif_notify *notify)
 			args.i.p.addr + args.i.p.size, fn - fi);
 		/* Have HMM fault pages within the fault window to the GPU. */
+		range.page_shift = PAGE_SHIFT;
 		range.start = args.i.p.addr;
 		range.end = args.i.p.addr + args.i.p.size;
 		range.pfns = args.phys;
...
@@ -400,11 +400,7 @@ void hmm_mirror_unregister(struct hmm_mirror *mirror);
 /*
  * Please see Documentation/vm/hmm.rst for how to use the range API.
  */
-int hmm_range_register(struct hmm_range *range,
-		       struct hmm_mirror *mirror,
-		       unsigned long start,
-		       unsigned long end,
-		       unsigned page_shift);
+int hmm_range_register(struct hmm_range *range, struct hmm_mirror *mirror);
 void hmm_range_unregister(struct hmm_range *range);
 /*
...
@@ -850,35 +850,25 @@ static void hmm_pfns_clear(struct hmm_range *range,
  * hmm_range_register() - start tracking change to CPU page table over a range
  * @range: range
  * @mm: the mm struct for the range of virtual address
- * @start: start virtual address (inclusive)
- * @end: end virtual address (exclusive)
- * @page_shift: expect page shift for the range
  *
  * Return: 0 on success, -EFAULT if the address space is no longer valid
  *
  * Track updates to the CPU page table see include/linux/hmm.h
  */
-int hmm_range_register(struct hmm_range *range,
-		       struct hmm_mirror *mirror,
-		       unsigned long start,
-		       unsigned long end,
-		       unsigned page_shift)
+int hmm_range_register(struct hmm_range *range, struct hmm_mirror *mirror)
 {
-	unsigned long mask = ((1UL << page_shift) - 1UL);
+	unsigned long mask = ((1UL << range->page_shift) - 1UL);
 	struct hmm *hmm = mirror->hmm;
 	unsigned long flags;
 	range->valid = false;
 	range->hmm = NULL;
-	if ((start & mask) || (end & mask))
+	if ((range->start & mask) || (range->end & mask))
 		return -EINVAL;
-	if (start >= end)
+	if (range->start >= range->end)
 		return -EINVAL;
-	range->page_shift = page_shift;
-	range->start = start;
-	range->end = end;
 	/* Prevent hmm_release() from running while the range is valid */
 	if (!mmget_not_zero(hmm->mm))
 		return -EFAULT;
...