Commit 9a863a6a authored by Jason Gunthorpe, committed by Andrew Morton

mm/gup: make locked never NULL in the internal GUP functions

Now that NULL locked doesn't have a special meaning, we can just make it
non-NULL in all cases and remove the special tests.

get_user_pages() and pin_user_pages() can safely pass in locked = 1.

get_user_pages_remote() and pin_user_pages_remote() can swap in a local
variable for locked if NULL is passed.

Remove all the NULL checks.
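Distilled into a standalone sketch, the pattern looks like this (the wrapper
name example_remote_wrapper is hypothetical; __get_user_pages_locked and the
local_locked idiom are taken from the patch below):

/*
 * Minimal sketch of the pattern this patch applies, not the kernel code
 * itself: an exported wrapper that still accepts a NULL @locked from its
 * callers substitutes a local "mmap_lock is held" flag, so all the
 * internal GUP code below it can assume @locked is never NULL.
 */
static long example_remote_wrapper(struct mm_struct *mm, unsigned long start,
                                   unsigned long nr_pages,
                                   unsigned int gup_flags, struct page **pages,
                                   struct vm_area_struct **vmas, int *locked)
{
        int local_locked = 1;   /* caller holds mmap_lock; keep it held */

        return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
                                       locked ? locked : &local_locked,
                                       gup_flags);
}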

Link: https://lkml.kernel.org/r/9-v2-987e91b59705+36b-gup_tidy_jgg@nvidia.com
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Acked-by: Mike Rapoport (IBM) <rppt@kernel.org>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Claudio Imbrenda <imbrenda@linux.ibm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent f04740f5
diff --git a/mm/gup.c b/mm/gup.c
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -879,9 +879,9 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address,
 }
 
 /*
- * mmap_lock must be held on entry. If @locked != NULL and *@flags
- * does not include FOLL_NOWAIT, the mmap_lock may be released. If it
- * is, *@locked will be set to 0 and -EBUSY returned.
+ * mmap_lock must be held on entry. If @flags has FOLL_UNLOCKABLE but not
+ * FOLL_NOWAIT, the mmap_lock may be released. If it is, *@locked will be set
+ * to 0 and -EBUSY returned.
  */
 static int faultin_page(struct vm_area_struct *vma,
                unsigned long address, unsigned int *flags, bool unshare,
@@ -930,8 +930,8 @@ static int faultin_page(struct vm_area_struct *vma,
                 * mmap lock in the page fault handler. Sanity check this.
                 */
                WARN_ON_ONCE(fault_flags & FAULT_FLAG_RETRY_NOWAIT);
-               if (locked)
-                       *locked = 0;
+               *locked = 0;
+
                /*
                 * We should do the same as VM_FAULT_RETRY, but let's not
                 * return -EBUSY since that's not reflecting the reality of
@@ -951,7 +951,7 @@ static int faultin_page(struct vm_area_struct *vma,
        }
 
        if (ret & VM_FAULT_RETRY) {
-               if (locked && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
+               if (!(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
                        *locked = 0;
                return -EBUSY;
        }
@@ -1062,14 +1062,12 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
  * appropriate) must be called after the page is finished with, and
  * before put_page is called.
  *
- * If @locked != NULL, *@locked will be set to 0 when mmap_lock is
- * released by an up_read(). That can happen if @gup_flags does not
- * have FOLL_NOWAIT.
+ * If FOLL_UNLOCKABLE is set without FOLL_NOWAIT then the mmap_lock may
+ * be released. If this happens *@locked will be set to 0 on return.
  *
- * A caller using such a combination of @locked and @gup_flags
- * must therefore hold the mmap_lock for reading only, and recognize
- * when it's been released. Otherwise, it must be held for either
- * reading or writing and will not be released.
+ * A caller using such a combination of @gup_flags must therefore hold the
+ * mmap_lock for reading only, and recognize when it's been released. Otherwise,
+ * it must be held for either reading or writing and will not be released.
  *
  * In most cases, get_user_pages or get_user_pages_fast should be used
  * instead of __get_user_pages. __get_user_pages should be used only if
@@ -1121,7 +1119,7 @@ static long __get_user_pages(struct mm_struct *mm,
                        i = follow_hugetlb_page(mm, vma, pages, vmas,
                                        &start, &nr_pages, i,
                                        gup_flags, locked);
-                       if (locked && *locked == 0) {
+                       if (!*locked) {
                                /*
                                 * We've got a VM_FAULT_RETRY
                                 * and we've lost mmap_lock.
@@ -1354,7 +1352,7 @@ static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
         * The internal caller expects GUP to manage the lock internally and the
         * lock must be released when this returns.
         */
-       if (locked && !*locked) {
+       if (!*locked) {
                if (mmap_read_lock_killable(mm))
                        return -EAGAIN;
                must_unlock = true;
@@ -1502,6 +1500,7 @@ long populate_vma_page_range(struct vm_area_struct *vma,
 {
        struct mm_struct *mm = vma->vm_mm;
        unsigned long nr_pages = (end - start) / PAGE_SIZE;
+       int local_locked = 1;
        int gup_flags;
        long ret;
@@ -1542,7 +1541,7 @@ long populate_vma_page_range(struct vm_area_struct *vma,
         * not result in a stack expansion that recurses back here.
         */
        ret = __get_user_pages(mm, start, nr_pages, gup_flags,
-                              NULL, NULL, locked);
+                              NULL, NULL, locked ? locked : &local_locked);
        lru_add_drain();
        return ret;
 }
@@ -1683,7 +1682,7 @@ static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
         * The internal caller expects GUP to manage the lock internally and the
         * lock must be released when this returns.
         */
-       if (locked && !*locked) {
+       if (!*locked) {
                if (mmap_read_lock_killable(mm))
                        return -EAGAIN;
                must_unlock = true;
@@ -2222,11 +2221,14 @@ long get_user_pages_remote(struct mm_struct *mm,
                unsigned int gup_flags, struct page **pages,
                struct vm_area_struct **vmas, int *locked)
 {
+       int local_locked = 1;
+
        if (!is_valid_gup_args(pages, vmas, locked, &gup_flags,
                               FOLL_TOUCH | FOLL_REMOTE))
                return -EINVAL;
 
-       return __get_user_pages_locked(mm, start, nr_pages, pages, vmas, locked,
+       return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
+                                      locked ? locked : &local_locked,
                                       gup_flags);
 }
 EXPORT_SYMBOL(get_user_pages_remote);
@@ -2261,11 +2263,13 @@ long get_user_pages(unsigned long start, unsigned long nr_pages,
                unsigned int gup_flags, struct page **pages,
                struct vm_area_struct **vmas)
 {
+       int locked = 1;
+
        if (!is_valid_gup_args(pages, vmas, NULL, &gup_flags, FOLL_TOUCH))
                return -EINVAL;
 
        return __get_user_pages_locked(current->mm, start, nr_pages, pages,
-                                      vmas, NULL, gup_flags);
+                                      vmas, &locked, gup_flags);
 }
 EXPORT_SYMBOL(get_user_pages);
@@ -3158,10 +3162,13 @@ long pin_user_pages_remote(struct mm_struct *mm,
                unsigned int gup_flags, struct page **pages,
                struct vm_area_struct **vmas, int *locked)
 {
+       int local_locked = 1;
+
        if (!is_valid_gup_args(pages, vmas, locked, &gup_flags,
                               FOLL_PIN | FOLL_TOUCH | FOLL_REMOTE))
                return 0;
-       return __gup_longterm_locked(mm, start, nr_pages, pages, vmas, locked,
+       return __gup_longterm_locked(mm, start, nr_pages, pages, vmas,
+                                    locked ? locked : &local_locked,
                                     gup_flags);
 }
 EXPORT_SYMBOL(pin_user_pages_remote);
@@ -3187,10 +3194,12 @@ long pin_user_pages(unsigned long start, unsigned long nr_pages,
                unsigned int gup_flags, struct page **pages,
                struct vm_area_struct **vmas)
 {
+       int locked = 1;
+
        if (!is_valid_gup_args(pages, vmas, NULL, &gup_flags, FOLL_PIN))
                return 0;
        return __gup_longterm_locked(current->mm, start, nr_pages,
-                                    pages, vmas, NULL, gup_flags);
+                                    pages, vmas, &locked, gup_flags);
 }
 EXPORT_SYMBOL(pin_user_pages);
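
For reference, a caller-side sketch of the @locked contract documented in the
hunks above (example_caller is a hypothetical name; get_user_pages_remote()'s
signature matches this tree, and mmap_read_lock()/mmap_read_unlock() are the
standard mmap_lock helpers):

/*
 * Hypothetical caller sketch: pass a non-NULL @locked while holding
 * mmap_lock for read, then check it afterwards to learn whether GUP
 * dropped the lock on our behalf.
 */
static long example_caller(struct mm_struct *mm, unsigned long addr,
                           struct page **pages)
{
        int locked = 1;
        long ret;

        mmap_read_lock(mm);
        ret = get_user_pages_remote(mm, addr, 1, FOLL_WRITE, pages, NULL,
                                    &locked);
        if (locked)
                mmap_read_unlock(mm);   /* GUP left the lock held */
        return ret;
}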