Commit 9d73777e authored by Peter Zijlstra, committed by Linus Torvalds

clarify get_user_pages() prototype

Currently the 4th parameter of get_user_pages() is called len, but it is
in pages, not bytes. Rename it to nr_pages to avoid future confusion.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent ab420e6d
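For illustration only, a minimal caller sketch (not part of this patch; pin_user_buffer() and its len_bytes parameter are hypothetical) showing that, after the rename, the 4th argument of get_user_pages() is a page count derived from a byte length rather than the byte length itself:

#include <linux/mm.h>
#include <linux/sched.h>

/* Hypothetical helper: pins a byte range of user memory.
 * Assumes "start" is page-aligned; rounds the byte length up
 * to whole pages before calling get_user_pages(). */
static int pin_user_buffer(struct task_struct *tsk, struct mm_struct *mm,
			   unsigned long start, size_t len_bytes,
			   struct page **pages)
{
	int nr_pages = (len_bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* 4th argument is a number of pages, not bytes */
	return get_user_pages(tsk, mm, start, nr_pages,
			      1 /* write */, 0 /* force */,
			      pages, NULL);
}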
@@ -826,7 +826,7 @@ extern int make_pages_present(unsigned long addr, unsigned long end);
 extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-			unsigned long start, int len, int write, int force,
+			unsigned long start, int nr_pages, int write, int force,
 			struct page **pages, struct vm_area_struct **vmas);
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 			struct page **pages);
...
@@ -1207,8 +1207,8 @@ static inline int use_zero_page(struct vm_area_struct *vma)
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		     unsigned long start, int len, int flags,
+		     unsigned long start, int nr_pages, int flags,
 		     struct page **pages, struct vm_area_struct **vmas)
 {
 	int i;
 	unsigned int vm_flags = 0;
@@ -1217,7 +1217,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 	int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);
 	int ignore_sigkill = !!(flags & GUP_FLAGS_IGNORE_SIGKILL);
-	if (len <= 0)
+	if (nr_pages <= 0)
 		return 0;
 	/*
 	 * Require read or write permissions.
@@ -1269,7 +1269,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 			vmas[i] = gate_vma;
 			i++;
 			start += PAGE_SIZE;
-			len--;
+			nr_pages--;
 			continue;
 		}
@@ -1280,7 +1280,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		if (is_vm_hugetlb_page(vma)) {
 			i = follow_hugetlb_page(mm, vma, pages, vmas,
-					&start, &len, i, write);
+					&start, &nr_pages, i, write);
 			continue;
 		}
@@ -1357,9 +1357,9 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 			vmas[i] = vma;
 			i++;
 			start += PAGE_SIZE;
-			len--;
-		} while (len && start < vma->vm_end);
-	} while (len);
+			nr_pages--;
+		} while (nr_pages && start < vma->vm_end);
+	} while (nr_pages);
 	return i;
 }
@@ -1368,7 +1368,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
  * @tsk:	task_struct of target task
  * @mm:		mm_struct of target mm
  * @start:	starting user address
- * @len:	number of pages from start to pin
+ * @nr_pages:	number of pages from start to pin
  * @write:	whether pages will be written to by the caller
  * @force:	whether to force write access even if user mapping is
  *		readonly. This will result in the page being COWed even
@@ -1380,7 +1380,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
  *		Or NULL if the caller does not require them.
  *
  * Returns number of pages pinned. This may be fewer than the number
- * requested. If len is 0 or negative, returns 0. If no pages
+ * requested. If nr_pages is 0 or negative, returns 0. If no pages
  * were pinned, returns -errno. Each page returned must be released
  * with a put_page() call when it is finished with. vmas will only
  * remain valid while mmap_sem is held.
@@ -1414,7 +1414,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
  * See also get_user_pages_fast, for performance critical applications.
  */
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		unsigned long start, int len, int write, int force,
+		unsigned long start, int nr_pages, int write, int force,
 		struct page **pages, struct vm_area_struct **vmas)
 {
 	int flags = 0;
@@ -1424,9 +1424,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 	if (force)
 		flags |= GUP_FLAGS_FORCE;
-	return __get_user_pages(tsk, mm,
-				start, len, flags,
-				pages, vmas);
+	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
 }
 EXPORT_SYMBOL(get_user_pages);
...
@@ -173,8 +173,8 @@ unsigned int kobjsize(const void *objp)
 }
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		     unsigned long start, int len, int flags,
+		     unsigned long start, int nr_pages, int flags,
 		     struct page **pages, struct vm_area_struct **vmas)
 {
 	struct vm_area_struct *vma;
 	unsigned long vm_flags;
@@ -189,7 +189,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 	vm_flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
 	vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
-	for (i = 0; i < len; i++) {
+	for (i = 0; i < nr_pages; i++) {
 		vma = find_vma(mm, start);
 		if (!vma)
 			goto finish_or_fault;
@@ -224,7 +224,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
  * - don't permit access to VMAs that don't support it, such as I/O mappings
  */
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-	unsigned long start, int len, int write, int force,
+	unsigned long start, int nr_pages, int write, int force,
 	struct page **pages, struct vm_area_struct **vmas)
 {
 	int flags = 0;
@@ -234,9 +234,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 	if (force)
 		flags |= GUP_FLAGS_FORCE;
-	return __get_user_pages(tsk, mm,
-				start, len, flags,
-				pages, vmas);
+	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
 }
 EXPORT_SYMBOL(get_user_pages);
...
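As a hedged usage sketch of the convention documented in the kernel-doc comment above (the helper name pin_and_release() and its error handling are illustrative, not from the tree): the caller holds mmap_sem for reading across the call, checks for a short or failed pin, and drops every returned page with put_page():

#include <linux/mm.h>
#include <linux/sched.h>

/* Illustrative helper: pin nr_pages pages starting at "start",
 * then release them again. */
static int pin_and_release(struct task_struct *tsk, struct mm_struct *mm,
			   unsigned long start, int nr_pages,
			   struct page **pages)
{
	int pinned, i;

	down_read(&mm->mmap_sem);
	pinned = get_user_pages(tsk, mm, start, nr_pages,
				0 /* write */, 0 /* force */,
				pages, NULL);
	up_read(&mm->mmap_sem);

	/* returns 0 if nr_pages <= 0, -errno if nothing was pinned */
	if (pinned <= 0)
		return pinned;

	/* may be fewer than nr_pages; release only what was pinned */
	for (i = 0; i < pinned; i++)
		put_page(pages[i]);

	return pinned;
}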