Commit 9beae1ea, authored by Lorenzo Stoakes, committed by Linus Torvalds

mm: replace get_user_pages_remote() write/force parameters with gup_flags

This removes the 'write' and 'force' from get_user_pages_remote() and
replaces them with 'gup_flags' to make the use of FOLL_FORCE explicit in
callers as use of this flag can result in surprising behaviour (and
hence bugs) within the mm subsystem.
Signed-off-by: Lorenzo Stoakes <lstoakes@gmail.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 768ae309
...@@ -748,19 +748,22 @@ static struct page **etnaviv_gem_userptr_do_get_pages( ...@@ -748,19 +748,22 @@ static struct page **etnaviv_gem_userptr_do_get_pages(
int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT; int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
struct page **pvec; struct page **pvec;
uintptr_t ptr; uintptr_t ptr;
unsigned int flags = 0;
pvec = drm_malloc_ab(npages, sizeof(struct page *)); pvec = drm_malloc_ab(npages, sizeof(struct page *));
if (!pvec) if (!pvec)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
if (!etnaviv_obj->userptr.ro)
flags |= FOLL_WRITE;
pinned = 0; pinned = 0;
ptr = etnaviv_obj->userptr.ptr; ptr = etnaviv_obj->userptr.ptr;
down_read(&mm->mmap_sem); down_read(&mm->mmap_sem);
while (pinned < npages) { while (pinned < npages) {
ret = get_user_pages_remote(task, mm, ptr, npages - pinned, ret = get_user_pages_remote(task, mm, ptr, npages - pinned,
!etnaviv_obj->userptr.ro, 0, flags, pvec + pinned, NULL);
pvec + pinned, NULL);
if (ret < 0) if (ret < 0)
break; break;
......
...@@ -508,6 +508,10 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work) ...@@ -508,6 +508,10 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY); pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY);
if (pvec != NULL) { if (pvec != NULL) {
struct mm_struct *mm = obj->userptr.mm->mm; struct mm_struct *mm = obj->userptr.mm->mm;
unsigned int flags = 0;
if (!obj->userptr.read_only)
flags |= FOLL_WRITE;
ret = -EFAULT; ret = -EFAULT;
if (atomic_inc_not_zero(&mm->mm_users)) { if (atomic_inc_not_zero(&mm->mm_users)) {
...@@ -517,7 +521,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work) ...@@ -517,7 +521,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
(work->task, mm, (work->task, mm,
obj->userptr.ptr + pinned * PAGE_SIZE, obj->userptr.ptr + pinned * PAGE_SIZE,
npages - pinned, npages - pinned,
!obj->userptr.read_only, 0, flags,
pvec + pinned, NULL); pvec + pinned, NULL);
if (ret < 0) if (ret < 0)
break; break;
......
...@@ -527,6 +527,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt, ...@@ -527,6 +527,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
u64 off; u64 off;
int j, k, ret = 0, start_idx, npages = 0; int j, k, ret = 0, start_idx, npages = 0;
u64 base_virt_addr; u64 base_virt_addr;
unsigned int flags = 0;
if (access_mask == 0) if (access_mask == 0)
return -EINVAL; return -EINVAL;
...@@ -556,6 +557,9 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt, ...@@ -556,6 +557,9 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
goto out_put_task; goto out_put_task;
} }
if (access_mask & ODP_WRITE_ALLOWED_BIT)
flags |= FOLL_WRITE;
start_idx = (user_virt - ib_umem_start(umem)) >> PAGE_SHIFT; start_idx = (user_virt - ib_umem_start(umem)) >> PAGE_SHIFT;
k = start_idx; k = start_idx;
...@@ -574,8 +578,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt, ...@@ -574,8 +578,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
*/ */
npages = get_user_pages_remote(owning_process, owning_mm, npages = get_user_pages_remote(owning_process, owning_mm,
user_virt, gup_num_pages, user_virt, gup_num_pages,
access_mask & ODP_WRITE_ALLOWED_BIT, flags, local_page_list, NULL);
0, local_page_list, NULL);
up_read(&owning_mm->mmap_sem); up_read(&owning_mm->mmap_sem);
if (npages < 0) if (npages < 0)
......
...@@ -191,6 +191,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, ...@@ -191,6 +191,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
{ {
struct page *page; struct page *page;
int ret; int ret;
unsigned int gup_flags = FOLL_FORCE;
#ifdef CONFIG_STACK_GROWSUP #ifdef CONFIG_STACK_GROWSUP
if (write) { if (write) {
...@@ -199,12 +200,16 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, ...@@ -199,12 +200,16 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
return NULL; return NULL;
} }
#endif #endif
if (write)
gup_flags |= FOLL_WRITE;
/* /*
* We are doing an exec(). 'current' is the process * We are doing an exec(). 'current' is the process
* doing the exec and bprm->mm is the new process's mm. * doing the exec and bprm->mm is the new process's mm.
*/ */
ret = get_user_pages_remote(current, bprm->mm, pos, 1, write, ret = get_user_pages_remote(current, bprm->mm, pos, 1, gup_flags,
1, &page, NULL); &page, NULL);
if (ret <= 0) if (ret <= 0)
return NULL; return NULL;
......
...@@ -1276,7 +1276,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, ...@@ -1276,7 +1276,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
struct vm_area_struct **vmas, int *nonblocking); struct vm_area_struct **vmas, int *nonblocking);
long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages, unsigned long start, unsigned long nr_pages,
int write, int force, struct page **pages, unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas); struct vm_area_struct **vmas);
long get_user_pages(unsigned long start, unsigned long nr_pages, long get_user_pages(unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages, unsigned int gup_flags, struct page **pages,
......
...@@ -300,7 +300,8 @@ int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, ...@@ -300,7 +300,8 @@ int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr,
retry: retry:
/* Read the page with vaddr into memory */ /* Read the page with vaddr into memory */
ret = get_user_pages_remote(NULL, mm, vaddr, 1, 0, 1, &old_page, &vma); ret = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &old_page,
&vma);
if (ret <= 0) if (ret <= 0)
return ret; return ret;
...@@ -1710,7 +1711,8 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr) ...@@ -1710,7 +1711,8 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
* but we treat this as a 'remote' access since it is * but we treat this as a 'remote' access since it is
* essentially a kernel access to the memory. * essentially a kernel access to the memory.
*/ */
result = get_user_pages_remote(NULL, mm, vaddr, 1, 0, 1, &page, NULL); result = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &page,
NULL);
if (result < 0) if (result < 0)
return result; return result;
......
...@@ -915,9 +915,7 @@ EXPORT_SYMBOL(get_user_pages_unlocked); ...@@ -915,9 +915,7 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
* @mm: mm_struct of target mm * @mm: mm_struct of target mm
* @start: starting user address * @start: starting user address
* @nr_pages: number of pages from start to pin * @nr_pages: number of pages from start to pin
* @write: whether pages will be written to by the caller * @gup_flags: flags modifying lookup behaviour
* @force: whether to force access even when user mapping is currently
* protected (but never forces write access to shared mapping).
* @pages: array that receives pointers to the pages pinned. * @pages: array that receives pointers to the pages pinned.
* Should be at least nr_pages long. Or NULL, if caller * Should be at least nr_pages long. Or NULL, if caller
* only intends to ensure the pages are faulted in. * only intends to ensure the pages are faulted in.
...@@ -946,9 +944,9 @@ EXPORT_SYMBOL(get_user_pages_unlocked); ...@@ -946,9 +944,9 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
* or similar operation cannot guarantee anything stronger anyway because * or similar operation cannot guarantee anything stronger anyway because
* locks can't be held over the syscall boundary. * locks can't be held over the syscall boundary.
* *
* If write=0, the page must not be written to. If the page is written to, * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
* set_page_dirty (or set_page_dirty_lock, as appropriate) must be called * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
* after the page is finished with, and before put_page is called. * be called after the page is finished with, and before put_page is called.
* *
* get_user_pages is typically used for fewer-copy IO operations, to get a * get_user_pages is typically used for fewer-copy IO operations, to get a
* handle on the memory by some means other than accesses via the user virtual * handle on the memory by some means other than accesses via the user virtual
...@@ -965,18 +963,12 @@ EXPORT_SYMBOL(get_user_pages_unlocked); ...@@ -965,18 +963,12 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
*/ */
long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages, unsigned long start, unsigned long nr_pages,
int write, int force, struct page **pages, unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas) struct vm_area_struct **vmas)
{ {
unsigned int flags = FOLL_TOUCH | FOLL_REMOTE;
if (write)
flags |= FOLL_WRITE;
if (force)
flags |= FOLL_FORCE;
return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas, return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
NULL, false, flags); NULL, false,
gup_flags | FOLL_TOUCH | FOLL_REMOTE);
} }
EXPORT_SYMBOL(get_user_pages_remote); EXPORT_SYMBOL(get_user_pages_remote);
......
...@@ -3873,6 +3873,10 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, ...@@ -3873,6 +3873,10 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
{ {
struct vm_area_struct *vma; struct vm_area_struct *vma;
void *old_buf = buf; void *old_buf = buf;
unsigned int flags = FOLL_FORCE;
if (write)
flags |= FOLL_WRITE;
down_read(&mm->mmap_sem); down_read(&mm->mmap_sem);
/* ignore errors, just check how much was successfully transferred */ /* ignore errors, just check how much was successfully transferred */
...@@ -3882,7 +3886,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, ...@@ -3882,7 +3886,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
struct page *page = NULL; struct page *page = NULL;
ret = get_user_pages_remote(tsk, mm, addr, 1, ret = get_user_pages_remote(tsk, mm, addr, 1,
write, 1, &page, &vma); flags, &page, &vma);
if (ret <= 0) { if (ret <= 0) {
#ifndef CONFIG_HAVE_IOREMAP_PROT #ifndef CONFIG_HAVE_IOREMAP_PROT
break; break;
......
...@@ -881,7 +881,7 @@ bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos, ...@@ -881,7 +881,7 @@ bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos,
* the execve(). * the execve().
*/ */
if (get_user_pages_remote(current, bprm->mm, pos, 1, if (get_user_pages_remote(current, bprm->mm, pos, 1,
0, 1, &page, NULL) <= 0) FOLL_FORCE, &page, NULL) <= 0)
return false; return false;
#else #else
page = bprm->page[pos / PAGE_SIZE]; page = bprm->page[pos / PAGE_SIZE];
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment