Commit f57e88a8 authored by Hugh Dickins, committed by Linus Torvalds

[PATCH] unpaged: ZERO_PAGE in VM_UNPAGED

It's strange enough to be looking out for anonymous pages in VM_UNPAGED areas;
let's not insert the ZERO_PAGE there, though whether it would matter will
depend on what we decide about ZERO_PAGE refcounting.

But whereas do_anonymous_page may (exceptionally) be called on a VM_UNPAGED
area, do_no_page should never be: just BUG_ON.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent ee498ed7
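
As background for the hunks below: a VM_UNPAGED vma is normally populated in one
go by the driver's mmap handler, so the fault paths patched here are only reached
in corner cases. A minimal sketch of such a handler follows; the driver name and
the physical base address are invented purely for illustration and are not part
of this patch.

#include <linux/fs.h>
#include <linux/mm.h>

/* Illustrative physical base of the hypothetical device's memory, as it
 * would be discovered at probe time; not from this patch. */
static unsigned long mydev_phys_base;

/*
 * Hypothetical ->mmap handler: remap_pfn_range() installs present ptes
 * for the whole range right here, which is why a later fault in such a
 * vma (the do_anonymous_page/do_no_page paths patched below) is the
 * exception rather than the rule.
 */
static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	return remap_pfn_range(vma, vma->vm_start,
			       mydev_phys_base >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}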
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -591,7 +591,7 @@ static inline size_t read_zero_pagealigned(char __user * buf, size_t size)
 
 		if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0)
 			goto out_up;
-		if (vma->vm_flags & (VM_SHARED | VM_HUGETLB))
+		if (vma->vm_flags & (VM_SHARED | VM_HUGETLB | VM_UNPAGED))
 			break;
 		count = vma->vm_end - addr;
 		if (count > size)
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1812,7 +1812,16 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	spinlock_t *ptl;
 	pte_t entry;
 
-	if (write_access) {
+	/*
+	 * A VM_UNPAGED vma will normally be filled with present ptes
+	 * by remap_pfn_range, and never arrive here; but it might have
+	 * holes, or if !VM_DONTEXPAND, mremap might have expanded it.
+	 * It's weird enough handling anon pages in unpaged vmas, we do
+	 * not want to worry about ZERO_PAGEs too (it may or may not
+	 * matter if their counts wrap): just give them anon pages.
+	 */
+	if (write_access || (vma->vm_flags & VM_UNPAGED)) {
 		/* Allocate our own private page. */
 		pte_unmap(page_table);
 
@@ -1887,6 +1896,7 @@ static int do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	int anon = 0;
 
 	pte_unmap(page_table);
+	BUG_ON(vma->vm_flags & VM_UNPAGED);
 
 	if (vma->vm_file) {
 		mapping = vma->vm_file->f_mapping;
@@ -1962,7 +1972,7 @@ static int do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		inc_mm_counter(mm, anon_rss);
 		lru_cache_add_active(new_page);
 		page_add_anon_rmap(new_page, vma, address);
-	} else if (!(vma->vm_flags & VM_UNPAGED)) {
+	} else {
 		inc_mm_counter(mm, file_rss);
 		page_add_file_rmap(new_page);
 	}
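
The comment added to do_anonymous_page mentions two ways a VM_UNPAGED vma can
still take a fault: holes, or growth by mremap when the driver did not set
VM_DONTEXPAND. A rough user-space sketch of the mremap case, with the device
path and sizes made up for illustration (error checking omitted); after this
patch a read fault in the expanded tail is given a private anon page rather
than the ZERO_PAGE.

#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical device whose driver remap_pfn_range()s one page. */
	int fd = open("/dev/mydev", O_RDWR);
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_SHARED, fd, 0);

	/* If the driver did not set VM_DONTEXPAND, the vma may be grown;
	 * the new tail has no ptes behind it. */
	p = mremap(p, 4096, 2 * 4096, MREMAP_MAYMOVE);

	/* Reading the tail faults into do_anonymous_page(): before this
	 * patch it could map the ZERO_PAGE there, now it gets an anon page. */
	char c = *(volatile char *)(p + 4096);
	(void)c;

	close(fd);
	return 0;
}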