Commit f38b4b31 authored by Johannes Weiner, committed by Linus Torvalds

mm: memory: merge shared-writable dirtying branches in do_wp_page()

Whether there is a vm_ops->page_mkwrite or not, the page dirtying is
pretty much the same.  Make sure the page references are the same in both
cases, then merge the two branches.

It's tempting to go even further and page-lock the !page_mkwrite case, to
get it in line with everybody else setting the page table and thus further
simplify the model.  But that's not quite compelling enough to justify
dropping the pte lock, then relocking and verifying the entry for
filesystems without ->page_mkwrite, which notably includes tmpfs.  Leave
it for now and lock the page late in the !page_mkwrite case.
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 74ec6751
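
For reference, here is the dirty-handling tail of do_wp_page() as it reads with this patch applied, condensed from the diff below. The reuse path above it and the in-tree device-driver comment are trimmed, and the short comments here are editorial annotations rather than part of the patch:

        pte_unmap_unlock(page_table, ptl);
        ret |= VM_FAULT_WRITE;

        if (dirty_shared) {
                struct address_space *mapping;
                int dirtied;

                /* the ->page_mkwrite case arrives here with the page locked */
                if (!page_mkwrite)
                        lock_page(old_page);

                dirtied = set_page_dirty(old_page);
                VM_BUG_ON_PAGE(PageAnon(old_page), old_page);
                mapping = old_page->mapping;
                unlock_page(old_page);
                page_cache_release(old_page);

                if ((dirtied || page_mkwrite) && mapping)
                        balance_dirty_pages_ratelimited(mapping);

                /* ->page_mkwrite filesystems take care of file times themselves */
                if (!page_mkwrite)
                        file_update_time(vma->vm_file);
        }

        return ret;

Taking the page_cache_get() reference at the top of the shared-writable branch, for the mkwrite and non-mkwrite cases alike, is what lets this merged tail drop it with a single page_cache_release() regardless of which path was taken.
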
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2005,7 +2005,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
         pte_t entry;
         int ret = 0;
         int page_mkwrite = 0;
-        struct page *dirty_page = NULL;
+        bool dirty_shared = false;
         unsigned long mmun_start = 0;   /* For mmu_notifiers */
         unsigned long mmun_end = 0;     /* For mmu_notifiers */
         struct mem_cgroup *memcg;
@@ -2056,6 +2056,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                 unlock_page(old_page);
         } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
                                         (VM_WRITE|VM_SHARED))) {
+                page_cache_get(old_page);
                 /*
                  * Only catch write-faults on shared writable pages,
                  * read-only shared pages can get COWed by
@@ -2063,7 +2064,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                  */
                 if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
                         int tmp;
-                        page_cache_get(old_page);
+
                         pte_unmap_unlock(page_table, ptl);
                         tmp = do_page_mkwrite(vma, old_page, address);
                         if (unlikely(!tmp || (tmp &
@@ -2083,11 +2084,10 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                                 unlock_page(old_page);
                                 goto unlock;
                         }
-
                         page_mkwrite = 1;
                 }
-                dirty_page = old_page;
-                get_page(dirty_page);
+
+                dirty_shared = true;
 
 reuse:
                 /*
@@ -2106,20 +2106,20 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
         pte_unmap_unlock(page_table, ptl);
         ret |= VM_FAULT_WRITE;
 
-        if (!dirty_page)
-                return ret;
-
-        if (!page_mkwrite) {
+        if (dirty_shared) {
                 struct address_space *mapping;
                 int dirtied;
 
-                lock_page(dirty_page);
-                dirtied = set_page_dirty(dirty_page);
-                VM_BUG_ON_PAGE(PageAnon(dirty_page), dirty_page);
-                mapping = dirty_page->mapping;
-                unlock_page(dirty_page);
+                if (!page_mkwrite)
+                        lock_page(old_page);
 
-                if (dirtied && mapping) {
+                dirtied = set_page_dirty(old_page);
+                VM_BUG_ON_PAGE(PageAnon(old_page), old_page);
+                mapping = old_page->mapping;
+                unlock_page(old_page);
+                page_cache_release(old_page);
+
+                if ((dirtied || page_mkwrite) && mapping) {
                         /*
                          * Some device drivers do not set page.mapping
                          * but still dirty their pages
@@ -2127,23 +2127,9 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                         balance_dirty_pages_ratelimited(mapping);
                 }
 
-                file_update_time(vma->vm_file);
+                if (!page_mkwrite)
+                        file_update_time(vma->vm_file);
         }
-        put_page(dirty_page);
-        if (page_mkwrite) {
-                struct address_space *mapping = dirty_page->mapping;
-
-                set_page_dirty(dirty_page);
-                unlock_page(dirty_page);
-                page_cache_release(dirty_page);
-                if (mapping) {
-                        /*
-                         * Some device drivers do not set page.mapping
-                         * but still dirty their pages
-                         */
-                        balance_dirty_pages_ratelimited(mapping);
-                }
-        }
 
         return ret;
 }