Commit a200ee18 authored by Peter Zijlstra's avatar Peter Zijlstra Committed by Linus Torvalds

mm: set_page_dirty_balance() vs ->page_mkwrite()

All the current page_mkwrite() implementations also set the page dirty. As a
result, the subsequent set_page_dirty_balance() call does _not_ invoke the
balance path, because the page is already found dirty.

This allows us to dirty a _lot_ of pages without ever hitting
balance_dirty_pages().  Not good (tm).

Force a balance call if ->page_mkwrite() was successful.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 3eb215de
...@@ -127,7 +127,7 @@ int sync_page_range(struct inode *inode, struct address_space *mapping, ...@@ -127,7 +127,7 @@ int sync_page_range(struct inode *inode, struct address_space *mapping,
loff_t pos, loff_t count); loff_t pos, loff_t count);
int sync_page_range_nolock(struct inode *inode, struct address_space *mapping, int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
loff_t pos, loff_t count); loff_t pos, loff_t count);
void set_page_dirty_balance(struct page *page); void set_page_dirty_balance(struct page *page, int page_mkwrite);
void writeback_set_ratelimit(void); void writeback_set_ratelimit(void);
/* pdflush.c */ /* pdflush.c */
......
...@@ -1639,6 +1639,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, ...@@ -1639,6 +1639,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
struct page *old_page, *new_page; struct page *old_page, *new_page;
pte_t entry; pte_t entry;
int reuse = 0, ret = 0; int reuse = 0, ret = 0;
int page_mkwrite = 0;
struct page *dirty_page = NULL; struct page *dirty_page = NULL;
old_page = vm_normal_page(vma, address, orig_pte); old_page = vm_normal_page(vma, address, orig_pte);
...@@ -1687,6 +1688,8 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, ...@@ -1687,6 +1688,8 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
page_cache_release(old_page); page_cache_release(old_page);
if (!pte_same(*page_table, orig_pte)) if (!pte_same(*page_table, orig_pte))
goto unlock; goto unlock;
page_mkwrite = 1;
} }
dirty_page = old_page; dirty_page = old_page;
get_page(dirty_page); get_page(dirty_page);
...@@ -1774,7 +1777,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, ...@@ -1774,7 +1777,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
* do_no_page is protected similarly. * do_no_page is protected similarly.
*/ */
wait_on_page_locked(dirty_page); wait_on_page_locked(dirty_page);
set_page_dirty_balance(dirty_page); set_page_dirty_balance(dirty_page, page_mkwrite);
put_page(dirty_page); put_page(dirty_page);
} }
return ret; return ret;
...@@ -2322,6 +2325,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, ...@@ -2322,6 +2325,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
struct page *dirty_page = NULL; struct page *dirty_page = NULL;
struct vm_fault vmf; struct vm_fault vmf;
int ret; int ret;
int page_mkwrite = 0;
vmf.virtual_address = (void __user *)(address & PAGE_MASK); vmf.virtual_address = (void __user *)(address & PAGE_MASK);
vmf.pgoff = pgoff; vmf.pgoff = pgoff;
...@@ -2398,6 +2402,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, ...@@ -2398,6 +2402,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
anon = 1; /* no anon but release vmf.page */ anon = 1; /* no anon but release vmf.page */
goto out; goto out;
} }
page_mkwrite = 1;
} }
} }
...@@ -2453,7 +2458,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, ...@@ -2453,7 +2458,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
if (anon) if (anon)
page_cache_release(vmf.page); page_cache_release(vmf.page);
else if (dirty_page) { else if (dirty_page) {
set_page_dirty_balance(dirty_page); set_page_dirty_balance(dirty_page, page_mkwrite);
put_page(dirty_page); put_page(dirty_page);
} }
......
...@@ -274,9 +274,9 @@ static void balance_dirty_pages(struct address_space *mapping) ...@@ -274,9 +274,9 @@ static void balance_dirty_pages(struct address_space *mapping)
pdflush_operation(background_writeout, 0); pdflush_operation(background_writeout, 0);
} }
void set_page_dirty_balance(struct page *page) void set_page_dirty_balance(struct page *page, int page_mkwrite)
{ {
if (set_page_dirty(page)) { if (set_page_dirty(page) || page_mkwrite) {
struct address_space *mapping = page_mapping(page); struct address_space *mapping = page_mapping(page);
if (mapping) if (mapping)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment