Commit abe4c3b5 authored by Mel Gorman's avatar Mel Gorman Committed by Linus Torvalds

vmscan: set up pagevec as late as possible in shrink_page_list()

shrink_page_list() sets up a pagevec to release pages as they are freed.
It uses significant amounts of stack on the pagevec.  This
patch adds pages to be freed via pagevec to a linked list which is then
freed en-masse at the end.  This avoids using stack in the main path that
potentially calls writepage().
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Chris Mason <chris.mason@oracle.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Michael Rubin <mrubin@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 66635629
...@@ -622,6 +622,24 @@ static enum page_references page_check_references(struct page *page, ...@@ -622,6 +622,24 @@ static enum page_references page_check_references(struct page *page,
return PAGEREF_RECLAIM; return PAGEREF_RECLAIM;
} }
static noinline_for_stack void free_page_list(struct list_head *free_pages)
{
struct pagevec freed_pvec;
struct page *page, *tmp;
pagevec_init(&freed_pvec, 1);
list_for_each_entry_safe(page, tmp, free_pages, lru) {
list_del(&page->lru);
if (!pagevec_add(&freed_pvec, page)) {
__pagevec_free(&freed_pvec);
pagevec_reinit(&freed_pvec);
}
}
pagevec_free(&freed_pvec);
}
/* /*
* shrink_page_list() returns the number of reclaimed pages * shrink_page_list() returns the number of reclaimed pages
*/ */
...@@ -630,13 +648,12 @@ static unsigned long shrink_page_list(struct list_head *page_list, ...@@ -630,13 +648,12 @@ static unsigned long shrink_page_list(struct list_head *page_list,
enum pageout_io sync_writeback) enum pageout_io sync_writeback)
{ {
LIST_HEAD(ret_pages); LIST_HEAD(ret_pages);
struct pagevec freed_pvec; LIST_HEAD(free_pages);
int pgactivate = 0; int pgactivate = 0;
unsigned long nr_reclaimed = 0; unsigned long nr_reclaimed = 0;
cond_resched(); cond_resched();
pagevec_init(&freed_pvec, 1);
while (!list_empty(page_list)) { while (!list_empty(page_list)) {
enum page_references references; enum page_references references;
struct address_space *mapping; struct address_space *mapping;
...@@ -811,10 +828,12 @@ static unsigned long shrink_page_list(struct list_head *page_list, ...@@ -811,10 +828,12 @@ static unsigned long shrink_page_list(struct list_head *page_list,
__clear_page_locked(page); __clear_page_locked(page);
free_it: free_it:
nr_reclaimed++; nr_reclaimed++;
if (!pagevec_add(&freed_pvec, page)) {
__pagevec_free(&freed_pvec); /*
pagevec_reinit(&freed_pvec); * Is there need to periodically free_page_list? It would
} * appear not as the counts should be low
*/
list_add(&page->lru, &free_pages);
continue; continue;
cull_mlocked: cull_mlocked:
...@@ -837,9 +856,10 @@ static unsigned long shrink_page_list(struct list_head *page_list, ...@@ -837,9 +856,10 @@ static unsigned long shrink_page_list(struct list_head *page_list,
list_add(&page->lru, &ret_pages); list_add(&page->lru, &ret_pages);
VM_BUG_ON(PageLRU(page) || PageUnevictable(page)); VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
} }
free_page_list(&free_pages);
list_splice(&ret_pages, page_list); list_splice(&ret_pages, page_list);
if (pagevec_count(&freed_pvec))
__pagevec_free(&freed_pvec);
count_vm_events(PGACTIVATE, pgactivate); count_vm_events(PGACTIVATE, pgactivate);
return nr_reclaimed; return nr_reclaimed;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment