Commit af166777 authored by Wu Fengguang, committed by Linus Torvalds

vmscan: ZVC updates in shrink_active_list() can be done once

This effectively lifts the unit of updates to nr_inactive_* and
pgdeactivate from PAGEVEC_SIZE=14 to SWAP_CLUSTER_MAX=32, or
MAX_ORDER_NR_PAGES=1024 for reclaim_zone().

Cc: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Lee Schermerhorn <lee.schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 08d9ae7c
@@ -1223,7 +1223,6 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 			struct scan_control *sc, int priority, int file)
 {
 	unsigned long pgmoved;
-	int pgdeactivate = 0;
 	unsigned long pgscanned;
 	LIST_HEAD(l_hold);	/* The pages which were snipped off */
 	LIST_HEAD(l_inactive);
@@ -1252,7 +1251,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	__mod_zone_page_state(zone, NR_ACTIVE_ANON, -pgmoved);
 	spin_unlock_irq(&zone->lru_lock);
 
-	pgmoved = 0;
+	pgmoved = 0;  /* count referenced (mapping) mapped pages */
 	while (!list_empty(&l_hold)) {
 		cond_resched();
 		page = lru_to_page(&l_hold);
@@ -1286,7 +1285,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	 */
 	reclaim_stat->recent_rotated[!!file] += pgmoved;
 
-	pgmoved = 0;
+	pgmoved = 0;  /* count pages moved to inactive list */
 	while (!list_empty(&l_inactive)) {
 		page = lru_to_page(&l_inactive);
 		prefetchw_prev_lru_page(page, &l_inactive, flags);
@@ -1299,10 +1298,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 		mem_cgroup_add_lru_list(page, lru);
 		pgmoved++;
 		if (!pagevec_add(&pvec, page)) {
-			__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
 			spin_unlock_irq(&zone->lru_lock);
-			pgdeactivate += pgmoved;
-			pgmoved = 0;
 			if (buffer_heads_over_limit)
 				pagevec_strip(&pvec);
 			__pagevec_release(&pvec);
@@ -1310,9 +1306,8 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 		}
 	}
 	__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
-	pgdeactivate += pgmoved;
 	__count_zone_vm_events(PGREFILL, zone, pgscanned);
-	__count_vm_events(PGDEACTIVATE, pgdeactivate);
+	__count_vm_events(PGDEACTIVATE, pgmoved);
 	spin_unlock_irq(&zone->lru_lock);
 	if (buffer_heads_over_limit)
 		pagevec_strip(&pvec);
...
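
The pattern behind this change, as a minimal userspace C sketch (illustrative only, not kernel code; names such as move_pages_per_pagevec, move_pages_batched, and nr_inactive_demo are hypothetical): the old loop flushed the shared zone counter on every pagevec drain, while the new code keeps a local count in pgmoved and publishes it to the ZVC once at the end.

/*
 * Userspace sketch of the batching pattern this commit applies:
 * accumulate into a local counter and publish it to the shared
 * statistic once, instead of on every pagevec drain.
 */
#include <stdio.h>

#define PAGEVEC_SIZE_DEMO	14	/* per-pagevec batch, as in the old code */
#define SWAP_CLUSTER_DEMO	32	/* whole-call batch, as in the new code */

static long nr_inactive_demo;		/* stands in for the zone ZVC counter */

/* Old style: flush the shared counter every PAGEVEC_SIZE pages. */
static int move_pages_per_pagevec(int total)
{
	int moved = 0, updates = 0;

	for (int i = 0; i < total; i++) {
		if (++moved == PAGEVEC_SIZE_DEMO) {
			nr_inactive_demo += moved;	/* one ZVC update per drain */
			updates++;
			moved = 0;
		}
	}
	nr_inactive_demo += moved;		/* final partial flush */
	return updates + 1;
}

/* New style: count locally, update the shared counter once at the end. */
static int move_pages_batched(int total)
{
	int moved = 0;

	for (int i = 0; i < total; i++)
		moved++;			/* local bookkeeping only */
	nr_inactive_demo += moved;		/* single ZVC update */
	return 1;
}

int main(void)
{
	printf("per-pagevec: %d counter updates\n",
	       move_pages_per_pagevec(SWAP_CLUSTER_DEMO));
	printf("batched:     %d counter update\n",
	       move_pages_batched(SWAP_CLUSTER_DEMO));
	return 0;
}

Over a SWAP_CLUSTER_MAX-sized run of 32 pages, the per-pagevec variant touches the shared counter three times (two full pagevecs plus the final flush) while the batched variant touches it once, which is the lift in update granularity the commit message describes.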