Commit 6a18adb3 authored by Konstantin Khlebnikov, committed by Linus Torvalds

mm/vmscan: push zone pointer into shrink_page_list()

It doesn't need a pointer to the cgroup - a pointer to the zone is enough.
This patch also kills the "mz" argument of page_check_references() - it is
unused after "mm: memcg: count pte references from every member of the
reclaimed hierarchy".
Signed-off-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 5dc35979
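
For readers skimming the diff below, the essence of the change is that shrink_page_list() only ever used the zone half of the mem_cgroup_zone pair (the small mm/vmscan.c helper struct of this era that bundled a memcg pointer with a zone pointer), so callers can hand it the zone directly. Here is a minimal stand-alone sketch of that refactoring, using hypothetical stand-in types and names rather than the kernel's own:

#include <stdio.h>

/* Hypothetical stand-ins for the kernel types; names are illustrative only. */
struct zone { const char *name; };
struct mem_cgroup { int id; };

/* Analogue of struct mem_cgroup_zone: a memcg paired with one of its zones. */
struct mem_cgroup_zone {
	struct mem_cgroup *mem_cgroup;
	struct zone *zone;
};

/* Before: the whole pair is passed, but only ->zone is ever dereferenced. */
static void shrink_page_list_before(struct mem_cgroup_zone *mz)
{
	printf("reclaiming from zone %s\n", mz->zone->name);
}

/* After: the caller passes the zone directly, so the wrapper argument goes away. */
static void shrink_page_list_after(struct zone *zone)
{
	printf("reclaiming from zone %s\n", zone->name);
}

int main(void)
{
	struct zone normal = { .name = "Normal" };
	struct mem_cgroup memcg = { .id = 1 };
	struct mem_cgroup_zone mz = { .mem_cgroup = &memcg, .zone = &normal };

	shrink_page_list_before(&mz);    /* old calling convention */
	shrink_page_list_after(mz.zone); /* new convention: zone only */
	return 0;
}

Passing the narrower dependency keeps the per-zone work in the reclaim path (the VM_BUG_ON check and the ZONE_CONGESTED flag update in the hunks below) independent of the memcg plumbing.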
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -629,7 +629,6 @@ enum page_references {
 };
 
 static enum page_references page_check_references(struct page *page,
-						  struct mem_cgroup_zone *mz,
 						  struct scan_control *sc)
 {
 	int referenced_ptes, referenced_page;
@@ -688,7 +687,7 @@ static enum page_references page_check_references(struct page *page,
  * shrink_page_list() returns the number of reclaimed pages
  */
 static unsigned long shrink_page_list(struct list_head *page_list,
-				      struct mem_cgroup_zone *mz,
+				      struct zone *zone,
 				      struct scan_control *sc,
 				      unsigned long *ret_nr_dirty,
 				      unsigned long *ret_nr_writeback)
@@ -718,7 +717,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			goto keep;
 
 		VM_BUG_ON(PageActive(page));
-		VM_BUG_ON(page_zone(page) != mz->zone);
+		VM_BUG_ON(page_zone(page) != zone);
 
 		sc->nr_scanned++;
 
@@ -741,7 +740,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			goto keep;
 		}
 
-		references = page_check_references(page, mz, sc);
+		references = page_check_references(page, sc);
 		switch (references) {
 		case PAGEREF_ACTIVATE:
 			goto activate_locked;
@@ -931,7 +930,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 	 * will encounter the same problem
 	 */
 	if (nr_dirty && nr_dirty == nr_congested && global_reclaim(sc))
-		zone_set_flag(mz->zone, ZONE_CONGESTED);
+		zone_set_flag(zone, ZONE_CONGESTED);
 
 	free_hot_cold_page_list(&free_pages, 1);
 
@@ -1309,7 +1308,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
 
 	update_isolated_counts(mz, &page_list, &nr_anon, &nr_file);
 
-	nr_reclaimed = shrink_page_list(&page_list, mz, sc,
+	nr_reclaimed = shrink_page_list(&page_list, zone, sc,
 						&nr_dirty, &nr_writeback);
 
 	spin_lock_irq(&zone->lru_lock);