Commit c56d5c7d authored by Konstantin Khlebnikov, committed by Linus Torvalds

mm/vmscan: push lruvec pointer into inactive_list_is_low()

Switch mem_cgroup_inactive_anon_is_low() and mem_cgroup_inactive_file_is_low()
to taking lruvec pointers; mem_cgroup_get_lruvec_size() is more efficient than
mem_cgroup_zone_nr_lru_pages().
Signed-off-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 074291fe
...@@ -118,10 +118,8 @@ void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *); ...@@ -118,10 +118,8 @@ void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
/* /*
* For memory reclaim. * For memory reclaim.
*/ */
int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
struct zone *zone); int mem_cgroup_inactive_file_is_low(struct lruvec *lruvec);
int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg,
struct zone *zone);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg); int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_get_lruvec_size(struct lruvec *lruvec, enum lru_list); unsigned long mem_cgroup_get_lruvec_size(struct lruvec *lruvec, enum lru_list);
struct zone_reclaim_stat* struct zone_reclaim_stat*
...@@ -330,13 +328,13 @@ static inline bool mem_cgroup_disabled(void) ...@@ -330,13 +328,13 @@ static inline bool mem_cgroup_disabled(void)
} }
static inline int static inline int
mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone) mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
{ {
return 1; return 1;
} }
static inline int static inline int
mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg, struct zone *zone) mem_cgroup_inactive_file_is_low(struct lruvec *lruvec)
{ {
return 1; return 1;
} }
......
...@@ -1208,19 +1208,15 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg) ...@@ -1208,19 +1208,15 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg)
return ret; return ret;
} }
int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone) int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
{ {
unsigned long inactive_ratio; unsigned long inactive_ratio;
int nid = zone_to_nid(zone);
int zid = zone_idx(zone);
unsigned long inactive; unsigned long inactive;
unsigned long active; unsigned long active;
unsigned long gb; unsigned long gb;
inactive = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid, inactive = mem_cgroup_get_lruvec_size(lruvec, LRU_INACTIVE_ANON);
BIT(LRU_INACTIVE_ANON)); active = mem_cgroup_get_lruvec_size(lruvec, LRU_ACTIVE_ANON);
active = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
BIT(LRU_ACTIVE_ANON));
gb = (inactive + active) >> (30 - PAGE_SHIFT); gb = (inactive + active) >> (30 - PAGE_SHIFT);
if (gb) if (gb)
...@@ -1231,17 +1227,13 @@ int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone) ...@@ -1231,17 +1227,13 @@ int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone)
return inactive * inactive_ratio < active; return inactive * inactive_ratio < active;
} }
int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg, struct zone *zone) int mem_cgroup_inactive_file_is_low(struct lruvec *lruvec)
{ {
unsigned long active; unsigned long active;
unsigned long inactive; unsigned long inactive;
int zid = zone_idx(zone);
int nid = zone_to_nid(zone);
inactive = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid, inactive = mem_cgroup_get_lruvec_size(lruvec, LRU_INACTIVE_FILE);
BIT(LRU_INACTIVE_FILE)); active = mem_cgroup_get_lruvec_size(lruvec, LRU_ACTIVE_FILE);
active = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
BIT(LRU_ACTIVE_FILE));
return (active > inactive); return (active > inactive);
} }
......
...@@ -1488,13 +1488,12 @@ static int inactive_anon_is_low_global(struct zone *zone) ...@@ -1488,13 +1488,12 @@ static int inactive_anon_is_low_global(struct zone *zone)
/** /**
* inactive_anon_is_low - check if anonymous pages need to be deactivated * inactive_anon_is_low - check if anonymous pages need to be deactivated
* @zone: zone to check * @lruvec: LRU vector to check
* @sc: scan control of this context
* *
* Returns true if the zone does not have enough inactive anon pages, * Returns true if the zone does not have enough inactive anon pages,
* meaning some active anon pages need to be deactivated. * meaning some active anon pages need to be deactivated.
*/ */
static int inactive_anon_is_low(struct mem_cgroup_zone *mz) static int inactive_anon_is_low(struct lruvec *lruvec)
{ {
/* /*
* If we don't have swap space, anonymous page deactivation * If we don't have swap space, anonymous page deactivation
...@@ -1504,13 +1503,12 @@ static int inactive_anon_is_low(struct mem_cgroup_zone *mz) ...@@ -1504,13 +1503,12 @@ static int inactive_anon_is_low(struct mem_cgroup_zone *mz)
return 0; return 0;
if (!mem_cgroup_disabled()) if (!mem_cgroup_disabled())
return mem_cgroup_inactive_anon_is_low(mz->mem_cgroup, return mem_cgroup_inactive_anon_is_low(lruvec);
mz->zone);
return inactive_anon_is_low_global(mz->zone); return inactive_anon_is_low_global(lruvec_zone(lruvec));
} }
#else #else
static inline int inactive_anon_is_low(struct mem_cgroup_zone *mz) static inline int inactive_anon_is_low(struct lruvec *lruvec)
{ {
return 0; return 0;
} }
...@@ -1528,7 +1526,7 @@ static int inactive_file_is_low_global(struct zone *zone) ...@@ -1528,7 +1526,7 @@ static int inactive_file_is_low_global(struct zone *zone)
/** /**
* inactive_file_is_low - check if file pages need to be deactivated * inactive_file_is_low - check if file pages need to be deactivated
* @mz: memory cgroup and zone to check * @lruvec: LRU vector to check
* *
* When the system is doing streaming IO, memory pressure here * When the system is doing streaming IO, memory pressure here
* ensures that active file pages get deactivated, until more * ensures that active file pages get deactivated, until more
...@@ -1540,21 +1538,20 @@ static int inactive_file_is_low_global(struct zone *zone) ...@@ -1540,21 +1538,20 @@ static int inactive_file_is_low_global(struct zone *zone)
* This uses a different ratio than the anonymous pages, because * This uses a different ratio than the anonymous pages, because
* the page cache uses a use-once replacement algorithm. * the page cache uses a use-once replacement algorithm.
*/ */
static int inactive_file_is_low(struct mem_cgroup_zone *mz) static int inactive_file_is_low(struct lruvec *lruvec)
{ {
if (!mem_cgroup_disabled()) if (!mem_cgroup_disabled())
return mem_cgroup_inactive_file_is_low(mz->mem_cgroup, return mem_cgroup_inactive_file_is_low(lruvec);
mz->zone);
return inactive_file_is_low_global(mz->zone); return inactive_file_is_low_global(lruvec_zone(lruvec));
} }
static int inactive_list_is_low(struct mem_cgroup_zone *mz, int file) static int inactive_list_is_low(struct lruvec *lruvec, int file)
{ {
if (file) if (file)
return inactive_file_is_low(mz); return inactive_file_is_low(lruvec);
else else
return inactive_anon_is_low(mz); return inactive_anon_is_low(lruvec);
} }
static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan, static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
...@@ -1564,7 +1561,10 @@ static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan, ...@@ -1564,7 +1561,10 @@ static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
int file = is_file_lru(lru); int file = is_file_lru(lru);
if (is_active_lru(lru)) { if (is_active_lru(lru)) {
if (inactive_list_is_low(mz, file)) struct lruvec *lruvec = mem_cgroup_zone_lruvec(mz->zone,
mz->mem_cgroup);
if (inactive_list_is_low(lruvec, file))
shrink_active_list(nr_to_scan, mz, sc, lru); shrink_active_list(nr_to_scan, mz, sc, lru);
return 0; return 0;
} }
...@@ -1793,6 +1793,9 @@ static void shrink_mem_cgroup_zone(struct mem_cgroup_zone *mz, ...@@ -1793,6 +1793,9 @@ static void shrink_mem_cgroup_zone(struct mem_cgroup_zone *mz,
unsigned long nr_reclaimed, nr_scanned; unsigned long nr_reclaimed, nr_scanned;
unsigned long nr_to_reclaim = sc->nr_to_reclaim; unsigned long nr_to_reclaim = sc->nr_to_reclaim;
struct blk_plug plug; struct blk_plug plug;
struct lruvec *lruvec;
lruvec = mem_cgroup_zone_lruvec(mz->zone, mz->mem_cgroup);
restart: restart:
nr_reclaimed = 0; nr_reclaimed = 0;
...@@ -1831,7 +1834,7 @@ static void shrink_mem_cgroup_zone(struct mem_cgroup_zone *mz, ...@@ -1831,7 +1834,7 @@ static void shrink_mem_cgroup_zone(struct mem_cgroup_zone *mz,
* Even if we did not try to evict anon pages at all, we want to * Even if we did not try to evict anon pages at all, we want to
* rebalance the anon lru active/inactive ratio. * rebalance the anon lru active/inactive ratio.
*/ */
if (inactive_anon_is_low(mz)) if (inactive_anon_is_low(lruvec))
shrink_active_list(SWAP_CLUSTER_MAX, mz, shrink_active_list(SWAP_CLUSTER_MAX, mz,
sc, LRU_ACTIVE_ANON); sc, LRU_ACTIVE_ANON);
...@@ -2264,12 +2267,13 @@ static void age_active_anon(struct zone *zone, struct scan_control *sc) ...@@ -2264,12 +2267,13 @@ static void age_active_anon(struct zone *zone, struct scan_control *sc)
memcg = mem_cgroup_iter(NULL, NULL, NULL); memcg = mem_cgroup_iter(NULL, NULL, NULL);
do { do {
struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
struct mem_cgroup_zone mz = { struct mem_cgroup_zone mz = {
.mem_cgroup = memcg, .mem_cgroup = memcg,
.zone = zone, .zone = zone,
}; };
if (inactive_anon_is_low(&mz)) if (inactive_anon_is_low(lruvec))
shrink_active_list(SWAP_CLUSTER_MAX, &mz, shrink_active_list(SWAP_CLUSTER_MAX, &mz,
sc, LRU_ACTIVE_ANON); sc, LRU_ACTIVE_ANON);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment