Commit 7f5e86c2 authored by Konstantin Khlebnikov's avatar Konstantin Khlebnikov Committed by Linus Torvalds

mm: add link from struct lruvec to struct zone

This is the first stage of struct mem_cgroup_zone removal.  Further
patches replace struct mem_cgroup_zone with a pointer to struct lruvec.

If CONFIG_CGROUP_MEM_RES_CTLR=n lruvec_zone() is just container_of().
Signed-off-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 9e3b2f8c
...@@ -201,6 +201,9 @@ struct zone_reclaim_stat { ...@@ -201,6 +201,9 @@ struct zone_reclaim_stat {
struct lruvec { struct lruvec {
struct list_head lists[NR_LRU_LISTS]; struct list_head lists[NR_LRU_LISTS];
struct zone_reclaim_stat reclaim_stat; struct zone_reclaim_stat reclaim_stat;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
struct zone *zone;
#endif
}; };
/* Mask used at gathering information at once (see memcontrol.c) */ /* Mask used at gathering information at once (see memcontrol.c) */
...@@ -729,6 +732,17 @@ extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn, ...@@ -729,6 +732,17 @@ extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
unsigned long size, unsigned long size,
enum memmap_context context); enum memmap_context context);
extern void lruvec_init(struct lruvec *lruvec, struct zone *zone);
/*
 * Map an lruvec back to its owning zone.
 *
 * Without CONFIG_CGROUP_MEM_RES_CTLR the only lruvec is the one embedded
 * in struct zone, so container_of() recovers the zone directly.  With
 * memcg enabled, per-memcg lruvecs live outside struct zone, so the
 * back-pointer stored by lruvec_init() is used instead.
 */
static inline struct zone *lruvec_zone(struct lruvec *lruvec)
{
#ifndef CONFIG_CGROUP_MEM_RES_CTLR
	return container_of(lruvec, struct zone, lruvec);
#else
	return lruvec->zone;
#endif
}
#ifdef CONFIG_HAVE_MEMORY_PRESENT #ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end); void memory_present(int nid, unsigned long start, unsigned long end);
#else #else
......
...@@ -4738,7 +4738,6 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node) ...@@ -4738,7 +4738,6 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
{ {
struct mem_cgroup_per_node *pn; struct mem_cgroup_per_node *pn;
struct mem_cgroup_per_zone *mz; struct mem_cgroup_per_zone *mz;
enum lru_list lru;
int zone, tmp = node; int zone, tmp = node;
/* /*
* This routine is called against possible nodes. * This routine is called against possible nodes.
...@@ -4756,8 +4755,7 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node) ...@@ -4756,8 +4755,7 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
for (zone = 0; zone < MAX_NR_ZONES; zone++) { for (zone = 0; zone < MAX_NR_ZONES; zone++) {
mz = &pn->zoneinfo[zone]; mz = &pn->zoneinfo[zone];
for_each_lru(lru) lruvec_init(&mz->lruvec, &NODE_DATA(node)->node_zones[zone]);
INIT_LIST_HEAD(&mz->lruvec.lists[lru]);
mz->usage_in_excess = 0; mz->usage_in_excess = 0;
mz->on_tree = false; mz->on_tree = false;
mz->memcg = memcg; mz->memcg = memcg;
......
...@@ -86,3 +86,17 @@ int memmap_valid_within(unsigned long pfn, ...@@ -86,3 +86,17 @@ int memmap_valid_within(unsigned long pfn,
return 1; return 1;
} }
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */ #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
void lruvec_init(struct lruvec *lruvec, struct zone *zone)
{
enum lru_list lru;
memset(lruvec, 0, sizeof(struct lruvec));
for_each_lru(lru)
INIT_LIST_HEAD(&lruvec->lists[lru]);
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
lruvec->zone = zone;
#endif
}
...@@ -4358,7 +4358,6 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat, ...@@ -4358,7 +4358,6 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
for (j = 0; j < MAX_NR_ZONES; j++) { for (j = 0; j < MAX_NR_ZONES; j++) {
struct zone *zone = pgdat->node_zones + j; struct zone *zone = pgdat->node_zones + j;
unsigned long size, realsize, memmap_pages; unsigned long size, realsize, memmap_pages;
enum lru_list lru;
size = zone_spanned_pages_in_node(nid, j, zones_size); size = zone_spanned_pages_in_node(nid, j, zones_size);
realsize = size - zone_absent_pages_in_node(nid, j, realsize = size - zone_absent_pages_in_node(nid, j,
...@@ -4408,12 +4407,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat, ...@@ -4408,12 +4407,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
zone->zone_pgdat = pgdat; zone->zone_pgdat = pgdat;
zone_pcp_init(zone); zone_pcp_init(zone);
for_each_lru(lru) lruvec_init(&zone->lruvec, zone);
INIT_LIST_HEAD(&zone->lruvec.lists[lru]);
zone->lruvec.reclaim_stat.recent_rotated[0] = 0;
zone->lruvec.reclaim_stat.recent_rotated[1] = 0;
zone->lruvec.reclaim_stat.recent_scanned[0] = 0;
zone->lruvec.reclaim_stat.recent_scanned[1] = 0;
zap_zone_vm_stats(zone); zap_zone_vm_stats(zone);
zone->flags = 0; zone->flags = 0;
if (!size) if (!size)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment