Commit 6b208e3f authored by Johannes Weiner, committed by Linus Torvalds

mm: memcg: remove unused node/section info from pc->flags

To find the page corresponding to a certain page_cgroup, pc->flags
encoded the node or section ID of the base array, so the pc pointer
could be mapped back to the array containing it.

Now that the per-memory cgroup LRU lists link page descriptors directly,
there is no longer any code that knows the struct page_cgroup of a PFN
but not the struct page.

[hughd@google.com: remove unused node/section info from pc->flags fix]
Signed-off-by: Johannes Weiner <jweiner@redhat.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Reviewed-by: Kirill A. Shutemov <kirill@shutemov.name>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Ying Han <yinghan@google.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 925b7673
...@@ -121,39 +121,6 @@ static inline void move_unlock_page_cgroup(struct page_cgroup *pc, ...@@ -121,39 +121,6 @@ static inline void move_unlock_page_cgroup(struct page_cgroup *pc,
local_irq_restore(*flags); local_irq_restore(*flags);
} }
#ifdef CONFIG_SPARSEMEM
#define PCG_ARRAYID_WIDTH SECTIONS_SHIFT
#else
#define PCG_ARRAYID_WIDTH NODES_SHIFT
#endif
#if (PCG_ARRAYID_WIDTH > BITS_PER_LONG - NR_PCG_FLAGS)
#error Not enough space left in pc->flags to store page_cgroup array IDs
#endif
/* pc->flags: ARRAY-ID | FLAGS */
#define PCG_ARRAYID_MASK ((1UL << PCG_ARRAYID_WIDTH) - 1)
#define PCG_ARRAYID_OFFSET (BITS_PER_LONG - PCG_ARRAYID_WIDTH)
/*
* Zero the shift count for non-existent fields, to prevent compiler
* warnings and ensure references are optimized away.
*/
#define PCG_ARRAYID_SHIFT (PCG_ARRAYID_OFFSET * (PCG_ARRAYID_WIDTH != 0))
static inline void set_page_cgroup_array_id(struct page_cgroup *pc,
unsigned long id)
{
pc->flags &= ~(PCG_ARRAYID_MASK << PCG_ARRAYID_SHIFT);
pc->flags |= (id & PCG_ARRAYID_MASK) << PCG_ARRAYID_SHIFT;
}
static inline unsigned long page_cgroup_array_id(struct page_cgroup *pc)
{
return (pc->flags >> PCG_ARRAYID_SHIFT) & PCG_ARRAYID_MASK;
}
#else /* CONFIG_CGROUP_MEM_RES_CTLR */ #else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct page_cgroup; struct page_cgroup;
......
...@@ -11,12 +11,6 @@ ...@@ -11,12 +11,6 @@
#include <linux/swapops.h> #include <linux/swapops.h>
#include <linux/kmemleak.h> #include <linux/kmemleak.h>
static void __meminit init_page_cgroup(struct page_cgroup *pc, unsigned long id)
{
pc->flags = 0;
set_page_cgroup_array_id(pc, id);
pc->mem_cgroup = NULL;
}
static unsigned long total_usage; static unsigned long total_usage;
#if !defined(CONFIG_SPARSEMEM) #if !defined(CONFIG_SPARSEMEM)
...@@ -41,28 +35,13 @@ struct page_cgroup *lookup_page_cgroup(struct page *page) ...@@ -41,28 +35,13 @@ struct page_cgroup *lookup_page_cgroup(struct page *page)
return base + offset; return base + offset;
} }
struct page *lookup_cgroup_page(struct page_cgroup *pc)
{
unsigned long pfn;
struct page *page;
pg_data_t *pgdat;
pgdat = NODE_DATA(page_cgroup_array_id(pc));
pfn = pc - pgdat->node_page_cgroup + pgdat->node_start_pfn;
page = pfn_to_page(pfn);
VM_BUG_ON(pc != lookup_page_cgroup(page));
return page;
}
static int __init alloc_node_page_cgroup(int nid) static int __init alloc_node_page_cgroup(int nid)
{ {
struct page_cgroup *base, *pc; struct page_cgroup *base;
unsigned long table_size; unsigned long table_size;
unsigned long start_pfn, nr_pages, index; unsigned long nr_pages;
start_pfn = NODE_DATA(nid)->node_start_pfn;
nr_pages = NODE_DATA(nid)->node_spanned_pages; nr_pages = NODE_DATA(nid)->node_spanned_pages;
if (!nr_pages) if (!nr_pages)
return 0; return 0;
...@@ -72,10 +51,6 @@ static int __init alloc_node_page_cgroup(int nid) ...@@ -72,10 +51,6 @@ static int __init alloc_node_page_cgroup(int nid)
table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
if (!base) if (!base)
return -ENOMEM; return -ENOMEM;
for (index = 0; index < nr_pages; index++) {
pc = base + index;
init_page_cgroup(pc, nid);
}
NODE_DATA(nid)->node_page_cgroup = base; NODE_DATA(nid)->node_page_cgroup = base;
total_usage += table_size; total_usage += table_size;
return 0; return 0;
...@@ -116,23 +91,10 @@ struct page_cgroup *lookup_page_cgroup(struct page *page) ...@@ -116,23 +91,10 @@ struct page_cgroup *lookup_page_cgroup(struct page *page)
return section->page_cgroup + pfn; return section->page_cgroup + pfn;
} }
struct page *lookup_cgroup_page(struct page_cgroup *pc)
{
struct mem_section *section;
struct page *page;
unsigned long nr;
nr = page_cgroup_array_id(pc);
section = __nr_to_section(nr);
page = pfn_to_page(pc - section->page_cgroup);
VM_BUG_ON(pc != lookup_page_cgroup(page));
return page;
}
static void *__meminit alloc_page_cgroup(size_t size, int nid) static void *__meminit alloc_page_cgroup(size_t size, int nid)
{ {
gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN;
void *addr = NULL; void *addr = NULL;
gfp_t flags = GFP_KERNEL | __GFP_NOWARN;
addr = alloc_pages_exact_nid(nid, size, flags); addr = alloc_pages_exact_nid(nid, size, flags);
if (addr) { if (addr) {
...@@ -141,9 +103,9 @@ static void *__meminit alloc_page_cgroup(size_t size, int nid) ...@@ -141,9 +103,9 @@ static void *__meminit alloc_page_cgroup(size_t size, int nid)
} }
if (node_state(nid, N_HIGH_MEMORY)) if (node_state(nid, N_HIGH_MEMORY))
addr = vmalloc_node(size, nid); addr = vzalloc_node(size, nid);
else else
addr = vmalloc(size); addr = vzalloc(size);
return addr; return addr;
} }
...@@ -166,14 +128,11 @@ static void free_page_cgroup(void *addr) ...@@ -166,14 +128,11 @@ static void free_page_cgroup(void *addr)
static int __meminit init_section_page_cgroup(unsigned long pfn, int nid) static int __meminit init_section_page_cgroup(unsigned long pfn, int nid)
{ {
struct page_cgroup *base, *pc;
struct mem_section *section; struct mem_section *section;
struct page_cgroup *base;
unsigned long table_size; unsigned long table_size;
unsigned long nr;
int index;
nr = pfn_to_section_nr(pfn); section = __pfn_to_section(pfn);
section = __nr_to_section(nr);
if (section->page_cgroup) if (section->page_cgroup)
return 0; return 0;
...@@ -193,10 +152,6 @@ static int __meminit init_section_page_cgroup(unsigned long pfn, int nid) ...@@ -193,10 +152,6 @@ static int __meminit init_section_page_cgroup(unsigned long pfn, int nid)
return -ENOMEM; return -ENOMEM;
} }
for (index = 0; index < PAGES_PER_SECTION; index++) {
pc = base + index;
init_page_cgroup(pc, nr);
}
/* /*
* The passed "pfn" may not be aligned to SECTION. For the calculation * The passed "pfn" may not be aligned to SECTION. For the calculation
* we need to apply a mask. * we need to apply a mask.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment