Commit 33327948 authored by KAMEZAWA Hiroyuki, committed by Linus Torvalds

memcgroup: use vmalloc for mem_cgroup allocation

On ia64, this kmalloc() requires order-4 pages.  But the mem_cgroup structure
does not need to be physically contiguous.  So for a big mem_cgroup, vmalloc
is better; for small ones, kmalloc is still used.
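
As an illustration only (not part of the patch): the idea is to pick the
allocator from the size and make the free path repeat the exact same test,
so that kfree()/vfree() always matches what was allocated.  A minimal
sketch, using hypothetical names big_struct_alloc()/big_struct_free():

	#include <linux/mm.h>		/* PAGE_SIZE */
	#include <linux/slab.h>		/* kmalloc, kfree, GFP_KERNEL */
	#include <linux/string.h>	/* memset */
	#include <linux/vmalloc.h>	/* vmalloc, vfree */

	/* Sketch only -- hypothetical helpers, not from this patch. */
	static void *big_struct_alloc(size_t size)
	{
		void *p;

		if (size < PAGE_SIZE)
			p = kmalloc(size, GFP_KERNEL);	/* physically contiguous */
		else
			p = vmalloc(size);	/* virtually contiguous only */
		if (p)
			memset(p, 0, size);
		return p;
	}

	/* The free must take the same branch as the alloc did. */
	static void big_struct_free(void *p, size_t size)
	{
		if (size < PAGE_SIZE)
			kfree(p);
		else
			vfree(p);
	}

In the patch itself the test is on sizeof(*mem), a compile-time constant, so
the alloc and free sides are guaranteed to take the same branch and the
compiler folds the test away.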

[akpm@linux-foundation.org: simplification]
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Pavel Emelyanov <xemul@openvz.org>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 4a56d02e
@@ -31,6 +31,7 @@
 #include <linux/spinlock.h>
 #include <linux/fs.h>
 #include <linux/seq_file.h>
+#include <linux/vmalloc.h>
 
 #include <asm/uaccess.h>
@@ -983,6 +984,29 @@ static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
 	kfree(mem->info.nodeinfo[node]);
 }
 
+static struct mem_cgroup *mem_cgroup_alloc(void)
+{
+	struct mem_cgroup *mem;
+
+	if (sizeof(*mem) < PAGE_SIZE)
+		mem = kmalloc(sizeof(*mem), GFP_KERNEL);
+	else
+		mem = vmalloc(sizeof(*mem));
+
+	if (mem)
+		memset(mem, 0, sizeof(*mem));
+	return mem;
+}
+
+static void mem_cgroup_free(struct mem_cgroup *mem)
+{
+	if (sizeof(*mem) < PAGE_SIZE)
+		kfree(mem);
+	else
+		vfree(mem);
+}
+
 static struct cgroup_subsys_state *
 mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 {
@@ -993,12 +1017,11 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 		mem = &init_mem_cgroup;
 		page_cgroup_cache = KMEM_CACHE(page_cgroup, SLAB_PANIC);
 	} else {
-		mem = kzalloc(sizeof(struct mem_cgroup), GFP_KERNEL);
-		if (!mem)
-			return ERR_PTR(-ENOMEM);
+		mem = mem_cgroup_alloc();
 	}
 
+	if (mem == NULL)
+		return ERR_PTR(-ENOMEM);
 
 	res_counter_init(&mem->res);
 	memset(&mem->info, 0, sizeof(mem->info));
@@ -1012,7 +1035,7 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 	for_each_node_state(node, N_POSSIBLE)
 		free_mem_cgroup_per_zone_info(mem, node);
 	if (cont->parent != NULL)
-		kfree(mem);
+		mem_cgroup_free(mem);
 	return ERR_PTR(-ENOMEM);
 }
@@ -1032,7 +1055,7 @@ static void mem_cgroup_destroy(struct cgroup_subsys *ss,
 	for_each_node_state(node, N_POSSIBLE)
 		free_mem_cgroup_per_zone_info(mem, node);
 
-	kfree(mem_cgroup_from_cont(cont));
+	mem_cgroup_free(mem_cgroup_from_cont(cont));
 }
 
 static int mem_cgroup_populate(struct cgroup_subsys *ss,
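
A note on the free side (again an illustration, not part of the patch):
handing a kmalloc()ed pointer to vfree(), or a vmalloc()ed one to kfree(),
would corrupt memory.  The patch stays safe by keying both
mem_cgroup_alloc() and mem_cgroup_free() on the constant sizeof(*mem), so
the two always agree.  An alternative would be to dispatch on the pointer
itself with is_vmalloc_addr(); later kernels wrap this whole idiom up as
kvmalloc()/kvfree():

	#include <linux/mm.h>		/* is_vmalloc_addr */
	#include <linux/slab.h>		/* kfree */
	#include <linux/vmalloc.h>	/* vfree */

	/* Sketch only: free either kind of pointer by checking which
	 * address range it falls in. */
	static void free_either(void *p)
	{
		if (is_vmalloc_addr(p))
			vfree(p);
		else
			kfree(p);
	}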