Commit 8ff69e2c authored by Vladimir Davydov, committed by Linus Torvalds

memcg: do not use vmalloc for mem_cgroup allocations

The vmalloc was introduced by commit 33327948 ("memcgroup: use vmalloc for
mem_cgroup allocation"), because at that time MAX_NUMNODES was used to size
the per-node array in the mem_cgroup structure, so the structure could be
huge even if the system had only one NUMA node.

The situation improved significantly with commit 45cf7ebd ("memcg: reduce
the size of struct memcg 244-fold"), which made the size of the mem_cgroup
structure be calculated dynamically from the real number of NUMA nodes
installed on the system (nr_node_ids). There is no longer any point in using
vmalloc here: the structure is allocated rarely, and on most systems its size
is about 1K.
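
As a rough illustration of the arithmetic above (not part of the patch), here
is a minimal userspace sketch of the allocation size computed after this
change. ASSUMED_BASE_SIZE and the node counts below are assumed placeholder
values for the example, not the real sizeof(struct mem_cgroup):

  #include <stdio.h>

  /*
   * Rough model of the size requested by mem_cgroup_alloc() after this
   * patch: the base structure plus one per-node pointer per node.
   * ASSUMED_BASE_SIZE is a hypothetical stand-in for
   * sizeof(struct mem_cgroup), not a value taken from the kernel.
   */
  #define ASSUMED_BASE_SIZE 800UL
  #define PAGE_SIZE_BYTES   4096UL

  static unsigned long memcg_alloc_size(unsigned long nr_node_ids)
  {
          return ASSUMED_BASE_SIZE + nr_node_ids * sizeof(void *);
  }

  int main(void)
  {
          unsigned long nodes[] = { 1, 2, 8, 64 };

          for (unsigned int i = 0; i < sizeof(nodes) / sizeof(nodes[0]); i++) {
                  unsigned long size = memcg_alloc_size(nodes[i]);

                  printf("nr_node_ids=%3lu -> %4lu bytes (%s PAGE_SIZE)\n",
                         nodes[i], size,
                         size < PAGE_SIZE_BYTES ? "under" : "over");
          }
          return 0;
  }

Even with dozens of nodes the total stays far below a 4K page, which is why a
plain kzalloc() is now sufficient.
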
Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: Glauber Costa <glommer@openvz.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 01cc2e58
@@ -49,7 +49,6 @@
 #include <linux/sort.h>
 #include <linux/fs.h>
 #include <linux/seq_file.h>
-#include <linux/vmalloc.h>
 #include <linux/vmpressure.h>
 #include <linux/mm_inline.h>
 #include <linux/page_cgroup.h>
@@ -381,12 +380,6 @@ struct mem_cgroup {
         /* WARNING: nodeinfo must be the last member here */
 };
 
-static size_t memcg_size(void)
-{
-        return sizeof(struct mem_cgroup) +
-                nr_node_ids * sizeof(struct mem_cgroup_per_node *);
-}
-
 /* internal only representation about the status of kmem accounting. */
 enum {
         KMEM_ACCOUNTED_ACTIVE = 0, /* accounted by this cgroup itself */
@@ -6405,14 +6398,12 @@ static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
 static struct mem_cgroup *mem_cgroup_alloc(void)
 {
         struct mem_cgroup *memcg;
-        size_t size = memcg_size();
+        size_t size;
 
-        /* Can be very big if nr_node_ids is very big */
-        if (size < PAGE_SIZE)
-                memcg = kzalloc(size, GFP_KERNEL);
-        else
-                memcg = vzalloc(size);
+        size = sizeof(struct mem_cgroup);
+        size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
 
+        memcg = kzalloc(size, GFP_KERNEL);
         if (!memcg)
                 return NULL;
 
@@ -6423,10 +6414,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
         return memcg;
 
 out_free:
-        if (size < PAGE_SIZE)
-                kfree(memcg);
-        else
-                vfree(memcg);
+        kfree(memcg);
         return NULL;
 }
@@ -6444,7 +6432,6 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
 static void __mem_cgroup_free(struct mem_cgroup *memcg)
 {
         int node;
-        size_t size = memcg_size();
 
         mem_cgroup_remove_from_trees(memcg);
@@ -6465,10 +6452,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
          * the cgroup_lock.
          */
         disarm_static_keys(memcg);
-        if (size < PAGE_SIZE)
-                kfree(memcg);
-        else
-                vfree(memcg);
+        kfree(memcg);
 }
 
 /*