Commit 1aa13254 authored by Vladimir Davydov's avatar Vladimir Davydov Committed by Linus Torvalds

memcg, slab: clean up memcg cache initialization/destruction

Currently, we have rather a messy function set relating to per-memcg
kmem cache initialization/destruction.

Per-memcg caches are created in memcg_create_kmem_cache().  This
function calls kmem_cache_create_memcg() to allocate and initialize a
kmem cache and then "registers" the new cache in the
memcg_params::memcg_caches array of the parent cache.

During its work-flow, kmem_cache_create_memcg() executes the following
memcg-related functions:

 - memcg_alloc_cache_params(), to initialize memcg_params of the newly
   created cache;
 - memcg_cache_list_add(), to add the new cache to the memcg_slab_caches
   list.

On the other hand, kmem_cache_destroy() called on a cache destruction
only calls memcg_release_cache(), which does all the work: it cleans the
reference to the cache in its parent's memcg_params::memcg_caches,
removes the cache from the memcg_slab_caches list, and frees
memcg_params.

Such an inconsistency between the destruction and initialization paths makes
the code difficult to read, so let's clean this up a bit.

This patch moves all the code relating to registration of per-memcg
caches (adding to memcg list, setting the pointer to a cache from its
parent) to the newly created memcg_register_cache() and
memcg_unregister_cache() functions making the initialization and
destruction paths look symmetrical.
Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Glauber Costa <glommer@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Christoph Lameter <cl@linux.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 363a044f
...@@ -500,8 +500,8 @@ int memcg_cache_id(struct mem_cgroup *memcg); ...@@ -500,8 +500,8 @@ int memcg_cache_id(struct mem_cgroup *memcg);
int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s, int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s,
struct kmem_cache *root_cache); struct kmem_cache *root_cache);
void memcg_free_cache_params(struct kmem_cache *s); void memcg_free_cache_params(struct kmem_cache *s);
void memcg_release_cache(struct kmem_cache *cachep); void memcg_register_cache(struct kmem_cache *s);
void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep); void memcg_unregister_cache(struct kmem_cache *s);
int memcg_update_cache_size(struct kmem_cache *s, int num_groups); int memcg_update_cache_size(struct kmem_cache *s, int num_groups);
void memcg_update_array_size(int num_groups); void memcg_update_array_size(int num_groups);
...@@ -651,12 +651,11 @@ static inline void memcg_free_cache_params(struct kmem_cache *s) ...@@ -651,12 +651,11 @@ static inline void memcg_free_cache_params(struct kmem_cache *s)
{ {
} }
static inline void memcg_release_cache(struct kmem_cache *cachep) static inline void memcg_register_cache(struct kmem_cache *s)
{ {
} }
static inline void memcg_cache_list_add(struct mem_cgroup *memcg, static inline void memcg_unregister_cache(struct kmem_cache *s)
struct kmem_cache *s)
{ {
} }
......
...@@ -3095,16 +3095,6 @@ static void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size) ...@@ -3095,16 +3095,6 @@ static void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size)
css_put(&memcg->css); css_put(&memcg->css);
} }
void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep)
{
if (!memcg)
return;
mutex_lock(&memcg->slab_caches_mutex);
list_add(&cachep->memcg_params->list, &memcg->memcg_slab_caches);
mutex_unlock(&memcg->slab_caches_mutex);
}
/* /*
* helper for acessing a memcg's index. It will be used as an index in the * helper for acessing a memcg's index. It will be used as an index in the
* child cache array in kmem_cache, and also to derive its name. This function * child cache array in kmem_cache, and also to derive its name. This function
...@@ -3265,21 +3255,41 @@ void memcg_free_cache_params(struct kmem_cache *s) ...@@ -3265,21 +3255,41 @@ void memcg_free_cache_params(struct kmem_cache *s)
kfree(s->memcg_params); kfree(s->memcg_params);
} }
void memcg_release_cache(struct kmem_cache *s) void memcg_register_cache(struct kmem_cache *s)
{ {
struct kmem_cache *root; struct kmem_cache *root;
struct mem_cgroup *memcg; struct mem_cgroup *memcg;
int id; int id;
if (is_root_cache(s))
return;
root = s->memcg_params->root_cache;
memcg = s->memcg_params->memcg;
id = memcg_cache_id(memcg);
css_get(&memcg->css);
mutex_lock(&memcg->slab_caches_mutex);
list_add(&s->memcg_params->list, &memcg->memcg_slab_caches);
mutex_unlock(&memcg->slab_caches_mutex);
root->memcg_params->memcg_caches[id] = s;
/* /*
* This happens, for instance, when a root cache goes away before we * the readers won't lock, make sure everybody sees the updated value,
* add any memcg. * so they won't put stuff in the queue again for no reason
*/ */
if (!s->memcg_params) wmb();
return; }
if (s->memcg_params->is_root_cache) void memcg_unregister_cache(struct kmem_cache *s)
goto out; {
struct kmem_cache *root;
struct mem_cgroup *memcg;
int id;
if (is_root_cache(s))
return;
memcg = s->memcg_params->memcg; memcg = s->memcg_params->memcg;
id = memcg_cache_id(memcg); id = memcg_cache_id(memcg);
...@@ -3292,8 +3302,6 @@ void memcg_release_cache(struct kmem_cache *s) ...@@ -3292,8 +3302,6 @@ void memcg_release_cache(struct kmem_cache *s)
mutex_unlock(&memcg->slab_caches_mutex); mutex_unlock(&memcg->slab_caches_mutex);
css_put(&memcg->css); css_put(&memcg->css);
out:
memcg_free_cache_params(s);
} }
/* /*
...@@ -3451,26 +3459,13 @@ static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg, ...@@ -3451,26 +3459,13 @@ static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
mutex_lock(&memcg_cache_mutex); mutex_lock(&memcg_cache_mutex);
new_cachep = cache_from_memcg_idx(cachep, idx); new_cachep = cache_from_memcg_idx(cachep, idx);
if (new_cachep) { if (new_cachep)
css_put(&memcg->css);
goto out; goto out;
}
new_cachep = kmem_cache_dup(memcg, cachep); new_cachep = kmem_cache_dup(memcg, cachep);
if (new_cachep == NULL) { if (new_cachep == NULL)
new_cachep = cachep; new_cachep = cachep;
css_put(&memcg->css);
goto out;
}
atomic_set(&new_cachep->memcg_params->nr_pages , 0);
cachep->memcg_params->memcg_caches[idx] = new_cachep;
/*
* the readers won't lock, make sure everybody sees the updated value,
* so they won't put stuff in the queue again for no reason
*/
wmb();
out: out:
mutex_unlock(&memcg_cache_mutex); mutex_unlock(&memcg_cache_mutex);
return new_cachep; return new_cachep;
...@@ -3550,6 +3545,7 @@ static void memcg_create_cache_work_func(struct work_struct *w) ...@@ -3550,6 +3545,7 @@ static void memcg_create_cache_work_func(struct work_struct *w)
cw = container_of(w, struct create_work, work); cw = container_of(w, struct create_work, work);
memcg_create_kmem_cache(cw->memcg, cw->cachep); memcg_create_kmem_cache(cw->memcg, cw->cachep);
css_put(&cw->memcg->css);
kfree(cw); kfree(cw);
} }
......
...@@ -215,7 +215,7 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size, ...@@ -215,7 +215,7 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
s->refcount = 1; s->refcount = 1;
list_add(&s->list, &slab_caches); list_add(&s->list, &slab_caches);
memcg_cache_list_add(memcg, s); memcg_register_cache(s);
out_unlock: out_unlock:
mutex_unlock(&slab_mutex); mutex_unlock(&slab_mutex);
...@@ -265,7 +265,8 @@ void kmem_cache_destroy(struct kmem_cache *s) ...@@ -265,7 +265,8 @@ void kmem_cache_destroy(struct kmem_cache *s)
if (s->flags & SLAB_DESTROY_BY_RCU) if (s->flags & SLAB_DESTROY_BY_RCU)
rcu_barrier(); rcu_barrier();
memcg_release_cache(s); memcg_unregister_cache(s);
memcg_free_cache_params(s);
kfree(s->name); kfree(s->name);
kmem_cache_free(kmem_cache, s); kmem_cache_free(kmem_cache, s);
} else { } else {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment