Commit 426589f5 authored by Vladimir Davydov, committed by Linus Torvalds

slab: link memcg caches of the same kind into a list

Sometimes, we need to iterate over all memcg copies of a particular root
kmem cache.  Currently, we use memcg_cache_params->memcg_caches array for
that, because it contains all existing memcg caches.

However, it's a bad practice to keep all caches, including those that
belong to offline cgroups, in this array, because it will be growing
beyond any bounds then.  I'm going to wipe away dead caches from it to
save space.  To still be able to perform iterations over all memcg caches
of the same kind, let us link them into a list.
Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Tejun Heo <tj@kernel.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Dave Chinner <david@fromorbit.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent f7ce3190
...@@ -491,9 +491,13 @@ struct memcg_cache_array { ...@@ -491,9 +491,13 @@ struct memcg_cache_array {
* *
* @memcg: pointer to the memcg this cache belongs to * @memcg: pointer to the memcg this cache belongs to
* @root_cache: pointer to the global, root cache, this cache was derived from * @root_cache: pointer to the global, root cache, this cache was derived from
*
* Both root and child caches of the same kind are linked into a list chained
* through @list.
*/ */
struct memcg_cache_params { struct memcg_cache_params {
bool is_root_cache; bool is_root_cache;
struct list_head list;
union { union {
struct memcg_cache_array __rcu *memcg_caches; struct memcg_cache_array __rcu *memcg_caches;
struct { struct {
......
...@@ -3708,8 +3708,7 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit, ...@@ -3708,8 +3708,7 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
int batchcount, int shared, gfp_t gfp) int batchcount, int shared, gfp_t gfp)
{ {
int ret; int ret;
struct kmem_cache *c = NULL; struct kmem_cache *c;
int i = 0;
ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp); ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
...@@ -3719,12 +3718,10 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit, ...@@ -3719,12 +3718,10 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
if ((ret < 0) || !is_root_cache(cachep)) if ((ret < 0) || !is_root_cache(cachep))
return ret; return ret;
VM_BUG_ON(!mutex_is_locked(&slab_mutex)); lockdep_assert_held(&slab_mutex);
for_each_memcg_cache_index(i) { for_each_memcg_cache(c, cachep) {
c = cache_from_memcg_idx(cachep, i); /* return value determined by the root cache only */
if (c) __do_tune_cpucache(c, limit, batchcount, shared, gfp);
/* return value determined by the parent cache only */
__do_tune_cpucache(c, limit, batchcount, shared, gfp);
} }
return ret; return ret;
......
...@@ -163,6 +163,18 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer, ...@@ -163,6 +163,18 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos); size_t count, loff_t *ppos);
#ifdef CONFIG_MEMCG_KMEM #ifdef CONFIG_MEMCG_KMEM
/*
* Iterate over all memcg caches of the given root cache. The caller must hold
* slab_mutex.
*/
#define for_each_memcg_cache(iter, root) \
list_for_each_entry(iter, &(root)->memcg_params.list, \
memcg_params.list)
#define for_each_memcg_cache_safe(iter, tmp, root) \
list_for_each_entry_safe(iter, tmp, &(root)->memcg_params.list, \
memcg_params.list)
static inline bool is_root_cache(struct kmem_cache *s) static inline bool is_root_cache(struct kmem_cache *s)
{ {
return s->memcg_params.is_root_cache; return s->memcg_params.is_root_cache;
...@@ -241,6 +253,11 @@ extern void slab_init_memcg_params(struct kmem_cache *); ...@@ -241,6 +253,11 @@ extern void slab_init_memcg_params(struct kmem_cache *);
#else /* !CONFIG_MEMCG_KMEM */ #else /* !CONFIG_MEMCG_KMEM */
#define for_each_memcg_cache(iter, root) \
for ((void)(iter), (void)(root); 0; )
#define for_each_memcg_cache_safe(iter, tmp, root) \
for ((void)(iter), (void)(tmp), (void)(root); 0; )
static inline bool is_root_cache(struct kmem_cache *s) static inline bool is_root_cache(struct kmem_cache *s)
{ {
return true; return true;
......
...@@ -109,6 +109,7 @@ static inline int kmem_cache_sanity_check(const char *name, size_t size) ...@@ -109,6 +109,7 @@ static inline int kmem_cache_sanity_check(const char *name, size_t size)
void slab_init_memcg_params(struct kmem_cache *s) void slab_init_memcg_params(struct kmem_cache *s)
{ {
s->memcg_params.is_root_cache = true; s->memcg_params.is_root_cache = true;
INIT_LIST_HEAD(&s->memcg_params.list);
RCU_INIT_POINTER(s->memcg_params.memcg_caches, NULL); RCU_INIT_POINTER(s->memcg_params.memcg_caches, NULL);
} }
...@@ -449,6 +450,7 @@ static int do_kmem_cache_shutdown(struct kmem_cache *s, ...@@ -449,6 +450,7 @@ static int do_kmem_cache_shutdown(struct kmem_cache *s,
lockdep_is_held(&slab_mutex)); lockdep_is_held(&slab_mutex));
BUG_ON(arr->entries[idx] != s); BUG_ON(arr->entries[idx] != s);
arr->entries[idx] = NULL; arr->entries[idx] = NULL;
list_del(&s->memcg_params.list);
} }
#endif #endif
list_move(&s->list, release); list_move(&s->list, release);
...@@ -529,6 +531,8 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg, ...@@ -529,6 +531,8 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg,
goto out_unlock; goto out_unlock;
} }
list_add(&s->memcg_params.list, &root_cache->memcg_params.list);
/* /*
* Since readers won't lock (see cache_from_memcg_idx()), we need a * Since readers won't lock (see cache_from_memcg_idx()), we need a
* barrier here to ensure nobody will see the kmem_cache partially * barrier here to ensure nobody will see the kmem_cache partially
...@@ -581,11 +585,13 @@ void slab_kmem_cache_release(struct kmem_cache *s) ...@@ -581,11 +585,13 @@ void slab_kmem_cache_release(struct kmem_cache *s)
void kmem_cache_destroy(struct kmem_cache *s) void kmem_cache_destroy(struct kmem_cache *s)
{ {
int i; struct kmem_cache *c, *c2;
LIST_HEAD(release); LIST_HEAD(release);
bool need_rcu_barrier = false; bool need_rcu_barrier = false;
bool busy = false; bool busy = false;
BUG_ON(!is_root_cache(s));
get_online_cpus(); get_online_cpus();
get_online_mems(); get_online_mems();
...@@ -595,10 +601,8 @@ void kmem_cache_destroy(struct kmem_cache *s) ...@@ -595,10 +601,8 @@ void kmem_cache_destroy(struct kmem_cache *s)
if (s->refcount) if (s->refcount)
goto out_unlock; goto out_unlock;
for_each_memcg_cache_index(i) { for_each_memcg_cache_safe(c, c2, s) {
struct kmem_cache *c = cache_from_memcg_idx(s, i); if (do_kmem_cache_shutdown(c, &release, &need_rcu_barrier))
if (c && do_kmem_cache_shutdown(c, &release, &need_rcu_barrier))
busy = true; busy = true;
} }
...@@ -932,16 +936,11 @@ memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info) ...@@ -932,16 +936,11 @@ memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
{ {
struct kmem_cache *c; struct kmem_cache *c;
struct slabinfo sinfo; struct slabinfo sinfo;
int i;
if (!is_root_cache(s)) if (!is_root_cache(s))
return; return;
for_each_memcg_cache_index(i) { for_each_memcg_cache(c, s) {
c = cache_from_memcg_idx(s, i);
if (!c)
continue;
memset(&sinfo, 0, sizeof(sinfo)); memset(&sinfo, 0, sizeof(sinfo));
get_slabinfo(c, &sinfo); get_slabinfo(c, &sinfo);
......
...@@ -3636,13 +3636,10 @@ struct kmem_cache * ...@@ -3636,13 +3636,10 @@ struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align, __kmem_cache_alias(const char *name, size_t size, size_t align,
unsigned long flags, void (*ctor)(void *)) unsigned long flags, void (*ctor)(void *))
{ {
struct kmem_cache *s; struct kmem_cache *s, *c;
s = find_mergeable(size, align, flags, name, ctor); s = find_mergeable(size, align, flags, name, ctor);
if (s) { if (s) {
int i;
struct kmem_cache *c;
s->refcount++; s->refcount++;
/* /*
...@@ -3652,10 +3649,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align, ...@@ -3652,10 +3649,7 @@ __kmem_cache_alias(const char *name, size_t size, size_t align,
s->object_size = max(s->object_size, (int)size); s->object_size = max(s->object_size, (int)size);
s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *))); s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
for_each_memcg_cache_index(i) { for_each_memcg_cache(c, s) {
c = cache_from_memcg_idx(s, i);
if (!c)
continue;
c->object_size = s->object_size; c->object_size = s->object_size;
c->inuse = max_t(int, c->inuse, c->inuse = max_t(int, c->inuse,
ALIGN(size, sizeof(void *))); ALIGN(size, sizeof(void *)));
...@@ -4921,7 +4915,7 @@ static ssize_t slab_attr_store(struct kobject *kobj, ...@@ -4921,7 +4915,7 @@ static ssize_t slab_attr_store(struct kobject *kobj,
err = attribute->store(s, buf, len); err = attribute->store(s, buf, len);
#ifdef CONFIG_MEMCG_KMEM #ifdef CONFIG_MEMCG_KMEM
if (slab_state >= FULL && err >= 0 && is_root_cache(s)) { if (slab_state >= FULL && err >= 0 && is_root_cache(s)) {
int i; struct kmem_cache *c;
mutex_lock(&slab_mutex); mutex_lock(&slab_mutex);
if (s->max_attr_size < len) if (s->max_attr_size < len)
...@@ -4944,11 +4938,8 @@ static ssize_t slab_attr_store(struct kobject *kobj, ...@@ -4944,11 +4938,8 @@ static ssize_t slab_attr_store(struct kobject *kobj,
* directly either failed or succeeded, in which case we loop * directly either failed or succeeded, in which case we loop
* through the descendants with best-effort propagation. * through the descendants with best-effort propagation.
*/ */
for_each_memcg_cache_index(i) { for_each_memcg_cache(c, s)
struct kmem_cache *c = cache_from_memcg_idx(s, i); attribute->store(c, buf, len);
if (c)
attribute->store(c, buf, len);
}
mutex_unlock(&slab_mutex); mutex_unlock(&slab_mutex);
} }
#endif #endif
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment