Commit 230e9fc2 authored by Vladimir Davydov, committed by Linus Torvalds

slab: add SLAB_ACCOUNT flag

Currently, if we want to account all objects of a particular kmem cache,
we have to pass __GFP_ACCOUNT to each kmem_cache_alloc call, which is
inconvenient.  This patch introduces the SLAB_ACCOUNT flag which, if passed
to kmem_cache_create, forces accounting for every allocation from the
cache even if __GFP_ACCOUNT is not passed.
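For illustration, a minimal sketch of how a cache marked with SLAB_ACCOUNT
would be created and used (struct foo, the cache name, and the module
boilerplate are hypothetical, not part of this patch):

    #include <linux/module.h>
    #include <linux/slab.h>

    /* "struct foo" is a made-up object type, for illustration only. */
    struct foo {
            unsigned long bar;
    };

    static struct kmem_cache *foo_cache;

    static int __init foo_init(void)
    {
            struct foo *obj;

            /* Every allocation from this cache is charged to the memcg. */
            foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
                                          SLAB_ACCOUNT, NULL);
            if (!foo_cache)
                    return -ENOMEM;

            /*
             * No __GFP_ACCOUNT is needed at the call site; before this
             * patch the caller had to pass GFP_KERNEL | __GFP_ACCOUNT here.
             */
            obj = kmem_cache_alloc(foo_cache, GFP_KERNEL);
            if (obj)
                    kmem_cache_free(foo_cache, obj);
            return 0;
    }
    module_init(foo_init);
    MODULE_LICENSE("GPL");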

This patch does not make any of the existing caches use this flag - it
will be done later in the series.

Note: a cache created with SLAB_ACCOUNT cannot be merged with a cache created
without SLAB_ACCOUNT, because merged caches share the same kmem_cache struct
and hence cannot have different sets of SLAB_* flags.  Thus using this flag
will probably reduce the number of merged slabs even if kmem accounting is
not used (only compiled in).
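As a sketch of that merging effect (the cache names below are made up; the
point is that find_mergeable() compares the SLAB_MERGE_SAME bits of candidate
caches, and those bits now include SLAB_ACCOUNT):

    #include <linux/slab.h>

    /*
     * Two caches with identical object size and alignment.  Without
     * SLAB_ACCOUNT the allocator would normally merge them into one
     * kmem_cache; with CONFIG_MEMCG_KMEM=y the differing SLAB_ACCOUNT
     * bit keeps them separate, whether or not kmem accounting is ever
     * enabled at runtime.
     */
    static struct kmem_cache *plain_cache, *accounted_cache;

    static void create_example_caches(void)
    {
            plain_cache = kmem_cache_create("plain", 128, 0, 0, NULL);
            accounted_cache = kmem_cache_create("accounted", 128, 0,
                                                SLAB_ACCOUNT, NULL);
    }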

Signed-off-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Suggested-by: Tejun Heo <tj@kernel.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a9bb7e62
include/linux/memcontrol.h:

@@ -766,15 +766,13 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
 	return memcg ? memcg->kmemcg_id : -1;
 }
 
-struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep);
+struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp);
 void __memcg_kmem_put_cache(struct kmem_cache *cachep);
 
-static inline bool __memcg_kmem_bypass(gfp_t gfp)
+static inline bool __memcg_kmem_bypass(void)
 {
 	if (!memcg_kmem_enabled())
 		return true;
-	if (!(gfp & __GFP_ACCOUNT))
-		return true;
 	if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
 		return true;
 	return false;

@@ -791,7 +789,9 @@ static inline bool __memcg_kmem_bypass(gfp_t gfp)
 static __always_inline int memcg_kmem_charge(struct page *page,
 					     gfp_t gfp, int order)
 {
-	if (__memcg_kmem_bypass(gfp))
+	if (__memcg_kmem_bypass())
+		return 0;
+	if (!(gfp & __GFP_ACCOUNT))
 		return 0;
 	return __memcg_kmem_charge(page, gfp, order);
 }

@@ -810,16 +810,15 @@ static __always_inline void memcg_kmem_uncharge(struct page *page, int order)
 /**
  * memcg_kmem_get_cache: selects the correct per-memcg cache for allocation
  * @cachep: the original global kmem cache
- * @gfp: allocation flags.
  *
  * All memory allocated from a per-memcg cache is charged to the owner memcg.
  */
 static __always_inline struct kmem_cache *
 memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
 {
-	if (__memcg_kmem_bypass(gfp))
+	if (__memcg_kmem_bypass())
 		return cachep;
-	return __memcg_kmem_get_cache(cachep);
+	return __memcg_kmem_get_cache(cachep, gfp);
 }
 
 static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
include/linux/slab.h:

@@ -86,6 +86,11 @@
 #else
 # define SLAB_FAILSLAB		0x00000000UL
 #endif
+#ifdef CONFIG_MEMCG_KMEM
+# define SLAB_ACCOUNT		0x04000000UL	/* Account to memcg */
+#else
+# define SLAB_ACCOUNT		0x00000000UL
+#endif
 
 /* The following flags affect the page allocator grouping pages by mobility */
 #define SLAB_RECLAIM_ACCOUNT	0x00020000UL	/* Objects are reclaimable */
mm/memcontrol.c:

@@ -2356,7 +2356,7 @@ static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
  * Can't be called in interrupt context or from kernel threads.
  * This function needs to be called with rcu_read_lock() held.
  */
-struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep)
+struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
 {
 	struct mem_cgroup *memcg;
 	struct kmem_cache *memcg_cachep;

@@ -2364,6 +2364,12 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep)
 	VM_BUG_ON(!is_root_cache(cachep));
 
+	if (cachep->flags & SLAB_ACCOUNT)
+		gfp |= __GFP_ACCOUNT;
+
+	if (!(gfp & __GFP_ACCOUNT))
+		return cachep;
+
 	if (current->memcg_kmem_skip_account)
 		return cachep;
mm/slab.h:

@@ -128,10 +128,11 @@ static inline unsigned long kmem_cache_flags(unsigned long object_size,
 #if defined(CONFIG_SLAB)
 #define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
-			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | SLAB_NOTRACK)
+			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
+			  SLAB_NOTRACK | SLAB_ACCOUNT)
 #elif defined(CONFIG_SLUB)
 #define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
-			  SLAB_TEMPORARY | SLAB_NOTRACK)
+			  SLAB_TEMPORARY | SLAB_NOTRACK | SLAB_ACCOUNT)
 #else
 #define SLAB_CACHE_FLAGS (0)
 #endif
mm/slab_common.c:

@@ -37,7 +37,8 @@ struct kmem_cache *kmem_cache;
 		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
 		SLAB_FAILSLAB)
 
-#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | SLAB_NOTRACK)
+#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
+			 SLAB_NOTRACK | SLAB_ACCOUNT)
 
 /*
  * Merge control. If this is set then no merging of slab caches will occur.
mm/slub.c:

@@ -5362,6 +5362,8 @@ static char *create_unique_id(struct kmem_cache *s)
 		*p++ = 'F';
 	if (!(s->flags & SLAB_NOTRACK))
 		*p++ = 't';
+	if (s->flags & SLAB_ACCOUNT)
+		*p++ = 'A';
 	if (p != name + 1)
 		*p++ = '-';
 	p += sprintf(p, "%07d", s->size);