Commit 4b873696 authored by Suren Baghdasaryan's avatar Suren Baghdasaryan Committed by Andrew Morton

mm/slab: add allocation accounting into slab allocation and free paths

Account slab allocations using codetag reference embedded into slabobj_ext.

Link: https://lkml.kernel.org/r/20240321163705.3067592-24-surenb@google.com
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Co-developed-by: default avatarKent Overstreet <kent.overstreet@linux.dev>
Signed-off-by: default avatarKent Overstreet <kent.overstreet@linux.dev>
Reviewed-by: default avatarKees Cook <keescook@chromium.org>
Reviewed-by: default avatarVlastimil Babka <vbabka@suse.cz>
Tested-by: default avatarKees Cook <keescook@chromium.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Alex Gaynor <alex.gaynor@gmail.com>
Cc: Alice Ryhl <aliceryhl@google.com>
Cc: Andreas Hindborg <a.hindborg@samsung.com>
Cc: Benno Lossin <benno.lossin@proton.me>
Cc: "Björn Roy Baron" <bjorn3_gh@protonmail.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Gary Guo <gary@garyguo.net>
Cc: Miguel Ojeda <ojeda@kernel.org>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Wedson Almeida Filho <wedsonaf@gmail.com>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
parent c789b5fe
...@@ -1932,7 +1932,68 @@ static inline void free_slab_obj_exts(struct slab *slab) ...@@ -1932,7 +1932,68 @@ static inline void free_slab_obj_exts(struct slab *slab)
kfree(obj_exts); kfree(obj_exts);
slab->obj_exts = 0; slab->obj_exts = 0;
} }
/*
 * Decide whether freshly allocated slab objects need a slabobj_ext
 * vector prepared in the post-alloc hook.
 *
 * Allocation profiling is currently the only consumer served here.
 * CONFIG_MEMCG_KMEM creates its vector of obj_cgroup objects
 * conditionally inside memcg_slab_post_alloc_hook; no other users
 * for now.
 */
static inline bool need_slab_obj_ext(void)
{
	return mem_alloc_profiling_enabled();
}
/*
 * Look up — creating the per-slab vector on demand — the slabobj_ext
 * entry for object @p of cache @s, so that an allocation tag can be
 * attached to it.
 *
 * Returns NULL when no extension applies: @p is NULL, the cache opts
 * out via SLAB_NO_OBJ_EXT, the allocation opts out via
 * __GFP_NO_OBJ_EXT, or the extension vector could not be allocated.
 */
static inline struct slabobj_ext *
prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
{
	struct slab *slab;

	if (!p)
		return NULL;

	/* Whole cache opted out of object extensions. */
	if (s->flags & SLAB_NO_OBJ_EXT)
		return NULL;

	/*
	 * This particular allocation opted out — NOTE(review): presumably
	 * used to break recursion when allocating the extension vector
	 * itself; confirm against __GFP_NO_OBJ_EXT users.
	 */
	if (flags & __GFP_NO_OBJ_EXT)
		return NULL;

	slab = virt_to_slab(p);
	/*
	 * Lazily allocate the per-slab extension vector on first use;
	 * warn and bail out if it cannot be created.
	 */
	if (!slab_obj_exts(slab) &&
	    WARN(alloc_slab_obj_exts(slab, s, flags, false),
		 "%s, %s: Failed to create slab extension vector!\n",
		 __func__, s->name))
		return NULL;

	/* Index the vector by the object's position within its slab. */
	return slab_obj_exts(slab) + obj_to_index(s, slab, p);
}
/*
 * On free, subtract the accounted size of each of the @objects objects
 * in @p (belonging to slab @slab of cache @s) from its allocation tag,
 * via the tag reference stored in the slab's extension vector.
 *
 * Compiles to a no-op unless CONFIG_MEM_ALLOC_PROFILING is set; at
 * runtime it bails out early when profiling is disabled.
 */
static inline void
alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
			     int objects)
{
#ifdef CONFIG_MEM_ALLOC_PROFILING
	struct slabobj_ext *obj_exts;
	int i;

	/* Nothing to undo when allocation profiling is disabled. */
	if (!mem_alloc_profiling_enabled())
		return;

	/*
	 * Slabs with no extension vector (e.g. created while profiling
	 * was off, or vector allocation failed) have nothing accounted.
	 */
	obj_exts = slab_obj_exts(slab);
	if (!obj_exts)
		return;

	for (i = 0; i < objects; i++) {
		/* Map each freed object back to its slot in the vector. */
		unsigned int off = obj_to_index(s, slab, p[i]);

		alloc_tag_sub(&obj_exts[off].ref, s->size);
	}
#endif
}
#else /* CONFIG_SLAB_OBJ_EXT */ #else /* CONFIG_SLAB_OBJ_EXT */
static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s, static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
gfp_t gfp, bool new_slab) gfp_t gfp, bool new_slab)
{ {
...@@ -1942,6 +2003,24 @@ static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s, ...@@ -1942,6 +2003,24 @@ static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
static inline void free_slab_obj_exts(struct slab *slab) static inline void free_slab_obj_exts(struct slab *slab)
{ {
} }
/* CONFIG_SLAB_OBJ_EXT=n stub: object extensions are never needed. */
static inline bool need_slab_obj_ext(void)
{
	return false;
}
/* CONFIG_SLAB_OBJ_EXT=n stub: no extension vector ever exists. */
static inline struct slabobj_ext *
prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
{
	return NULL;
}
/* CONFIG_SLAB_OBJ_EXT=n stub: nothing was accounted, nothing to undo. */
static inline void
alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
			     int objects)
{
}
#endif /* CONFIG_SLAB_OBJ_EXT */ #endif /* CONFIG_SLAB_OBJ_EXT */
#ifdef CONFIG_MEMCG_KMEM #ifdef CONFIG_MEMCG_KMEM
...@@ -2370,7 +2449,7 @@ static __always_inline void account_slab(struct slab *slab, int order, ...@@ -2370,7 +2449,7 @@ static __always_inline void account_slab(struct slab *slab, int order,
static __always_inline void unaccount_slab(struct slab *slab, int order, static __always_inline void unaccount_slab(struct slab *slab, int order,
struct kmem_cache *s) struct kmem_cache *s)
{ {
if (memcg_kmem_online()) if (memcg_kmem_online() || need_slab_obj_ext())
free_slab_obj_exts(slab); free_slab_obj_exts(slab);
mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s), mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
...@@ -3823,6 +3902,7 @@ void slab_post_alloc_hook(struct kmem_cache *s, struct obj_cgroup *objcg, ...@@ -3823,6 +3902,7 @@ void slab_post_alloc_hook(struct kmem_cache *s, struct obj_cgroup *objcg,
unsigned int orig_size) unsigned int orig_size)
{ {
unsigned int zero_size = s->object_size; unsigned int zero_size = s->object_size;
struct slabobj_ext *obj_exts;
bool kasan_init = init; bool kasan_init = init;
size_t i; size_t i;
gfp_t init_flags = flags & gfp_allowed_mask; gfp_t init_flags = flags & gfp_allowed_mask;
...@@ -3865,6 +3945,18 @@ void slab_post_alloc_hook(struct kmem_cache *s, struct obj_cgroup *objcg, ...@@ -3865,6 +3945,18 @@ void slab_post_alloc_hook(struct kmem_cache *s, struct obj_cgroup *objcg,
kmemleak_alloc_recursive(p[i], s->object_size, 1, kmemleak_alloc_recursive(p[i], s->object_size, 1,
s->flags, init_flags); s->flags, init_flags);
kmsan_slab_alloc(s, p[i], init_flags); kmsan_slab_alloc(s, p[i], init_flags);
if (need_slab_obj_ext()) {
obj_exts = prepare_slab_obj_exts_hook(s, flags, p[i]);
#ifdef CONFIG_MEM_ALLOC_PROFILING
/*
* Currently obj_exts is used only for allocation profiling.
* If other users appear then mem_alloc_profiling_enabled()
* check should be added before alloc_tag_add().
*/
if (likely(obj_exts))
alloc_tag_add(&obj_exts->ref, current->alloc_tag, s->size);
#endif
}
} }
memcg_slab_post_alloc_hook(s, objcg, flags, size, p); memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
...@@ -4339,6 +4431,7 @@ void slab_free(struct kmem_cache *s, struct slab *slab, void *object, ...@@ -4339,6 +4431,7 @@ void slab_free(struct kmem_cache *s, struct slab *slab, void *object,
unsigned long addr) unsigned long addr)
{ {
memcg_slab_free_hook(s, slab, &object, 1); memcg_slab_free_hook(s, slab, &object, 1);
alloc_tagging_slab_free_hook(s, slab, &object, 1);
if (likely(slab_free_hook(s, object, slab_want_init_on_free(s)))) if (likely(slab_free_hook(s, object, slab_want_init_on_free(s))))
do_slab_free(s, slab, object, object, 1, addr); do_slab_free(s, slab, object, object, 1, addr);
...@@ -4349,6 +4442,7 @@ void slab_free_bulk(struct kmem_cache *s, struct slab *slab, void *head, ...@@ -4349,6 +4442,7 @@ void slab_free_bulk(struct kmem_cache *s, struct slab *slab, void *head,
void *tail, void **p, int cnt, unsigned long addr) void *tail, void **p, int cnt, unsigned long addr)
{ {
memcg_slab_free_hook(s, slab, p, cnt); memcg_slab_free_hook(s, slab, p, cnt);
alloc_tagging_slab_free_hook(s, slab, p, cnt);
/* /*
* With KASAN enabled slab_free_freelist_hook modifies the freelist * With KASAN enabled slab_free_freelist_hook modifies the freelist
* to remove objects, whose reuse must be delayed. * to remove objects, whose reuse must be delayed.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment