Commit 239d6c96 authored by Suren Baghdasaryan, committed by Andrew Morton

codetag: debug: skip objext checking when it's for objext itself

objext objects are created with __GFP_NO_OBJ_EXT flag and therefore have
no corresponding objext themselves (otherwise we would get an infinite
recursion). When freeing these objects their codetag will be empty and
when CONFIG_MEM_ALLOC_PROFILING_DEBUG is enabled this will lead to false
warnings. Introduce CODETAG_EMPTY special codetag value to mark
allocations which intentionally lack codetag to avoid these warnings.
Set objext codetags to CODETAG_EMPTY before freeing to indicate that
the codetag is expected to be empty.

Link: https://lkml.kernel.org/r/20240321163705.3067592-34-surenb@google.com
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Tested-by: Kees Cook <keescook@chromium.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Alex Gaynor <alex.gaynor@gmail.com>
Cc: Alice Ryhl <aliceryhl@google.com>
Cc: Andreas Hindborg <a.hindborg@samsung.com>
Cc: Benno Lossin <benno.lossin@proton.me>
Cc: "Björn Roy Baron" <bjorn3_gh@protonmail.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Gary Guo <gary@garyguo.net>
Cc: Kent Overstreet <kent.overstreet@linux.dev>
Cc: Miguel Ojeda <ojeda@kernel.org>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wedson Almeida Filho <wedsonaf@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 1438d349
@@ -29,6 +29,27 @@ struct alloc_tag {
	struct alloc_tag_counters __percpu *counters;
} __aligned(8);
#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
#define CODETAG_EMPTY ((void *)1)
static inline bool is_codetag_empty(union codetag_ref *ref)
{
return ref->ct == CODETAG_EMPTY;
}
/*
 * Mark @ref as intentionally lacking a codetag so later checks do not
 * warn about it; a NULL @ref is silently ignored.
 */
static inline void set_codetag_empty(union codetag_ref *ref)
{
	if (!ref)
		return;

	ref->ct = CODETAG_EMPTY;
}
#else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
static inline bool is_codetag_empty(union codetag_ref *ref) { return false; }
/*
 * Provide a no-op stub so the API stays symmetric with the DEBUG build:
 * without it, any caller outside a CONFIG_MEM_ALLOC_PROFILING_DEBUG
 * section fails to compile when the option is disabled.
 */
static inline void set_codetag_empty(union codetag_ref *ref) {}
#endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
#ifdef CONFIG_MEM_ALLOC_PROFILING
struct codetag_bytes {
@@ -151,6 +172,11 @@ static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes)
	if (!ref || !ref->ct)
		return;
if (is_codetag_empty(ref)) {
ref->ct = NULL;
return;
}
	tag = ct_to_alloc_tag(ref->ct);
	this_cpu_sub(tag->counters->bytes, bytes);
......
@@ -1873,6 +1873,30 @@ static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
#ifdef CONFIG_SLAB_OBJ_EXT
#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
/*
 * Flag the slabobj_ext vector @obj_exts as an allocation that deliberately
 * carries no codetag of its own, so freeing it does not trigger the
 * MEM_ALLOC_PROFILING_DEBUG empty-codetag warning.
 */
static inline void mark_objexts_empty(struct slabobj_ext *obj_exts)
{
	struct slab *slab = virt_to_slab(obj_exts);
	struct slabobj_ext *exts = slab_obj_exts(slab);
	unsigned int idx;

	if (!exts)
		return;

	idx = obj_to_index(slab->slab_cache, slab, obj_exts);
	/* codetag should be NULL */
	WARN_ON(exts[idx].ref.ct);
	set_codetag_empty(&exts[idx].ref);
}
#else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
/* No-op when allocation-profiling debug checks are compiled out. */
static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) {}
#endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
/*
 * The allocated objcg pointers array is not accounted directly.
 * Moreover, it should not come from DMA buffer and is not readily
@@ -1913,6 +1937,7 @@ static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
	 * assign slabobj_exts in parallel. In this case the existing
	 * objcg vector should be reused.
	 */
mark_objexts_empty(vec);
		kfree(vec);
		return 0;
	}
@@ -1929,6 +1954,14 @@ static inline void free_slab_obj_exts(struct slab *slab)
	if (!obj_exts)
		return;
/*
* obj_exts was created with __GFP_NO_OBJ_EXT flag, therefore its
* corresponding extension will be NULL. alloc_tag_sub() will throw a
* warning if slab has extensions but the extension of an object is
* NULL, therefore replace NULL with CODETAG_EMPTY to indicate that
* the extension for obj_exts is expected to be NULL.
*/
mark_objexts_empty(obj_exts);
	kfree(obj_exts);
	slab->obj_exts = 0;
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment