Commit 3b7f8813 authored by Andrey Konovalov, committed by Andrew Morton

kasan: only define kasan_never_merge for Generic mode

KASAN prevents merging of slab caches whose objects have per-object
metadata stored in redzones.

As now only the Generic mode uses per-object metadata, define
kasan_never_merge() only for this mode.

Link: https://lkml.kernel.org/r/81ed01f29ff3443580b7e2fe362a8b47b1e8006d.1662411799.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Marco Elver <elver@google.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Peter Collingbourne <pcc@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent f372bde9
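
For context on how this flag is consumed (not part of this commit): the slab core ORs kasan_never_merge() into the never-merge mask it checks before merging caches (SLAB_NEVER_MERGE in mm/slab_common.c), so a cache carrying SLAB_KASAN is never merged. Below is a minimal, compilable userspace sketch of that decision; the SLAB_KASAN value and the kasan_requires_meta() stub are illustrative stand-ins, not the kernel's definitions.

#include <stdbool.h>
#include <stdio.h>

typedef unsigned int slab_flags_t;

/*
 * Hypothetical stand-ins for this sketch: SLAB_KASAN's real definition
 * lives in include/linux/slab.h, and kasan_requires_meta() is an
 * internal KASAN helper; neither is reproduced from the kernel here.
 */
#define SLAB_KASAN ((slab_flags_t)0x08000000U)

static bool kasan_requires_meta(void)
{
	return true; /* modeling Generic mode: metadata lives in redzones */
}

/* Mirrors the kasan_never_merge() this commit adds to mm/kasan/generic.c below. */
static slab_flags_t kasan_never_merge(void)
{
	if (!kasan_requires_meta())
		return 0;
	return SLAB_KASAN;
}

/*
 * The slab core refuses to merge a cache whose flags intersect the
 * never-merge mask; kasan_never_merge() contributes to that mask.
 */
static bool cache_mergeable(slab_flags_t flags)
{
	return (flags & kasan_never_merge()) == 0;
}

int main(void)
{
	printf("plain cache mergeable:      %d\n", cache_mergeable(0));
	printf("SLAB_KASAN cache mergeable: %d\n", cache_mergeable(SLAB_KASAN));
	return 0;
}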
include/linux/kasan.h
@@ -103,14 +103,6 @@ struct kasan_cache {
 	bool is_kmalloc;
 };
 
-slab_flags_t __kasan_never_merge(void);
-static __always_inline slab_flags_t kasan_never_merge(void)
-{
-	if (kasan_enabled())
-		return __kasan_never_merge();
-	return 0;
-}
-
 void __kasan_unpoison_range(const void *addr, size_t size);
 static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
 {
@@ -261,10 +253,6 @@ static __always_inline bool kasan_check_byte(const void *addr)
 
 #else /* CONFIG_KASAN */
 
-static inline slab_flags_t kasan_never_merge(void)
-{
-	return 0;
-}
 static inline void kasan_unpoison_range(const void *address, size_t size) {}
 static inline void kasan_poison_pages(struct page *page, unsigned int order,
 				      bool init) {}
@@ -325,6 +313,7 @@ static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
 #ifdef CONFIG_KASAN_GENERIC
 
 size_t kasan_metadata_size(struct kmem_cache *cache);
+slab_flags_t kasan_never_merge(void);
 
 void kasan_cache_shrink(struct kmem_cache *cache);
 void kasan_cache_shutdown(struct kmem_cache *cache);
@@ -338,6 +327,11 @@ static inline size_t kasan_metadata_size(struct kmem_cache *cache)
 {
 	return 0;
 }
+/* And thus nothing prevents cache merging. */
+static inline slab_flags_t kasan_never_merge(void)
+{
+	return 0;
+}
 static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
 static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
 
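Why the kasan_enabled() indirection can go away: that check is a static key that only the HW_TAGS mode flips at runtime (HW_TAGS can be disabled on the kernel command line), while Generic mode is unconditionally active whenever it is compiled in, so a plain declaration under CONFIG_KASAN_GENERIC suffices. For reference, a sketch of kasan_enabled()'s shape, based on include/linux/kasan.h around this kernel version; details may differ:

#ifdef CONFIG_KASAN_HW_TAGS

DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);

/*
 * HW_TAGS can be turned off at boot, so callers check a static key
 * before entering the KASAN runtime.
 */
static __always_inline bool kasan_enabled(void)
{
	return static_branch_likely(&kasan_flag_enabled);
}

#else /* CONFIG_KASAN_HW_TAGS */

/* Generic and SW_TAGS modes are always on when compiled in. */
static inline bool kasan_enabled(void)
{
	return IS_ENABLED(CONFIG_KASAN);
}

#endif /* CONFIG_KASAN_HW_TAGS */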
mm/kasan/common.c
@@ -88,14 +88,6 @@ asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
 }
 #endif /* CONFIG_KASAN_STACK */
 
-/* Only allow cache merging when no per-object metadata is present. */
-slab_flags_t __kasan_never_merge(void)
-{
-	if (kasan_requires_meta())
-		return SLAB_KASAN;
-	return 0;
-}
-
 void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
 {
 	u8 tag;
mm/kasan/generic.c
@@ -328,6 +328,14 @@ DEFINE_ASAN_SET_SHADOW(f3);
 DEFINE_ASAN_SET_SHADOW(f5);
 DEFINE_ASAN_SET_SHADOW(f8);
 
+/* Only allow cache merging when no per-object metadata is present. */
+slab_flags_t kasan_never_merge(void)
+{
+	if (!kasan_requires_meta())
+		return 0;
+	return SLAB_KASAN;
+}
+
 /*
  * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
  * For larger allocations larger redzones are used.
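Both the removed __kasan_never_merge() and the new kasan_never_merge() key off kasan_requires_meta(), a helper in mm/kasan/kasan.h that is not part of this diff. A hedged sketch of its intent, grounded in the commit message's claim that only Generic mode now uses per-object metadata; the upstream body differs in detail:

#ifdef CONFIG_KASAN_GENERIC

/*
 * Generic KASAN stores alloc/free stack traces and quarantine state
 * in per-object metadata, so redzones must be preserved.
 */
static inline bool kasan_requires_meta(void)
{
	return true;
}

#else /* tag-based modes */

/*
 * SW_TAGS/HW_TAGS record stack traces in a global stack ring and
 * keep no per-object metadata, so caches may merge freely.
 */
static inline bool kasan_requires_meta(void)
{
	return false;
}

#endif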