Commit aa1ef4d7 authored by Andrey Konovalov, committed by Linus Torvalds

kasan, mm: reset tags when accessing metadata

Kernel allocator code accesses metadata for slab objects; this metadata
may lie out of bounds of the object itself, or may be accessed when the
object has already been freed.  Such accesses trigger tag faults and lead
to false-positive reports with hardware tag-based KASAN.

Software KASAN modes disable instrumentation for allocator code via the
KASAN_SANITIZE Makefile macro and rely on kasan_enable_current()/
kasan_disable_current() annotations to ignore the resulting reports.
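For reference, the annotation pattern used by the software modes looks
like this (a minimal sketch; poison_object() is a made-up name, but
kasan_disable_current()/kasan_enable_current() are the real annotations,
which increment and decrement a per-task depth counter checked by the
report code):

        /* Sketch: while current->kasan_depth is non-zero, software KASAN
         * suppresses reports for accesses performed by this task. */
        static void poison_object(void *addr, size_t size)
        {
                kasan_disable_current();        /* current->kasan_depth++ */
                memset(addr, PAGE_POISON, size);
                kasan_enable_current();         /* current->kasan_depth-- */
        }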

With hardware tag-based KASAN, neither of those options is available: the
mode doesn't use compiler instrumentation, no tag faults are ignored, and
MTE is disabled after the first one.

Instead, reset tags when accessing metadata (currently only for SLUB).
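Every hunk below follows the same pattern: route the allocator-internal
access through kasan_reset_tag(), i.e. through a pointer whose tag bits
have been set back to the match-all value.  A minimal sketch of the idea,
assuming arm64 where the tag lives in the pointer's top byte under Top
Byte Ignore (an illustration, not the kernel's actual implementation):

        /* Illustration: 0xff is the match-all tag for kernel pointers,
         * so a pointer with its top byte set to 0xff passes any MTE tag
         * check, and metadata accesses through it no longer fault. */
        static inline void *reset_tag_sketch(const void *addr)
        {
                return (void *)((unsigned long)addr | (0xffUL << 56));
        }

Accesses then take the form memset(kasan_reset_tag(object), 0,
s->object_size), as in the SLUB hunks below.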

Link: https://lkml.kernel.org/r/a0f3cefbc49f34c843b664110842de4db28179d0.1606161801.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Signed-off-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Acked-by: Marco Elver <elver@google.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Tested-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Branislav Rankov <Branislav.Rankov@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Kevin Brodsky <kevin.brodsky@arm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 4291e9ee
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1204,8 +1204,10 @@ static void kernel_init_free_pages(struct page *page, int numpages)
 	/* s390's use of memset() could override KASAN redzones. */
 	kasan_disable_current();
-	for (i = 0; i < numpages; i++)
+	for (i = 0; i < numpages; i++) {
+		page_kasan_tag_reset(page + i);
 		clear_highpage(page + i);
+	}
 	kasan_enable_current();
 }
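Note: clear_highpage() writes through a pointer derived from the page,
and that pointer carries the KASAN tag stored in page->flags, which at
this point may be stale.  page_kasan_tag_reset() clears the stored tag
first so the write goes through a match-all pointer.  A sketch of the
helper's effect (assuming the page_kasan_tag_set() interface; not a
verbatim copy of include/linux/mm.h):

        /* Sketch: reset the KASAN tag kept in page->flags to the
         * match-all value, so page_address(page) yields a pointer
         * that passes any tag check. */
        static inline void page_kasan_tag_reset_sketch(struct page *page)
        {
                page_kasan_tag_set(page, 0xff);
        }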
--- a/mm/page_poison.c
+++ b/mm/page_poison.c
@@ -25,7 +25,7 @@ static void poison_page(struct page *page)
 	/* KASAN still think the page is in-use, so skip it. */
 	kasan_disable_current();
-	memset(addr, PAGE_POISON, PAGE_SIZE);
+	memset(kasan_reset_tag(addr), PAGE_POISON, PAGE_SIZE);
 	kasan_enable_current();
 	kunmap_atomic(addr);
 }
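Note that poison_page() now carries both annotations, and they address
different modes; restated with editorial comments:

        kasan_disable_current();        /* silences the software modes */
        /* kasan_reset_tag() keeps hardware tag-based KASAN from taking
         * an unrecoverable tag fault on the mismatched pointer: */
        memset(kasan_reset_tag(addr), PAGE_POISON, PAGE_SIZE);
        kasan_enable_current();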
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -249,7 +249,7 @@ static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
 {
 #ifdef CONFIG_SLAB_FREELIST_HARDENED
 	/*
-	 * When CONFIG_KASAN_SW_TAGS is enabled, ptr_addr might be tagged.
+	 * When CONFIG_KASAN_SW/HW_TAGS is enabled, ptr_addr might be tagged.
 	 * Normally, this doesn't cause any issues, as both set_freepointer()
 	 * and get_freepointer() are called with a pointer with the same tag.
 	 * However, there are some issues with CONFIG_SLUB_DEBUG code. For
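The comment in this hunk refers to SLUB's freelist pointer obfuscation:
freelist_ptr() encodes the stored pointer as ptr ^ s->random ^
swab(ptr_addr), so encode and decode only cancel when both use identical
ptr_addr bits, tag included.  A self-contained userspace demonstration of
why a tag mismatch corrupts the decode (names and constants are made up):

        #include <stdint.h>
        #include <stdio.h>

        /* Same shape as SLUB's freelist_ptr() obfuscation. */
        static uint64_t encode(uint64_t ptr, uint64_t rnd, uint64_t ptr_addr)
        {
                return ptr ^ rnd ^ __builtin_bswap64(ptr_addr);
        }

        int main(void)
        {
                uint64_t rnd = 0x1234567890abcdefULL;
                uint64_t ptr = 0xffff000012345678ULL;
                uint64_t addr_a = 0x0aff800000001000ULL; /* tag 0x0a */
                uint64_t addr_b = 0x0bff800000001000ULL; /* same address, tag 0x0b */
                uint64_t stored = encode(ptr, rnd, addr_a);

                /* Decoding with the same ptr_addr recovers ptr... */
                printf("%#lx\n", (unsigned long)(stored ^ rnd ^ __builtin_bswap64(addr_a)));
                /* ...decoding with a differently tagged ptr_addr does not. */
                printf("%#lx\n", (unsigned long)(stored ^ rnd ^ __builtin_bswap64(addr_b)));
                return 0;
        }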
@@ -275,6 +275,7 @@ static inline void *freelist_dereference(const struct kmem_cache *s,
 static inline void *get_freepointer(struct kmem_cache *s, void *object)
 {
+	object = kasan_reset_tag(object);
 	return freelist_dereference(s, object + s->offset);
 }
@@ -304,6 +305,7 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
 	BUG_ON(object == fp); /* naive detection of double free or corruption */
 #endif
+	freeptr_addr = (unsigned long)kasan_reset_tag((void *)freeptr_addr);
 	*(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);
 }
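Since freeptr_addr is an unsigned long rather than a pointer, the tag
reset in set_freepointer() round-trips through a cast; restated with
editorial comments:

        /* Cast to a pointer so kasan_reset_tag() can clear the tag bits,
         * then back to unsigned long for the dereference below. */
        freeptr_addr = (unsigned long)kasan_reset_tag((void *)freeptr_addr);
        *(void **)freeptr_addr = freelist_ptr(s, fp, freeptr_addr);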
@@ -538,8 +540,8 @@ static void print_section(char *level, char *text, u8 *addr,
 			  unsigned int length)
 {
 	metadata_access_enable();
-	print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
-			length, 1);
+	print_hex_dump(level, kasan_reset_tag(text), DUMP_PREFIX_ADDRESS,
+			16, 1, addr, length, 1);
 	metadata_access_disable();
 }
@@ -570,7 +572,7 @@ static struct track *get_track(struct kmem_cache *s, void *object,
 	p = object + get_info_end(s);
-	return p + alloc;
+	return kasan_reset_tag(p + alloc);
 }

 static void set_track(struct kmem_cache *s, void *object,
@@ -583,7 +585,8 @@ static void set_track(struct kmem_cache *s, void *object,
 	unsigned int nr_entries;
 	metadata_access_enable();
-	nr_entries = stack_trace_save(p->addrs, TRACK_ADDRS_COUNT, 3);
+	nr_entries = stack_trace_save(kasan_reset_tag(p->addrs),
+				      TRACK_ADDRS_COUNT, 3);
 	metadata_access_disable();
 	if (nr_entries < TRACK_ADDRS_COUNT)
@@ -747,7 +750,7 @@ static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page,
 static void init_object(struct kmem_cache *s, void *object, u8 val)
 {
-	u8 *p = object;
+	u8 *p = kasan_reset_tag(object);
 	if (s->flags & SLAB_RED_ZONE)
 		memset(p - s->red_left_pad, val, s->red_left_pad);
@@ -777,7 +780,7 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
 	u8 *addr = page_address(page);
 	metadata_access_enable();
-	fault = memchr_inv(start, value, bytes);
+	fault = memchr_inv(kasan_reset_tag(start), value, bytes);
 	metadata_access_disable();
 	if (!fault)
 		return 1;
@@ -873,7 +876,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
 	pad = end - remainder;
 	metadata_access_enable();
-	fault = memchr_inv(pad, POISON_INUSE, remainder);
+	fault = memchr_inv(kasan_reset_tag(pad), POISON_INUSE, remainder);
 	metadata_access_disable();
 	if (!fault)
 		return 1;
@@ -1118,7 +1121,7 @@ void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr)
 		return;
 	metadata_access_enable();
-	memset(addr, POISON_INUSE, page_size(page));
+	memset(kasan_reset_tag(addr), POISON_INUSE, page_size(page));
 	metadata_access_disable();
 }
@@ -1566,10 +1569,10 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
 			 * Clear the object and the metadata, but don't touch
 			 * the redzone.
 			 */
-			memset(object, 0, s->object_size);
+			memset(kasan_reset_tag(object), 0, s->object_size);
 			rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad
 							   : 0;
-			memset((char *)object + s->inuse, 0,
+			memset((char *)kasan_reset_tag(object) + s->inuse, 0,
 			       s->size - s->inuse - rsize);
 		}
@@ -2881,10 +2884,10 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 		stat(s, ALLOC_FASTPATH);
 	}
-	maybe_wipe_obj_freeptr(s, object);
+	maybe_wipe_obj_freeptr(s, kasan_reset_tag(object));
 	if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object)
-		memset(object, 0, s->object_size);
+		memset(kasan_reset_tag(object), 0, s->object_size);
 	slab_post_alloc_hook(s, objcg, gfpflags, 1, &object);