Commit 99f3fe41 authored by Andrey Konovalov, committed by Andrew Morton

kasan: clean up is_kfence_address checks

1. Do not untag addresses that are passed to is_kfence_address: it
   tolerates tagged addresses (see the sketch after this list).

2. Move is_kfence_address checks from internal KASAN functions
   (kasan_poison/unpoison, etc.) to external-facing ones.

   Note that kasan_poison/unpoison are never called outside of KASAN/slab
   code anymore, so the "Skip KFENCE memory if called explicitly outside
   of sl*b" comment they carry is no longer accurate; drop it.

3. Simplify/reorganize the code around the updated checks.
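
For reference, is_kfence_address() reduces to a single range check against
the KFENCE pool, which is why it tolerates tagged pointers. A simplified
sketch of that check follows (the real definition lives in
include/linux/kfence.h; the exact form here is slightly condensed):

/*
 * Sketch of is_kfence_address(): a bare range check against the pool.
 *
 * KFENCE objects are never assigned a KASAN tag, so a pointer into the
 * pool carries the default tag and the subtraction works unmodified;
 * a pointer with any other tag differs from the pool range in its top
 * byte, so the result cannot fall below KFENCE_POOL_SIZE either way.
 * Calling kasan_reset_tag() before this check is therefore redundant.
 */
static __always_inline bool is_kfence_address(const void *addr)
{
	/*
	 * The trailing __kfence_pool check handles the case where the
	 * pool is not initialized and addr itself is a small value.
	 */
	return unlikely((unsigned long)((char *)addr - __kfence_pool) <
				KFENCE_POOL_SIZE && __kfence_pool);
}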

Link: https://lkml.kernel.org/r/1065732315ef4e141b6177d8f612232d4d5bc0ab.1703188911.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Marco Elver <elver@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 1a55836a
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -79,6 +79,9 @@ EXPORT_SYMBOL(kasan_disable_current);
 void __kasan_unpoison_range(const void *address, size_t size)
 {
+	if (is_kfence_address(address))
+		return;
+
 	kasan_unpoison(address, size, false);
 }
 
@@ -218,9 +221,6 @@ static inline bool poison_slab_object(struct kmem_cache *cache, void *object,
 	tagged_object = object;
 	object = kasan_reset_tag(object);
 
-	if (is_kfence_address(object))
-		return false;
-
 	if (unlikely(nearest_obj(cache, virt_to_slab(object), object) != object)) {
 		kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_INVALID_FREE);
 		return true;
@@ -247,7 +247,12 @@ static inline bool poison_slab_object(struct kmem_cache *cache, void *object,
 bool __kasan_slab_free(struct kmem_cache *cache, void *object,
 				unsigned long ip, bool init)
 {
-	bool buggy_object = poison_slab_object(cache, object, ip, init);
+	bool buggy_object;
+
+	if (is_kfence_address(object))
+		return false;
+
+	buggy_object = poison_slab_object(cache, object, ip, init);
 
 	return buggy_object ? true : kasan_quarantine_put(cache, object);
 }
@@ -359,7 +364,7 @@ void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object
 	if (unlikely(object == NULL))
 		return NULL;
 
-	if (is_kfence_address(kasan_reset_tag(object)))
+	if (is_kfence_address(object))
 		return (void *)object;
 
 	/* The object has already been unpoisoned by kasan_slab_alloc(). */
@@ -417,7 +422,7 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag
 	if (unlikely(object == ZERO_SIZE_PTR))
 		return (void *)object;
 
-	if (is_kfence_address(kasan_reset_tag(object)))
+	if (is_kfence_address(object))
 		return (void *)object;
 
 	/*
@@ -483,6 +488,9 @@ bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
 		return true;
 	}
 
+	if (is_kfence_address(ptr))
+		return false;
+
 	slab = folio_slab(folio);
 	return !poison_slab_object(slab->slab_cache, ptr, ip, false);
 }
@@ -492,9 +500,6 @@ void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip)
 	struct slab *slab;
 	gfp_t flags = 0; /* Might be executing under a lock. */
 
-	if (is_kfence_address(kasan_reset_tag(ptr)))
-		return;
-
 	slab = virt_to_slab(ptr);
 
 	/*
@@ -507,6 +512,9 @@ void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip)
 		return;
 	}
 
+	if (is_kfence_address(ptr))
+		return;
+
 	/* Unpoison the object and save alloc info for non-kmalloc() allocations. */
 	unpoison_slab_object(slab->slab_cache, ptr, size, flags);
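
The reordering in __kasan_slab_free() above is the heart of change 2:
poison_slab_object() resolves the object via virt_to_slab()/nearest_obj(),
which is only meaningful for slab-backed memory, so KFENCE objects must be
filtered out before that helper runs (previously the check sat inside
poison_slab_object() itself, where every caller paid for it). A condensed,
behaviorally equivalent view of the patched function:

/* Condensed view of __kasan_slab_free() as of this patch. */
bool __kasan_slab_free(struct kmem_cache *cache, void *object,
		       unsigned long ip, bool init)
{
	/*
	 * KFENCE objects are not slab-backed; bail out before
	 * poison_slab_object() tries virt_to_slab()/nearest_obj() on them.
	 */
	if (is_kfence_address(object))
		return false;

	/* true means an invalid free was detected and reported. */
	if (poison_slab_object(cache, object, ip, init))
		return true;

	/* Generic KASAN may defer the actual freeing via its quarantine. */
	return kasan_quarantine_put(cache, object);
}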
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -466,35 +466,23 @@ static inline u8 kasan_random_tag(void) { return 0; }
 static inline void kasan_poison(const void *addr, size_t size, u8 value, bool init)
 {
-	addr = kasan_reset_tag(addr);
-
-	/* Skip KFENCE memory if called explicitly outside of sl*b. */
-	if (is_kfence_address(addr))
-		return;
-
 	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
 		return;
 	if (WARN_ON(size & KASAN_GRANULE_MASK))
 		return;
 
-	hw_set_mem_tag_range((void *)addr, size, value, init);
+	hw_set_mem_tag_range(kasan_reset_tag(addr), size, value, init);
 }
 
 static inline void kasan_unpoison(const void *addr, size_t size, bool init)
 {
 	u8 tag = get_tag(addr);
 
-	addr = kasan_reset_tag(addr);
-
-	/* Skip KFENCE memory if called explicitly outside of sl*b. */
-	if (is_kfence_address(addr))
-		return;
-
 	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
 		return;
 	size = round_up(size, KASAN_GRANULE_SIZE);
 
-	hw_set_mem_tag_range((void *)addr, size, tag, init);
+	hw_set_mem_tag_range(kasan_reset_tag(addr), size, tag, init);
 }
 
 static inline bool kasan_byte_accessible(const void *addr)
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -135,10 +135,6 @@ void kasan_poison(const void *addr, size_t size, u8 value, bool init)
 	 */
 	addr = kasan_reset_tag(addr);
 
-	/* Skip KFENCE memory if called explicitly outside of sl*b. */
-	if (is_kfence_address(addr))
-		return;
-
 	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
 		return;
 	if (WARN_ON(size & KASAN_GRANULE_MASK))
@@ -175,14 +171,6 @@ void kasan_unpoison(const void *addr, size_t size, bool init)
 	 */
 	addr = kasan_reset_tag(addr);
 
-	/*
-	 * Skip KFENCE memory if called explicitly outside of sl*b. Also note
-	 * that calls to ksize(), where size is not a multiple of machine-word
-	 * size, would otherwise poison the invalid portion of the word.
-	 */
-	if (is_kfence_address(addr))
-		return;
-
 	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
 		return;
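
Applying the kasan.h hunk above yields the following HW_TAGS inline
kasan_unpoison(): callers are now trusted to have filtered out KFENCE
addresses, and the tag is stripped only at the point where the untagged
address is actually needed:

static inline void kasan_unpoison(const void *addr, size_t size, bool init)
{
	u8 tag = get_tag(addr);	/* preserve the pointer tag for re-tagging */

	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
		return;
	size = round_up(size, KASAN_GRANULE_SIZE);

	hw_set_mem_tag_range(kasan_reset_tag(addr), size, tag, init);
}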