Commit eeb3160c authored by Andrey Konovalov, committed by Linus Torvalds

kasan, mm: rename kasan_poison_kfree

Rename kasan_poison_kfree() to kasan_slab_free_mempool() as it better
reflects what this annotation does. Also add a comment that explains the
PageSlab() check.

No functional changes.
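
For illustration only, a minimal sketch (not part of this patch) of a mempool
that hits the !PageSlab() case: a kmalloc-backed pool whose element size is
assumed large enough that kmalloc() falls back onto the page allocator. The
pool name, init function, and size here are hypothetical.

  #include <linux/mempool.h>
  #include <linux/slab.h>

  static mempool_t *example_pool;

  static int example_init(void)
  {
  	/*
  	 * pool->alloc == mempool_kmalloc, so kasan_poison_element()
  	 * calls kasan_slab_free_mempool() on each returned element.
  	 * 4 * PAGE_SIZE is assumed to exceed the largest kmalloc
  	 * cache, making the elements page_alloc-backed (!PageSlab()).
  	 */
  	example_pool = mempool_create_kmalloc_pool(4, 4 * PAGE_SIZE);
  	return example_pool ? 0 : -ENOMEM;
  }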

Link: https://lkml.kernel.org/r/141675fb493555e984c5dca555e9d9f768c7bbaa.1606162397.git.andreyknvl@google.com
Link: https://linux-review.googlesource.com/id/I5026f87364e556b506ef1baee725144bb04b8810
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Marco Elver <elver@google.com>
Tested-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Branislav Rankov <Branislav.Rankov@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Kevin Brodsky <kevin.brodsky@arm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 34303244
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -176,6 +176,13 @@ static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object,
 	return false;
 }
 
+void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
+static __always_inline void kasan_slab_free_mempool(void *ptr, unsigned long ip)
+{
+	if (kasan_enabled())
+		__kasan_slab_free_mempool(ptr, ip);
+}
+
 void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
 					void *object, gfp_t flags);
 static __always_inline void * __must_check kasan_slab_alloc(
@@ -216,13 +223,6 @@ static __always_inline void * __must_check kasan_krealloc(const void *object,
 	return (void *)object;
 }
 
-void __kasan_poison_kfree(void *ptr, unsigned long ip);
-static __always_inline void kasan_poison_kfree(void *ptr, unsigned long ip)
-{
-	if (kasan_enabled())
-		__kasan_poison_kfree(ptr, ip);
-}
-
 void __kasan_kfree_large(void *ptr, unsigned long ip);
 static __always_inline void kasan_kfree_large(void *ptr, unsigned long ip)
 {
@@ -261,6 +261,7 @@ static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
 {
 	return false;
 }
+static inline void kasan_slab_free_mempool(void *ptr, unsigned long ip) {}
 static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
 				   gfp_t flags)
 {
@@ -280,7 +281,6 @@ static inline void *kasan_krealloc(const void *object, size_t new_size,
 {
 	return (void *)object;
 }
-static inline void kasan_poison_kfree(void *ptr, unsigned long ip) {}
 static inline void kasan_kfree_large(void *ptr, unsigned long ip) {}
 
 #endif /* CONFIG_KASAN */
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -331,6 +331,29 @@ bool __kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
 	return ____kasan_slab_free(cache, object, ip, true);
 }
 
+void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
+{
+	struct page *page;
+
+	page = virt_to_head_page(ptr);
+
+	/*
+	 * Even though this function is only called for kmem_cache_alloc and
+	 * kmalloc backed mempool allocations, those allocations can still be
+	 * !PageSlab() when the size provided to kmalloc is larger than
+	 * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
+	 */
+	if (unlikely(!PageSlab(page))) {
+		if (ptr != page_address(page)) {
+			kasan_report_invalid_free(ptr, ip);
+			return;
+		}
+		poison_range(ptr, page_size(page), KASAN_FREE_PAGE);
+	} else {
+		____kasan_slab_free(page->slab_cache, ptr, ip, false);
+	}
+}
+
 static void set_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
 {
 	kasan_set_track(&kasan_get_alloc_meta(cache, object)->alloc_track, flags);
@@ -422,23 +445,6 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
 					flags, true);
 }
 
-void __kasan_poison_kfree(void *ptr, unsigned long ip)
-{
-	struct page *page;
-
-	page = virt_to_head_page(ptr);
-
-	if (unlikely(!PageSlab(page))) {
-		if (ptr != page_address(page)) {
-			kasan_report_invalid_free(ptr, ip);
-			return;
-		}
-		poison_range(ptr, page_size(page), KASAN_FREE_PAGE);
-	} else {
-		____kasan_slab_free(page->slab_cache, ptr, ip, false);
-	}
-}
-
 void __kasan_kfree_large(void *ptr, unsigned long ip)
 {
 	if (ptr != page_address(virt_to_head_page(ptr)))
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -104,7 +104,7 @@ static inline void poison_element(mempool_t *pool, void *element)
 static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
 {
 	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
-		kasan_poison_kfree(element, _RET_IP_);
+		kasan_slab_free_mempool(element, _RET_IP_);
 	else if (pool->alloc == mempool_alloc_pages)
 		kasan_free_pages(element, (unsigned long)pool->pool_data);
 }
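
For context, an illustrative sequence (not part of this patch) showing where
the renamed annotation fires, reusing the hypothetical example_pool from the
sketch in the commit message: when mempool_free() re-adds an element to the
pool, add_element() calls kasan_poison_element() above, which now lands in
kasan_slab_free_mempool().

  static void example_use(void)
  {
  	void *elem = mempool_alloc(example_pool, GFP_KERNEL);

  	/* May keep the element in the pool and poison it, rather than
  	 * actually freeing it. */
  	mempool_free(elem, example_pool);

  	/* Use-after-free through the pool: KASAN reports this access. */
  	*(volatile char *)elem;
  }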