Commit 6860f634 authored by Dmitry Vyukov's avatar Dmitry Vyukov Committed by Linus Torvalds

kasan: detect invalid frees for large mempool objects

Detect frees of pointers into the middle of mempool objects.

I did a one-off test, but it turned out to be very tricky, so I reverted
it.  First, mempool does not call kasan_poison_kfree() unless allocation
function fails.  I stubbed an allocation function to fail on second and
subsequent allocations.  But then mempool stopped calling
kasan_poison_kfree() at all, because it does it only when allocation
function is mempool_kmalloc().  We could support this special failing
test allocation function in mempool, but it also can't live with kasan
tests, because these are in a module.

Link: http://lkml.kernel.org/r/bf7a7d035d7a5ed62d2dd0e3d2e8a4fcdf456aa7.1514378558.git.dvyukov@google.com
Signed-off-by: default avatarDmitry Vyukov <dvyukov@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent ee3ce779
...@@ -57,7 +57,7 @@ void kasan_init_slab_obj(struct kmem_cache *cache, const void *object); ...@@ -57,7 +57,7 @@ void kasan_init_slab_obj(struct kmem_cache *cache, const void *object);
void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags); void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
void kasan_kfree_large(void *ptr, unsigned long ip); void kasan_kfree_large(void *ptr, unsigned long ip);
void kasan_poison_kfree(void *ptr); void kasan_poison_kfree(void *ptr, unsigned long ip);
void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size, void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size,
gfp_t flags); gfp_t flags);
void kasan_krealloc(const void *object, size_t new_size, gfp_t flags); void kasan_krealloc(const void *object, size_t new_size, gfp_t flags);
...@@ -109,7 +109,7 @@ static inline void kasan_init_slab_obj(struct kmem_cache *cache, ...@@ -109,7 +109,7 @@ static inline void kasan_init_slab_obj(struct kmem_cache *cache,
static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {} static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {}
static inline void kasan_kfree_large(void *ptr, unsigned long ip) {} static inline void kasan_kfree_large(void *ptr, unsigned long ip) {}
static inline void kasan_poison_kfree(void *ptr) {} static inline void kasan_poison_kfree(void *ptr, unsigned long ip) {}
static inline void kasan_kmalloc(struct kmem_cache *s, const void *object, static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
size_t size, gfp_t flags) {} size_t size, gfp_t flags) {}
static inline void kasan_krealloc(const void *object, size_t new_size, static inline void kasan_krealloc(const void *object, size_t new_size,
......
...@@ -588,17 +588,22 @@ void kasan_krealloc(const void *object, size_t size, gfp_t flags) ...@@ -588,17 +588,22 @@ void kasan_krealloc(const void *object, size_t size, gfp_t flags)
kasan_kmalloc(page->slab_cache, object, size, flags); kasan_kmalloc(page->slab_cache, object, size, flags);
} }
void kasan_poison_kfree(void *ptr) void kasan_poison_kfree(void *ptr, unsigned long ip)
{ {
struct page *page; struct page *page;
page = virt_to_head_page(ptr); page = virt_to_head_page(ptr);
if (unlikely(!PageSlab(page))) if (unlikely(!PageSlab(page))) {
if (ptr != page_address(page)) {
kasan_report_invalid_free(ptr, ip);
return;
}
kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page), kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
KASAN_FREE_PAGE); KASAN_FREE_PAGE);
else } else {
kasan_poison_slab_free(page->slab_cache, ptr); kasan_poison_slab_free(page->slab_cache, ptr);
}
} }
void kasan_kfree_large(void *ptr, unsigned long ip) void kasan_kfree_large(void *ptr, unsigned long ip)
......
...@@ -103,10 +103,10 @@ static inline void poison_element(mempool_t *pool, void *element) ...@@ -103,10 +103,10 @@ static inline void poison_element(mempool_t *pool, void *element)
} }
#endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */ #endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
static void kasan_poison_element(mempool_t *pool, void *element) static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
{ {
if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
kasan_poison_kfree(element); kasan_poison_kfree(element, _RET_IP_);
if (pool->alloc == mempool_alloc_pages) if (pool->alloc == mempool_alloc_pages)
kasan_free_pages(element, (unsigned long)pool->pool_data); kasan_free_pages(element, (unsigned long)pool->pool_data);
} }
...@@ -119,7 +119,7 @@ static void kasan_unpoison_element(mempool_t *pool, void *element, gfp_t flags) ...@@ -119,7 +119,7 @@ static void kasan_unpoison_element(mempool_t *pool, void *element, gfp_t flags)
kasan_alloc_pages(element, (unsigned long)pool->pool_data); kasan_alloc_pages(element, (unsigned long)pool->pool_data);
} }
static void add_element(mempool_t *pool, void *element) static __always_inline void add_element(mempool_t *pool, void *element)
{ {
BUG_ON(pool->curr_nr >= pool->min_nr); BUG_ON(pool->curr_nr >= pool->min_nr);
poison_element(pool, element); poison_element(pool, element);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment