Commit 10d1f8cb authored by Marco Elver, committed by Linus Torvalds

mm/slab: refactor common ksize KASAN logic into slab_common.c

This refactors the ksize() code common to the various allocators into
slab_common.c: __ksize() is the allocator-specific implementation without
instrumentation, whereas ksize() includes the required KASAN logic.

Link: http://lkml.kernel.org/r/20190626142014.141844-5-elver@google.com
Signed-off-by: Marco Elver <elver@google.com>
Acked-by: Christoph Lameter <cl@linux.com>
Reviewed-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Konovalov <andreyknvl@google.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Kees Cook <keescook@chromium.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent bb104ed7
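In rough terms, the split described in the commit message looks like the sketch below. This is a condensed C sketch of the new common wrapper (taken from the mm/slab_common.c hunk in the diff that follows), not verbatim kernel source:

/* Condensed sketch of the new layering (see the full diff below).
 * Each allocator (SLAB, SLUB, SLOB) now implements and exports only the
 * uninstrumented __ksize(); the single common wrapper in mm/slab_common.c
 * adds the KASAN handling that was previously duplicated per allocator.
 */
size_t ksize(const void *objp)
{
	size_t size = __ksize(objp);	/* allocator-specific, uninstrumented */

	/*
	 * Callers may use the whole allocated area, so the KASAN shadow
	 * for it is unpoisoned here, in one place.
	 */
	kasan_unpoison_shadow(objp, size);
	return size;
}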
include/linux/slab.h
@@ -184,6 +184,7 @@ void * __must_check __krealloc(const void *, size_t, gfp_t);
 void * __must_check krealloc(const void *, size_t, gfp_t);
 void kfree(const void *);
 void kzfree(const void *);
+size_t __ksize(const void *);
 size_t ksize(const void *);
 #ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
mm/slab.c
@@ -4204,20 +4204,12 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
 #endif /* CONFIG_HARDENED_USERCOPY */
 /**
- * ksize - get the actual amount of memory allocated for a given object
- * @objp: Pointer to the object
+ * __ksize -- Uninstrumented ksize.
  *
- * kmalloc may internally round up allocations and return more memory
- * than requested. ksize() can be used to determine the actual amount of
- * memory allocated. The caller may use this additional memory, even though
- * a smaller amount of memory was initially specified with the kmalloc call.
- * The caller must guarantee that objp points to a valid object previously
- * allocated with either kmalloc() or kmem_cache_alloc(). The object
- * must not be freed during the duration of the call.
- *
- * Return: size of the actual memory used by @objp in bytes
+ * Unlike ksize(), __ksize() is uninstrumented, and does not provide the same
+ * safety checks as ksize() with KASAN instrumentation enabled.
  */
-size_t ksize(const void *objp)
+size_t __ksize(const void *objp)
 {
 	struct kmem_cache *c;
 	size_t size;
@@ -4228,11 +4220,7 @@ size_t ksize(const void *objp)
 	c = virt_to_cache(objp);
 	size = c ? c->object_size : 0;
-	/* We assume that ksize callers could use the whole allocated area,
-	 * so we need to unpoison this area.
-	 */
-	kasan_unpoison_shadow(objp, size);
 	return size;
 }
-EXPORT_SYMBOL(ksize);
+EXPORT_SYMBOL(__ksize);
mm/slab_common.c
@@ -1597,6 +1597,32 @@ void kzfree(const void *p)
 }
 EXPORT_SYMBOL(kzfree);
+/**
+ * ksize - get the actual amount of memory allocated for a given object
+ * @objp: Pointer to the object
+ *
+ * kmalloc may internally round up allocations and return more memory
+ * than requested. ksize() can be used to determine the actual amount of
+ * memory allocated. The caller may use this additional memory, even though
+ * a smaller amount of memory was initially specified with the kmalloc call.
+ * The caller must guarantee that objp points to a valid object previously
+ * allocated with either kmalloc() or kmem_cache_alloc(). The object
+ * must not be freed during the duration of the call.
+ *
+ * Return: size of the actual memory used by @objp in bytes
+ */
+size_t ksize(const void *objp)
+{
+	size_t size = __ksize(objp);
+	/*
+	 * We assume that ksize callers could use whole allocated area,
+	 * so we need to unpoison this area.
+	 */
+	kasan_unpoison_shadow(objp, size);
+	return size;
+}
+EXPORT_SYMBOL(ksize);
 /* Tracepoints definitions. */
 EXPORT_TRACEPOINT_SYMBOL(kmalloc);
 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
mm/slob.c
@@ -527,7 +527,7 @@ void kfree(const void *block)
 EXPORT_SYMBOL(kfree);
 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
-size_t ksize(const void *block)
+size_t __ksize(const void *block)
 {
 	struct page *sp;
 	int align;
@@ -545,7 +545,7 @@ size_t ksize(const void *block)
 	m = (unsigned int *)(block - align);
 	return SLOB_UNITS(*m) * SLOB_UNIT;
 }
-EXPORT_SYMBOL(ksize);
+EXPORT_SYMBOL(__ksize);
 int __kmem_cache_create(struct kmem_cache *c, slab_flags_t flags)
 {
mm/slub.c
@@ -3895,7 +3895,7 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
 }
 #endif /* CONFIG_HARDENED_USERCOPY */
-static size_t __ksize(const void *object)
+size_t __ksize(const void *object)
 {
 	struct page *page;
@@ -3911,17 +3911,7 @@ static size_t __ksize(const void *object)
 	return slab_ksize(page->slab_cache);
 }
+EXPORT_SYMBOL(__ksize);
-size_t ksize(const void *object)
-{
-	size_t size = __ksize(object);
-	/* We assume that ksize callers could use whole allocated area,
-	 * so we need to unpoison this area.
-	 */
-	kasan_unpoison_shadow(object, size);
-	return size;
-}
-EXPORT_SYMBOL(ksize);
 void kfree(const void *x)
 {
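As a usage reminder, the kerneldoc added in mm/slab_common.c above spells out the ksize() contract: kmalloc() may round allocations up, and the caller may use the full reported size. A minimal illustrative sketch follows; the function name ksize_usage_example and the request size of 13 bytes are made up for illustration, and the rounded-up size depends on the configured kmalloc caches:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>

static int ksize_usage_example(void)
{
	/* Illustrative only: kmalloc() may round the request up to the next
	 * kmalloc cache size; ksize() reports that usable size.
	 */
	void *buf = kmalloc(13, GFP_KERNEL);
	size_t usable;

	if (!buf)
		return -ENOMEM;

	usable = ksize(buf);	/* typically >= 13, e.g. 16 */

	/* The caller may legitimately use all 'usable' bytes. */
	memset(buf, 0, usable);

	kfree(buf);
	return 0;
}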