Commit b89fb5ef authored by Alexander Potapenko, committed by Linus Torvalds

mm, kfence: insert KFENCE hooks for SLUB

Inserts KFENCE hooks into the SLUB allocator.

To pass the originally requested size to KFENCE, add an argument
'orig_size' to slab_alloc*(). The additional argument is required because
kmalloc() allocations use size classes (e.g. a request for 272 bytes
returns an object of size 512), so kmem_cache::size does not represent
the kmalloc caller's requested size; 'orig_size' propagates the
originally requested size to KFENCE.

Without the originally requested size, we would not be able to detect
out-of-bounds accesses for objects placed at the end of a KFENCE object
page when the requested size is smaller than the kmalloc size class the
allocation was bucketed into.
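As an illustration only (this is not kernel code, and the power-of-two
rounding below only approximates the real kmalloc size classes, which also
include 96- and 192-byte caches), the userspace sketch that follows shows how
much slack a size-class allocation carries beyond the requested size; that
slack is exactly the region KFENCE cannot police without knowing the
original size:

#include <stdio.h>

/* Round a request up to the next power-of-two class (>= 8 bytes);
 * a rough stand-in for kmalloc size-class bucketing in this range. */
static size_t size_class(size_t req)
{
	size_t c = 8;

	while (c < req)
		c <<= 1;
	return c;
}

int main(void)
{
	size_t req = 272;
	size_t cls = size_class(req);	/* 512, as in the example above */

	printf("requested %zu bytes -> size class %zu\n", req, cls);
	printf("out-of-bounds slack undetectable without orig_size: %zu bytes\n",
	       cls - req);
	return 0;
}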

When KFENCE is disabled, there is no additional overhead, since
slab_alloc*() functions are __always_inline.
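A rough sketch of why that holds (paraphrased, not the actual header: the
real kfence_alloc() is defined in include/linux/kfence.h by an earlier patch
in this series and additionally gates allocation behind a static branch).
When KFENCE is compiled out, the hook reduces to a constant NULL, and after
inlining into the __always_inline slab_alloc*() callers the new
'if (unlikely(object)) goto out;' test is folded away:

/* Sketch only -- not the actual kernel header. */
#ifdef CONFIG_KFENCE
void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags);

static __always_inline void *kfence_alloc(struct kmem_cache *s, size_t size,
					  gfp_t flags)
{
	/* The real hook also checks a static key before calling out. */
	return __kfence_alloc(s, size, flags);
}
#else
static inline void *kfence_alloc(struct kmem_cache *s, size_t size,
				 gfp_t flags)
{
	return NULL;	/* constant-folds away in the inlined callers */
}
#endif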

Link: https://lkml.kernel.org/r/20201103175841.3495947-6-elver@google.com
Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Alexander Potapenko <glider@google.com>
Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
Reviewed-by: Jann Horn <jannh@google.com>
Co-developed-by: Marco Elver <elver@google.com>
Cc: Andrey Konovalov <andreyknvl@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christopher Lameter <cl@linux.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Hillf Danton <hdanton@sina.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Joern Engel <joern@purestorage.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Paul E. McKenney <paulmck@kernel.org>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: SeongJae Park <sjpark@amazon.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d3fb45f3
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -7,6 +7,7 @@
  *
  * (C) 2007 SGI, Christoph Lameter
  */
+#include <linux/kfence.h>
 #include <linux/kobject.h>
 #include <linux/reciprocal_div.h>
@@ -185,6 +186,8 @@ static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
 					const struct page *page, void *obj)
 {
+	if (is_kfence_address(obj))
+		return 0;
 	return __obj_to_index(cache, page_address(page), obj);
 }
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -317,6 +317,8 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
 	/* Set required struct page fields. */
 	page = virt_to_page(meta->addr);
 	page->slab_cache = cache;
+	if (IS_ENABLED(CONFIG_SLUB))
+		page->objects = 1;
 	if (IS_ENABLED(CONFIG_SLAB))
 		page->s_mem = addr;
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -27,6 +27,7 @@
 #include <linux/ctype.h>
 #include <linux/debugobjects.h>
 #include <linux/kallsyms.h>
+#include <linux/kfence.h>
 #include <linux/memory.h>
 #include <linux/math64.h>
 #include <linux/fault-inject.h>
@@ -1570,6 +1571,11 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
 	void *old_tail = *tail ? *tail : *head;
 	int rsize;
 
+	if (is_kfence_address(next)) {
+		slab_free_hook(s, next);
+		return true;
+	}
+
 	/* Head and tail of the reconstructed freelist */
 	*head = NULL;
 	*tail = NULL;
@@ -2809,7 +2815,7 @@ static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
  * Otherwise we can simply pick the next object from the lockless free list.
  */
 static __always_inline void *slab_alloc_node(struct kmem_cache *s,
-		gfp_t gfpflags, int node, unsigned long addr)
+		gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
 {
 	void *object;
 	struct kmem_cache_cpu *c;
@@ -2820,6 +2826,11 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 	s = slab_pre_alloc_hook(s, &objcg, 1, gfpflags);
 	if (!s)
 		return NULL;
+
+	object = kfence_alloc(s, orig_size, gfpflags);
+	if (unlikely(object))
+		goto out;
+
 redo:
 	/*
 	 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
@@ -2892,20 +2903,21 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 	if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object)
 		memset(kasan_reset_tag(object), 0, s->object_size);
 
+out:
 	slab_post_alloc_hook(s, objcg, gfpflags, 1, &object);
 
 	return object;
 }
 
 static __always_inline void *slab_alloc(struct kmem_cache *s,
-		gfp_t gfpflags, unsigned long addr)
+		gfp_t gfpflags, unsigned long addr, size_t orig_size)
 {
-	return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
+	return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr, orig_size);
 }
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, _RET_IP_, s->object_size);
 
 	trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
 				s->size, gfpflags);
@@ -2917,7 +2929,7 @@ EXPORT_SYMBOL(kmem_cache_alloc);
 #ifdef CONFIG_TRACING
 void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
-	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, _RET_IP_, size);
 	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
 	ret = kasan_kmalloc(s, ret, size, gfpflags);
 	return ret;
@@ -2928,7 +2940,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_trace);
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-	void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
+	void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_, s->object_size);
 
 	trace_kmem_cache_alloc_node(_RET_IP_, ret,
 				    s->object_size, s->size, gfpflags, node);
@@ -2942,7 +2954,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
 				    gfp_t gfpflags,
 				    int node, size_t size)
 {
-	void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
+	void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_, size);
 
 	trace_kmalloc_node(_RET_IP_, ret,
 			   size, s->size, gfpflags, node);
@@ -2976,6 +2988,9 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 
 	stat(s, FREE_SLOWPATH);
 
+	if (kfence_free(head))
+		return;
+
 	if (kmem_cache_debug(s) &&
 	    !free_debug_processing(s, page, head, tail, cnt, addr))
 		return;
@@ -3220,6 +3235,13 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
 		df->s = cache_from_obj(s, object); /* Support for memcg */
 	}
 
+	if (is_kfence_address(object)) {
+		slab_free_hook(df->s, object);
+		__kfence_free(object);
+		p[size] = NULL; /* mark object processed */
+		return size;
+	}
+
 	/* Start new detached freelist */
 	df->page = page;
 	set_freepointer(df->s, object, NULL);
@@ -3295,8 +3317,14 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 	c = this_cpu_ptr(s->cpu_slab);
 
 	for (i = 0; i < size; i++) {
-		void *object = c->freelist;
+		void *object = kfence_alloc(s, s->object_size, flags);
 
+		if (unlikely(object)) {
+			p[i] = object;
+			continue;
+		}
+
+		object = c->freelist;
 		if (unlikely(!object)) {
 			/*
 			 * We may have removed an object from c->freelist using
@@ -4021,7 +4049,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	ret = slab_alloc(s, flags, _RET_IP_);
+	ret = slab_alloc(s, flags, _RET_IP_, size);
 
 	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
@@ -4069,7 +4097,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	ret = slab_alloc_node(s, flags, node, _RET_IP_);
+	ret = slab_alloc_node(s, flags, node, _RET_IP_, size);
 
 	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
@@ -4095,6 +4123,7 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
 	struct kmem_cache *s;
 	unsigned int offset;
 	size_t object_size;
+	bool is_kfence = is_kfence_address(ptr);
 
 	ptr = kasan_reset_tag(ptr);
@@ -4107,10 +4136,13 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
 			       to_user, 0, n);
 
 	/* Find offset within object. */
-	offset = (ptr - page_address(page)) % s->size;
+	if (is_kfence)
+		offset = ptr - kfence_object_start(ptr);
+	else
+		offset = (ptr - page_address(page)) % s->size;
 
 	/* Adjust for redzone and reject if within the redzone. */
-	if (kmem_cache_debug_flags(s, SLAB_RED_ZONE)) {
+	if (!is_kfence && kmem_cache_debug_flags(s, SLAB_RED_ZONE)) {
 		if (offset < s->red_left_pad)
 			usercopy_abort("SLUB object in left red zone",
 				       s->name, to_user, offset, n);
@@ -4527,7 +4559,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	ret = slab_alloc(s, gfpflags, caller);
+	ret = slab_alloc(s, gfpflags, caller, size);
 
 	/* Honor the call site pointer we received. */
 	trace_kmalloc(caller, ret, size, s->size, gfpflags);
@@ -4558,7 +4590,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	ret = slab_alloc_node(s, gfpflags, node, caller);
+	ret = slab_alloc_node(s, gfpflags, node, caller, size);
 
 	/* Honor the call site pointer we received. */
 	trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);