Commit a2abe7cb authored by Sami Tolvanen, committed by Will Deacon

scs: switch to vmapped shadow stacks

The kernel currently uses kmem_cache to allocate shadow call stacks,
which means an overflow may not be detected immediately and can
potentially result in another task's shadow stack being overwritten.

This change switches SCS to use virtually mapped shadow stacks for
tasks, which increases the shadow stack size to a full page and provides
more robust overflow detection, similar to VMAP_STACK.
Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
Acked-by: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20201130233442.2562064-2-samitolvanen@google.com
Signed-off-by: Will Deacon <will@kernel.org>
parent f8394f23
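
For context on the overflow-detection claim in the message above: vmalloc-backed allocations are separated by unmapped guard pages, so a store past the end of a vmapped shadow stack faults immediately instead of silently spilling into a neighbouring kmem_cache object. A minimal sketch of that allocation pattern, using the same __vmalloc_node_range() call the patch adds (the demo_* name is illustrative, not from the patch):

#include <linux/vmalloc.h>

/*
 * Illustrative only: a page-sized vmalloc region. Because VM_NO_GUARD is
 * not passed in vm_flags, the area is followed by an unmapped guard page,
 * so an overflow traps instead of overwriting adjacent memory.
 */
static void *demo_alloc_guarded_stack(int node)
{
	return __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, VMALLOC_END,
				    GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL, 0,
				    node, __builtin_return_address(0));
}
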
include/linux/scs.h

@@ -15,12 +15,8 @@

 #ifdef CONFIG_SHADOW_CALL_STACK

-/*
- * In testing, 1 KiB shadow stack size (i.e. 128 stack frames on a 64-bit
- * architecture) provided ~40% safety margin on stack usage while keeping
- * memory allocation overhead reasonable.
- */
-#define SCS_SIZE		SZ_1K
+#define SCS_ORDER		0
+#define SCS_SIZE		(PAGE_SIZE << SCS_ORDER)
 #define GFP_SCS			(GFP_KERNEL | __GFP_ZERO)

 /* An illegal pointer value to mark the end of the shadow stack. */

@@ -33,6 +29,8 @@

 #define task_scs(tsk)		(task_thread_info(tsk)->scs_base)
 #define task_scs_sp(tsk)	(task_thread_info(tsk)->scs_sp)

+void *scs_alloc(int node);
+void scs_free(void *s);
 void scs_init(void);
 int scs_prepare(struct task_struct *tsk, int node);
 void scs_release(struct task_struct *tsk);

@@ -61,6 +59,8 @@ static inline bool task_scs_end_corrupted(struct task_struct *tsk)

 #else /* CONFIG_SHADOW_CALL_STACK */

+static inline void *scs_alloc(int node) { return NULL; }
+static inline void scs_free(void *s) {}
 static inline void scs_init(void) {}
 static inline void scs_task_reset(struct task_struct *tsk) {}
 static inline int scs_prepare(struct task_struct *tsk, int node) { return 0; }
kernel/scs.c

@@ -5,26 +5,49 @@
  * Copyright (C) 2019 Google LLC
  */

+#include <linux/cpuhotplug.h>
 #include <linux/kasan.h>
 #include <linux/mm.h>
 #include <linux/scs.h>
-#include <linux/slab.h>
+#include <linux/vmalloc.h>
 #include <linux/vmstat.h>

-static struct kmem_cache *scs_cache;
-
 static void __scs_account(void *s, int account)
 {
-	struct page *scs_page = virt_to_page(s);
+	struct page *scs_page = vmalloc_to_page(s);

 	mod_node_page_state(page_pgdat(scs_page), NR_KERNEL_SCS_KB,
 			    account * (SCS_SIZE / SZ_1K));
 }

-static void *scs_alloc(int node)
+/* Matches NR_CACHED_STACKS for VMAP_STACK */
+#define NR_CACHED_SCS 2
+static DEFINE_PER_CPU(void *, scs_cache[NR_CACHED_SCS]);
+
+static void *__scs_alloc(int node)
+{
+	int i;
+	void *s;
+
+	for (i = 0; i < NR_CACHED_SCS; i++) {
+		s = this_cpu_xchg(scs_cache[i], NULL);
+		if (s) {
+			kasan_unpoison_vmalloc(s, SCS_SIZE);
+			memset(s, 0, SCS_SIZE);
+			return s;
+		}
+	}
+
+	return __vmalloc_node_range(SCS_SIZE, 1, VMALLOC_START, VMALLOC_END,
+				    GFP_SCS, PAGE_KERNEL, 0, node,
+				    __builtin_return_address(0));
+}
+
+void *scs_alloc(int node)
 {
-	void *s = kmem_cache_alloc_node(scs_cache, GFP_SCS, node);
+	void *s;

+	s = __scs_alloc(node);
 	if (!s)
 		return NULL;

@@ -34,21 +57,47 @@ static void *scs_alloc(int node)
 	 * Poison the allocation to catch unintentional accesses to
 	 * the shadow stack when KASAN is enabled.
 	 */
-	kasan_poison_object_data(scs_cache, s);
+	kasan_poison_vmalloc(s, SCS_SIZE);
 	__scs_account(s, 1);
 	return s;
 }

-static void scs_free(void *s)
+void scs_free(void *s)
 {
+	int i;
+
 	__scs_account(s, -1);
-	kasan_unpoison_object_data(scs_cache, s);
-	kmem_cache_free(scs_cache, s);
+
+	/*
+	 * We cannot sleep as this can be called in interrupt context,
+	 * so use this_cpu_cmpxchg to update the cache, and vfree_atomic
+	 * to free the stack.
+	 */
+	for (i = 0; i < NR_CACHED_SCS; i++)
+		if (this_cpu_cmpxchg(scs_cache[i], 0, s) == NULL)
+			return;
+
+	vfree_atomic(s);
+}
+
+static int scs_cleanup(unsigned int cpu)
+{
+	int i;
+	void **cache = per_cpu_ptr(scs_cache, cpu);
+
+	for (i = 0; i < NR_CACHED_SCS; i++) {
+		vfree(cache[i]);
+		cache[i] = NULL;
+	}
+
+	return 0;
 }

 void __init scs_init(void)
 {
-	scs_cache = kmem_cache_create("scs_cache", SCS_SIZE, 0, 0, NULL);
+	cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "scs:scs_cache", NULL,
+			  scs_cleanup);
 }

 int scs_prepare(struct task_struct *tsk, int node)
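
The free path in the hunk above cannot sleep (it may run from interrupt context), so it parks the stack in an empty per-CPU cache slot with a cmpxchg and only falls back to vfree_atomic() when every slot is taken. A stripped-down sketch of that recycling pattern (the demo_* names are illustrative, not from the patch):

#include <linux/percpu.h>
#include <linux/vmalloc.h>

#define DEMO_CACHED_BUFS	2
static DEFINE_PER_CPU(void *, demo_cache[DEMO_CACHED_BUFS]);

/*
 * Illustrative only: stash a vmalloc'ed buffer in a free per-CPU slot
 * without sleeping; if every slot is taken, vfree_atomic() defers the
 * actual vfree() to a workqueue, keeping this safe in interrupt context.
 */
static void demo_recycle(void *buf)
{
	int i;

	for (i = 0; i < DEMO_CACHED_BUFS; i++)
		if (this_cpu_cmpxchg(demo_cache[i], NULL, buf) == NULL)
			return;

	vfree_atomic(buf);
}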