Commit d08b9f0c authored by Sami Tolvanen, committed by Will Deacon

scs: Add support for Clang's Shadow Call Stack (SCS)

This change adds generic support for Clang's Shadow Call Stack,
which uses a shadow stack to protect return addresses from being
overwritten by an attacker. Details are available here:

  https://clang.llvm.org/docs/ShadowCallStack.html

Note that security guarantees in the kernel differ from the ones
documented for user space. The kernel must store addresses of
shadow stacks in memory, which means an attacker capable of reading
and writing arbitrary memory may be able to locate them and hijack
control flow by modifying the stacks.

Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
Reviewed-by: Kees Cook <keescook@chromium.org>
Reviewed-by: Miguel Ojeda <miguel.ojeda.sandonis@gmail.com>
[will: Numerous cosmetic changes]
Signed-off-by: Will Deacon <will@kernel.org>
parent 6a8b55ed
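
For readers unfamiliar with the mechanism: under -fsanitize=shadow-call-stack, the compiler saves the return address to a separate, dedicated shadow stack in each non-leaf function's prologue and reloads it from there in the epilogue, so an overflow of an on-stack buffer cannot redirect the return. A minimal C sketch of the equivalent logic, illustrative only and not part of this commit (scs_sp stands in for the reserved shadow-stack register, x18 on arm64):

static unsigned long shadow_stack[128];
static unsigned long *scs_sp = shadow_stack;	/* stand-in for x18 */

void instrumented(void)
{
	/* prologue: push the real return address onto the shadow stack */
	*scs_sp++ = (unsigned long)__builtin_return_address(0);

	/* ... body; a stack-smashing write cannot reach the copy above ... */

	/* epilogue: pop the saved address; the generated code returns
	 * through this value rather than the one on the regular stack */
	--scs_sp;
}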

Makefile
@@ -866,6 +866,12 @@ ifdef CONFIG_LIVEPATCH
KBUILD_CFLAGS += $(call cc-option, -flive-patching=inline-clone)
endif

ifdef CONFIG_SHADOW_CALL_STACK
CC_FLAGS_SCS := -fsanitize=shadow-call-stack
KBUILD_CFLAGS += $(CC_FLAGS_SCS)
export CC_FLAGS_SCS
endif

# arch Makefile may override CC so keep this after arch Makefile is included
NOSTDINC_FLAGS += -nostdinc -isystem $(shell $(CC) -print-file-name=include)

arch/Kconfig
@@ -533,6 +533,30 @@ config STACKPROTECTOR_STRONG
	  about 20% of all kernel functions, which increases the kernel code
	  size by about 2%.

config ARCH_SUPPORTS_SHADOW_CALL_STACK
	bool
	help
	  An architecture should select this if it supports Clang's Shadow
	  Call Stack, has asm/scs.h, and implements runtime support for shadow
	  stack switching.

config SHADOW_CALL_STACK
	bool "Clang Shadow Call Stack"
	depends on CC_IS_CLANG && ARCH_SUPPORTS_SHADOW_CALL_STACK
	help
	  This option enables Clang's Shadow Call Stack, which uses a
	  shadow stack to protect function return addresses from being
	  overwritten by an attacker. More information can be found in
	  Clang's documentation:

	    https://clang.llvm.org/docs/ShadowCallStack.html

	  Note that security guarantees in the kernel differ from the
	  ones documented for user space. The kernel must store addresses
	  of shadow stacks in memory, which means an attacker capable of
	  reading and writing arbitrary memory may be able to locate them
	  and hijack control flow by modifying the stacks.

config HAVE_ARCH_WITHIN_STACK_FRAMES
	bool
	help

include/linux/compiler-clang.h
@@ -42,3 +42,7 @@
 * compilers, like ICC.
 */
#define barrier() __asm__ __volatile__("" : : : "memory")

#if __has_feature(shadow_call_stack)
# define __noscs __attribute__((__no_sanitize__("shadow-call-stack")))
#endif
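
The __noscs attribute defined above lets individual functions opt out of the instrumentation. A minimal usage sketch, with early_park_cpu as a hypothetical example (typical candidates are functions that run before the shadow-stack register is initialized):

#include <linux/compiler.h>

/* Compiled without shadow-call-stack pushes/pops. */
static void __noscs early_park_cpu(void)
{
	/* safe to run before the shadow stack is set up */
}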

include/linux/compiler_types.h
@@ -193,6 +193,10 @@ struct ftrace_likely_data {
# define randomized_struct_fields_end
#endif

#ifndef __noscs
# define __noscs
#endif

#ifndef asm_volatile_goto
#define asm_volatile_goto(x...) asm goto(x)
#endif

include/linux/scs.h (new file)
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Shadow Call Stack support.
*
* Copyright (C) 2019 Google LLC
*/
#ifndef _LINUX_SCS_H
#define _LINUX_SCS_H
#include <linux/gfp.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/sizes.h>
#ifdef CONFIG_SHADOW_CALL_STACK
/*
* In testing, 1 KiB shadow stack size (i.e. 128 stack frames on a 64-bit
* architecture) provided ~40% safety margin on stack usage while keeping
* memory allocation overhead reasonable.
*/
#define SCS_SIZE SZ_1K
#define GFP_SCS (GFP_KERNEL | __GFP_ZERO)
/* An illegal pointer value to mark the end of the shadow stack. */
#define SCS_END_MAGIC (0x5f6UL + POISON_POINTER_DELTA)
#define task_scs(tsk) (task_thread_info(tsk)->scs_base)
#define task_scs_offset(tsk) (task_thread_info(tsk)->scs_offset)
void scs_init(void);
int scs_prepare(struct task_struct *tsk, int node);
void scs_release(struct task_struct *tsk);
static inline void scs_task_reset(struct task_struct *tsk)
{
	/*
	 * Reset the shadow stack to the base address in case the task
	 * is reused.
	 */
	task_scs_offset(tsk) = 0;
}
static inline unsigned long *__scs_magic(void *s)
{
	return (unsigned long *)(s + SCS_SIZE) - 1;
}
static inline bool scs_corrupted(struct task_struct *tsk)
{
	unsigned long *magic = __scs_magic(task_scs(tsk));

	return (task_scs_offset(tsk) >= SCS_SIZE - 1 ||
		READ_ONCE_NOCHECK(*magic) != SCS_END_MAGIC);
}
#else /* CONFIG_SHADOW_CALL_STACK */
static inline void scs_init(void) {}
static inline void scs_task_reset(struct task_struct *tsk) {}
static inline int scs_prepare(struct task_struct *tsk, int node) { return 0; }
static inline bool scs_corrupted(struct task_struct *tsk) { return false; }
static inline void scs_release(struct task_struct *tsk) {}
#endif /* CONFIG_SHADOW_CALL_STACK */
#endif /* _LINUX_SCS_H */
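
To make the constants above concrete: with SCS_SIZE of 1 KiB and 8-byte longs, each shadow stack has 128 slots, the last of which is reserved for the end marker. A layout sketch, assuming a 64-bit architecture:

/*
 *  base                                        base + SCS_SIZE
 *  |<-------- 127 usable 8-byte slots -------->| SCS_END_MAGIC |
 *                                                ^
 *                                                __scs_magic(base)
 *
 * scs_prepare() starts each task at offset 0; scs_corrupted() reports
 * the stack as bad once the offset runs into the magic slot or the
 * magic value itself has been overwritten.
 */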

init/init_task.c
@@ -11,6 +11,7 @@
#include <linux/mm.h>
#include <linux/audit.h>
#include <linux/numa.h>
#include <linux/scs.h>
#include <asm/pgtable.h>
#include <linux/uaccess.h>

@@ -50,6 +51,13 @@ static struct sighand_struct init_sighand = {
	.signalfd_wqh = __WAIT_QUEUE_HEAD_INITIALIZER(init_sighand.signalfd_wqh),
};

#ifdef CONFIG_SHADOW_CALL_STACK
unsigned long init_shadow_call_stack[SCS_SIZE / sizeof(long)]
	__init_task_data = {
	[(SCS_SIZE / sizeof(long)) - 1] = SCS_END_MAGIC
};
#endif

/*
 * Set up the first task table, touch at your own risk!. Base=0,
 * limit=0x1fffff (=2MB)

kernel/Makefile
@@ -103,6 +103,7 @@ obj-$(CONFIG_TRACEPOINTS) += trace/
obj-$(CONFIG_IRQ_WORK) += irq_work.o
obj-$(CONFIG_CPU_PM) += cpu_pm.o
obj-$(CONFIG_BPF) += bpf/
obj-$(CONFIG_SHADOW_CALL_STACK) += scs.o
obj-$(CONFIG_PERF_EVENTS) += events/

kernel/fork.c
@@ -94,6 +94,7 @@
#include <linux/thread_info.h>
#include <linux/stackleak.h>
#include <linux/kasan.h>
#include <linux/scs.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>

@@ -456,6 +457,8 @@ void put_task_stack(struct task_struct *tsk)
void free_task(struct task_struct *tsk)
{
	scs_release(tsk);

#ifndef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * The task is finally done with both the stack and thread_info,

@@ -840,6 +843,8 @@ void __init fork_init(void)
			  NULL, free_vm_stack_cache);
#endif

	scs_init();

	lockdep_init_task(&init_task);
	uprobes_init();
}

@@ -899,6 +904,10 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
	if (err)
		goto free_stack;

	err = scs_prepare(tsk, node);
	if (err)
		goto free_stack;

#ifdef CONFIG_SECCOMP
	/*
	 * We must handle setting up seccomp filters once we're under

kernel/sched/core.c
@@ -11,6 +11,7 @@
#include <linux/nospec.h>
#include <linux/kcov.h>
#include <linux/scs.h>
#include <asm/switch_to.h>
#include <asm/tlb.h>

@@ -6040,6 +6041,7 @@ void init_idle(struct task_struct *idle, int cpu)
	idle->se.exec_start = sched_clock();
	idle->flags |= PF_IDLE;
	scs_task_reset(idle);
	kasan_unpoison_task_stack(idle);

#ifdef CONFIG_SMP

kernel/scs.c (new file)
// SPDX-License-Identifier: GPL-2.0
/*
* Shadow Call Stack support.
*
* Copyright (C) 2019 Google LLC
*/
#include <linux/kasan.h>
#include <linux/scs.h>
#include <linux/slab.h>
#include <asm/scs.h>
static struct kmem_cache *scs_cache;
static void *scs_alloc(int node)
{
	void *s;

	s = kmem_cache_alloc_node(scs_cache, GFP_SCS, node);
	if (s) {
		*__scs_magic(s) = SCS_END_MAGIC;

		/*
		 * Poison the allocation to catch unintentional accesses to
		 * the shadow stack when KASAN is enabled.
		 */
		kasan_poison_object_data(scs_cache, s);
	}

	return s;
}
static void scs_free(void *s)
{
	kasan_unpoison_object_data(scs_cache, s);
	kmem_cache_free(scs_cache, s);
}
void __init scs_init(void)
{
	scs_cache = kmem_cache_create("scs_cache", SCS_SIZE, 0, 0, NULL);
}
int scs_prepare(struct task_struct *tsk, int node)
{
	void *s = scs_alloc(node);

	if (!s)
		return -ENOMEM;

	task_scs(tsk) = s;
	task_scs_offset(tsk) = 0;

	return 0;
}
void scs_release(struct task_struct *tsk)
{
	void *s = task_scs(tsk);

	if (!s)
		return;

	WARN(scs_corrupted(tsk), "corrupted shadow stack detected when freeing task\n");

	scs_free(s);
}
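
Taken together, the hooks added by this patch give each task's shadow stack the following lifecycle (a summary of the call sites above, not additional code):

/*
 * fork_init()
 *   scs_init()        - create the scs_cache kmem cache
 * dup_task_struct()
 *   scs_prepare()     - allocate a shadow stack, offset = 0
 * init_idle()
 *   scs_task_reset()  - rewind the offset when an idle task is reused
 * free_task()
 *   scs_release()     - WARN if corrupted, then free the stack
 */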