Commit a5e090ac authored by Russell King's avatar Russell King

ARM: software-based privileged-no-access support

Provide a software-based implementation of the privileged no-access
support found in ARMv8.1.

Userspace pages are mapped using a different domain number from the
kernel and IO mappings.  If we switch the user domain to "no access"
when we enter the kernel, we can prevent the kernel from touching
userspace.

However, the kernel needs to be able to access userspace via the
various user accessor functions.  With the wrapping in the previous
patch, we can temporarily enable access when the kernel needs user
access, and re-disable it afterwards.

This allows us to trap non-intended accesses to userspace, eg, caused
by an inadvertent dereference of the LIST_POISON* values, which, with
appropriate user mappings setup, can be made to succeed.  This in turn
can allow use-after-free bugs to be further exploited than would
otherwise be possible.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent 2190fed6
...@@ -1694,6 +1694,21 @@ config HIGHPTE ...@@ -1694,6 +1694,21 @@ config HIGHPTE
bool "Allocate 2nd-level pagetables from highmem" bool "Allocate 2nd-level pagetables from highmem"
depends on HIGHMEM depends on HIGHMEM
config CPU_SW_DOMAIN_PAN
bool "Enable use of CPU domains to implement privileged no-access"
depends on MMU && !ARM_LPAE
default y
help
Increase kernel security by ensuring that normal kernel accesses
are unable to access userspace addresses. This can help prevent
use-after-free bugs becoming an exploitable privilege escalation
by ensuring that magic values (such as LIST_POISON) will always
fault when dereferenced.
CPUs with low-vector mappings use a best-efforts implementation.
Their lower 1MB needs to remain accessible for the vectors, but
the remainder of userspace will become appropriately inaccessible.
config HW_PERF_EVENTS config HW_PERF_EVENTS
bool "Enable hardware performance counter support for perf events" bool "Enable hardware performance counter support for perf events"
depends on PERF_EVENTS depends on PERF_EVENTS
......
...@@ -446,15 +446,45 @@ THUMB( orr \reg , \reg , #PSR_T_BIT ) ...@@ -446,15 +446,45 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
.endm .endm
.macro uaccess_disable, tmp, isb=1 .macro uaccess_disable, tmp, isb=1
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
/*
* Whenever we re-enter userspace, the domains should always be
* set appropriately.
*/
mov \tmp, #DACR_UACCESS_DISABLE
mcr p15, 0, \tmp, c3, c0, 0 @ Set domain register
.if \isb
instr_sync
.endif
#endif
.endm .endm
.macro uaccess_enable, tmp, isb=1 .macro uaccess_enable, tmp, isb=1
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
/*
* Whenever we re-enter userspace, the domains should always be
* set appropriately.
*/
mov \tmp, #DACR_UACCESS_ENABLE
mcr p15, 0, \tmp, c3, c0, 0
.if \isb
instr_sync
.endif
#endif
.endm .endm
.macro uaccess_save, tmp .macro uaccess_save, tmp
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
mrc p15, 0, \tmp, c3, c0, 0
str \tmp, [sp, #S_FRAME_SIZE]
#endif
.endm .endm
.macro uaccess_restore .macro uaccess_restore
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
ldr r0, [sp, #S_FRAME_SIZE]
mcr p15, 0, r0, c3, c0, 0
#endif
.endm .endm
.macro uaccess_save_and_disable, tmp .macro uaccess_save_and_disable, tmp
......
...@@ -57,11 +57,29 @@ ...@@ -57,11 +57,29 @@
#define domain_mask(dom) ((3) << (2 * (dom))) #define domain_mask(dom) ((3) << (2 * (dom)))
#define domain_val(dom,type) ((type) << (2 * (dom))) #define domain_val(dom,type) ((type) << (2 * (dom)))
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
#define DACR_INIT \
(domain_val(DOMAIN_USER, DOMAIN_NOACCESS) | \
domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
#else
#define DACR_INIT \ #define DACR_INIT \
(domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \ (domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \
domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \ domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT)) domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
#endif
#define __DACR_DEFAULT \
domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT) | \
domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT)
#define DACR_UACCESS_DISABLE \
(__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
#define DACR_UACCESS_ENABLE \
(__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_CLIENT))
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
...@@ -76,7 +94,6 @@ static inline unsigned int get_domain(void) ...@@ -76,7 +94,6 @@ static inline unsigned int get_domain(void)
return domain; return domain;
} }
#ifdef CONFIG_CPU_USE_DOMAINS
static inline void set_domain(unsigned val) static inline void set_domain(unsigned val)
{ {
asm volatile( asm volatile(
...@@ -85,6 +102,7 @@ static inline void set_domain(unsigned val) ...@@ -85,6 +102,7 @@ static inline void set_domain(unsigned val)
isb(); isb();
} }
#ifdef CONFIG_CPU_USE_DOMAINS
#define modify_domain(dom,type) \ #define modify_domain(dom,type) \
do { \ do { \
unsigned int domain = get_domain(); \ unsigned int domain = get_domain(); \
...@@ -94,7 +112,6 @@ static inline void set_domain(unsigned val) ...@@ -94,7 +112,6 @@ static inline void set_domain(unsigned val)
} while (0) } while (0)
#else #else
static inline void set_domain(unsigned val) { }
static inline void modify_domain(unsigned dom, unsigned type) { } static inline void modify_domain(unsigned dom, unsigned type) { }
#endif #endif
......
...@@ -57,11 +57,25 @@ extern int fixup_exception(struct pt_regs *regs); ...@@ -57,11 +57,25 @@ extern int fixup_exception(struct pt_regs *regs);
*/ */
static inline unsigned int uaccess_save_and_enable(void) static inline unsigned int uaccess_save_and_enable(void)
{ {
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
unsigned int old_domain = get_domain();
/* Set the current domain access to permit user accesses */
set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
domain_val(DOMAIN_USER, DOMAIN_CLIENT));
return old_domain;
#else
return 0; return 0;
#endif
} }
static inline void uaccess_restore(unsigned int flags) static inline void uaccess_restore(unsigned int flags)
{ {
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
/* Restore the user access mask */
set_domain(flags);
#endif
} }
/* /*
......
...@@ -129,12 +129,36 @@ void __show_regs(struct pt_regs *regs) ...@@ -129,12 +129,36 @@ void __show_regs(struct pt_regs *regs)
buf[4] = '\0'; buf[4] = '\0';
#ifndef CONFIG_CPU_V7M #ifndef CONFIG_CPU_V7M
printk("Flags: %s IRQs o%s FIQs o%s Mode %s ISA %s Segment %s\n", {
buf, interrupts_enabled(regs) ? "n" : "ff", unsigned int domain = get_domain();
fast_interrupts_enabled(regs) ? "n" : "ff", const char *segment;
processor_modes[processor_mode(regs)],
isa_modes[isa_mode(regs)], #ifdef CONFIG_CPU_SW_DOMAIN_PAN
get_fs() == get_ds() ? "kernel" : "user"); /*
* Get the domain register for the parent context. In user
* mode, we don't save the DACR, so lets use what it should
* be. For other modes, we place it after the pt_regs struct.
*/
if (user_mode(regs))
domain = DACR_UACCESS_ENABLE;
else
domain = *(unsigned int *)(regs + 1);
#endif
if ((domain & domain_mask(DOMAIN_USER)) ==
domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
segment = "none";
else if (get_fs() == get_ds())
segment = "kernel";
else
segment = "user";
printk("Flags: %s IRQs o%s FIQs o%s Mode %s ISA %s Segment %s\n",
buf, interrupts_enabled(regs) ? "n" : "ff",
fast_interrupts_enabled(regs) ? "n" : "ff",
processor_modes[processor_mode(regs)],
isa_modes[isa_mode(regs)], segment);
}
#else #else
printk("xPSR: %08lx\n", regs->ARM_cpsr); printk("xPSR: %08lx\n", regs->ARM_cpsr);
#endif #endif
......
...@@ -141,11 +141,14 @@ static int emulate_swpX(unsigned int address, unsigned int *data, ...@@ -141,11 +141,14 @@ static int emulate_swpX(unsigned int address, unsigned int *data,
while (1) { while (1) {
unsigned long temp; unsigned long temp;
unsigned int __ua_flags;
__ua_flags = uaccess_save_and_enable();
if (type == TYPE_SWPB) if (type == TYPE_SWPB)
__user_swpb_asm(*data, address, res, temp); __user_swpb_asm(*data, address, res, temp);
else else
__user_swp_asm(*data, address, res, temp); __user_swp_asm(*data, address, res, temp);
uaccess_restore(__ua_flags);
if (likely(res != -EAGAIN) || signal_pending(current)) if (likely(res != -EAGAIN) || signal_pending(current))
break; break;
......
...@@ -17,6 +17,19 @@ ...@@ -17,6 +17,19 @@
.text .text
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
.macro save_regs
mrc p15, 0, ip, c3, c0, 0
stmfd sp!, {r1, r2, r4 - r8, ip, lr}
uaccess_enable ip
.endm
.macro load_regs
ldmfd sp!, {r1, r2, r4 - r8, ip, lr}
mcr p15, 0, ip, c3, c0, 0
ret lr
.endm
#else
.macro save_regs .macro save_regs
stmfd sp!, {r1, r2, r4 - r8, lr} stmfd sp!, {r1, r2, r4 - r8, lr}
.endm .endm
...@@ -24,6 +37,7 @@ ...@@ -24,6 +37,7 @@
.macro load_regs .macro load_regs
ldmfd sp!, {r1, r2, r4 - r8, pc} ldmfd sp!, {r1, r2, r4 - r8, pc}
.endm .endm
#endif
.macro load1b, reg1 .macro load1b, reg1
ldrusr \reg1, r0, 1 ldrusr \reg1, r0, 1
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment