Commit 47058bb5 authored by Christoph Hellwig, committed by Al Viro

x86: remove address space overrides using set_fs()

Stop providing the possibility to override the address space using
set_fs() now that there is no need for that any more.  To properly
handle the TASK_SIZE_MAX checking for 4 vs 5-level page tables on
x86 a new alternative is introduced, which just like the one in
entry_64.S has to use the hardcoded virtual address bits to escape
the fact that TASK_SIZE_MAX isn't actually a constant when 5-level
page tables are enabled.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent a1d826d4
...@@ -237,7 +237,6 @@ config X86 ...@@ -237,7 +237,6 @@ config X86
select HAVE_ARCH_KCSAN if X86_64 select HAVE_ARCH_KCSAN if X86_64
select X86_FEATURE_NAMES if PROC_FS select X86_FEATURE_NAMES if PROC_FS
select PROC_PID_ARCH_STATUS if PROC_FS select PROC_PID_ARCH_STATUS if PROC_FS
select SET_FS
imply IMA_SECURE_AND_OR_TRUSTED_BOOT if EFI imply IMA_SECURE_AND_OR_TRUSTED_BOOT if EFI
config INSTRUCTION_DECODER config INSTRUCTION_DECODER
......
...@@ -239,7 +239,6 @@ static int load_aout_binary(struct linux_binprm *bprm) ...@@ -239,7 +239,6 @@ static int load_aout_binary(struct linux_binprm *bprm)
(regs)->ss = __USER32_DS; (regs)->ss = __USER32_DS;
regs->r8 = regs->r9 = regs->r10 = regs->r11 = regs->r8 = regs->r9 = regs->r10 = regs->r11 =
regs->r12 = regs->r13 = regs->r14 = regs->r15 = 0; regs->r12 = regs->r13 = regs->r14 = regs->r15 = 0;
set_fs(USER_DS);
return 0; return 0;
} }
......
...@@ -482,10 +482,6 @@ extern unsigned int fpu_user_xstate_size; ...@@ -482,10 +482,6 @@ extern unsigned int fpu_user_xstate_size;
struct perf_event; struct perf_event;
typedef struct {
unsigned long seg;
} mm_segment_t;
struct thread_struct { struct thread_struct {
/* Cached TLS descriptors: */ /* Cached TLS descriptors: */
struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
...@@ -538,8 +534,6 @@ struct thread_struct { ...@@ -538,8 +534,6 @@ struct thread_struct {
*/ */
unsigned long iopl_emul; unsigned long iopl_emul;
mm_segment_t addr_limit;
unsigned int sig_on_uaccess_err:1; unsigned int sig_on_uaccess_err:1;
/* Floating point and extended processor state */ /* Floating point and extended processor state */
...@@ -785,15 +779,12 @@ static inline void spin_lock_prefetch(const void *x) ...@@ -785,15 +779,12 @@ static inline void spin_lock_prefetch(const void *x)
#define INIT_THREAD { \ #define INIT_THREAD { \
.sp0 = TOP_OF_INIT_STACK, \ .sp0 = TOP_OF_INIT_STACK, \
.sysenter_cs = __KERNEL_CS, \ .sysenter_cs = __KERNEL_CS, \
.addr_limit = KERNEL_DS, \
} }
#define KSTK_ESP(task) (task_pt_regs(task)->sp) #define KSTK_ESP(task) (task_pt_regs(task)->sp)
#else #else
#define INIT_THREAD { \ #define INIT_THREAD { }
.addr_limit = KERNEL_DS, \
}
extern unsigned long KSTK_ESP(struct task_struct *task); extern unsigned long KSTK_ESP(struct task_struct *task);
......
...@@ -102,7 +102,6 @@ struct thread_info { ...@@ -102,7 +102,6 @@ struct thread_info {
#define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */ #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
#define TIF_ADDR32 29 /* 32-bit address space on 64 bits */ #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
#define TIF_X32 30 /* 32-bit native x86-64 binary */ #define TIF_X32 30 /* 32-bit native x86-64 binary */
#define TIF_FSCHECK 31 /* Check FS is USER_DS on return */
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
...@@ -131,7 +130,6 @@ struct thread_info { ...@@ -131,7 +130,6 @@ struct thread_info {
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_ADDR32 (1 << TIF_ADDR32) #define _TIF_ADDR32 (1 << TIF_ADDR32)
#define _TIF_X32 (1 << TIF_X32) #define _TIF_X32 (1 << TIF_X32)
#define _TIF_FSCHECK (1 << TIF_FSCHECK)
/* flags to check in __switch_to() */ /* flags to check in __switch_to() */
#define _TIF_WORK_CTXSW_BASE \ #define _TIF_WORK_CTXSW_BASE \
......
...@@ -12,30 +12,6 @@ ...@@ -12,30 +12,6 @@
#include <asm/smap.h> #include <asm/smap.h>
#include <asm/extable.h> #include <asm/extable.h>
/*
* The fs value determines whether argument validity checking should be
* performed or not. If get_fs() == USER_DS, checking is performed, with
* get_fs() == KERNEL_DS, checking is bypassed.
*
* For historical reasons, these macros are grossly misnamed.
*/
#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
#define KERNEL_DS MAKE_MM_SEG(-1UL)
#define USER_DS MAKE_MM_SEG(TASK_SIZE_MAX)
#define get_fs() (current->thread.addr_limit)
static inline void set_fs(mm_segment_t fs)
{
current->thread.addr_limit = fs;
/* On user-mode return, check fs is correct */
set_thread_flag(TIF_FSCHECK);
}
#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
#define user_addr_max() (current->thread.addr_limit.seg)
/* /*
* Test whether a block of memory is a valid user space address. * Test whether a block of memory is a valid user space address.
* Returns 0 if the range is valid, nonzero otherwise. * Returns 0 if the range is valid, nonzero otherwise.
...@@ -93,7 +69,7 @@ static inline bool pagefault_disabled(void); ...@@ -93,7 +69,7 @@ static inline bool pagefault_disabled(void);
#define access_ok(addr, size) \ #define access_ok(addr, size) \
({ \ ({ \
WARN_ON_IN_IRQ(); \ WARN_ON_IN_IRQ(); \
likely(!__range_not_ok(addr, size, user_addr_max())); \ likely(!__range_not_ok(addr, size, TASK_SIZE_MAX)); \
}) })
/* /*
......
...@@ -37,9 +37,6 @@ static void __used common(void) ...@@ -37,9 +37,6 @@ static void __used common(void)
OFFSET(TASK_stack_canary, task_struct, stack_canary); OFFSET(TASK_stack_canary, task_struct, stack_canary);
#endif #endif
BLANK();
OFFSET(TASK_addr_limit, task_struct, thread.addr_limit);
BLANK(); BLANK();
OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx); OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
......
...@@ -35,10 +35,19 @@ ...@@ -35,10 +35,19 @@
#include <asm/smap.h> #include <asm/smap.h>
#include <asm/export.h> #include <asm/export.h>
#ifdef CONFIG_X86_5LEVEL
#define LOAD_TASK_SIZE_MINUS_N(n) \
ALTERNATIVE __stringify(mov $((1 << 47) - 4096 - (n)),%rdx), \
__stringify(mov $((1 << 56) - 4096 - (n)),%rdx), X86_FEATURE_LA57
#else
#define LOAD_TASK_SIZE_MINUS_N(n) \
mov $(TASK_SIZE_MAX - (n)),%_ASM_DX
#endif
.text .text
SYM_FUNC_START(__get_user_1) SYM_FUNC_START(__get_user_1)
mov PER_CPU_VAR(current_task), %_ASM_DX LOAD_TASK_SIZE_MINUS_N(0)
cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX cmp %_ASM_DX,%_ASM_AX
jae bad_get_user jae bad_get_user
sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */ sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */
and %_ASM_DX, %_ASM_AX and %_ASM_DX, %_ASM_AX
...@@ -51,15 +60,13 @@ SYM_FUNC_END(__get_user_1) ...@@ -51,15 +60,13 @@ SYM_FUNC_END(__get_user_1)
EXPORT_SYMBOL(__get_user_1) EXPORT_SYMBOL(__get_user_1)
SYM_FUNC_START(__get_user_2) SYM_FUNC_START(__get_user_2)
add $1,%_ASM_AX LOAD_TASK_SIZE_MINUS_N(1)
jc bad_get_user cmp %_ASM_DX,%_ASM_AX
mov PER_CPU_VAR(current_task), %_ASM_DX
cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
jae bad_get_user jae bad_get_user
sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */ sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */
and %_ASM_DX, %_ASM_AX and %_ASM_DX, %_ASM_AX
ASM_STAC ASM_STAC
2: movzwl -1(%_ASM_AX),%edx 2: movzwl (%_ASM_AX),%edx
xor %eax,%eax xor %eax,%eax
ASM_CLAC ASM_CLAC
ret ret
...@@ -67,15 +74,13 @@ SYM_FUNC_END(__get_user_2) ...@@ -67,15 +74,13 @@ SYM_FUNC_END(__get_user_2)
EXPORT_SYMBOL(__get_user_2) EXPORT_SYMBOL(__get_user_2)
SYM_FUNC_START(__get_user_4) SYM_FUNC_START(__get_user_4)
add $3,%_ASM_AX LOAD_TASK_SIZE_MINUS_N(3)
jc bad_get_user cmp %_ASM_DX,%_ASM_AX
mov PER_CPU_VAR(current_task), %_ASM_DX
cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
jae bad_get_user jae bad_get_user
sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */ sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */
and %_ASM_DX, %_ASM_AX and %_ASM_DX, %_ASM_AX
ASM_STAC ASM_STAC
3: movl -3(%_ASM_AX),%edx 3: movl (%_ASM_AX),%edx
xor %eax,%eax xor %eax,%eax
ASM_CLAC ASM_CLAC
ret ret
...@@ -84,29 +89,25 @@ EXPORT_SYMBOL(__get_user_4) ...@@ -84,29 +89,25 @@ EXPORT_SYMBOL(__get_user_4)
SYM_FUNC_START(__get_user_8) SYM_FUNC_START(__get_user_8)
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
add $7,%_ASM_AX LOAD_TASK_SIZE_MINUS_N(7)
jc bad_get_user cmp %_ASM_DX,%_ASM_AX
mov PER_CPU_VAR(current_task), %_ASM_DX
cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
jae bad_get_user jae bad_get_user
sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */ sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */
and %_ASM_DX, %_ASM_AX and %_ASM_DX, %_ASM_AX
ASM_STAC ASM_STAC
4: movq -7(%_ASM_AX),%rdx 4: movq (%_ASM_AX),%rdx
xor %eax,%eax xor %eax,%eax
ASM_CLAC ASM_CLAC
ret ret
#else #else
add $7,%_ASM_AX LOAD_TASK_SIZE_MINUS_N(7)
jc bad_get_user_8 cmp %_ASM_DX,%_ASM_AX
mov PER_CPU_VAR(current_task), %_ASM_DX
cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
jae bad_get_user_8 jae bad_get_user_8
sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */ sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */
and %_ASM_DX, %_ASM_AX and %_ASM_DX, %_ASM_AX
ASM_STAC ASM_STAC
4: movl -7(%_ASM_AX),%edx 4: movl (%_ASM_AX),%edx
5: movl -3(%_ASM_AX),%ecx 5: movl 4(%_ASM_AX),%ecx
xor %eax,%eax xor %eax,%eax
ASM_CLAC ASM_CLAC
ret ret
......
...@@ -31,12 +31,19 @@ ...@@ -31,12 +31,19 @@
* as they get called from within inline assembly. * as they get called from within inline assembly.
*/ */
#define ENTER mov PER_CPU_VAR(current_task), %_ASM_BX #ifdef CONFIG_X86_5LEVEL
#define LOAD_TASK_SIZE_MINUS_N(n) \
ALTERNATIVE __stringify(mov $((1 << 47) - 4096 - (n)),%rbx), \
__stringify(mov $((1 << 56) - 4096 - (n)),%rbx), X86_FEATURE_LA57
#else
#define LOAD_TASK_SIZE_MINUS_N(n) \
mov $(TASK_SIZE_MAX - (n)),%_ASM_BX
#endif
.text .text
SYM_FUNC_START(__put_user_1) SYM_FUNC_START(__put_user_1)
ENTER LOAD_TASK_SIZE_MINUS_N(0)
cmp TASK_addr_limit(%_ASM_BX),%_ASM_CX cmp %_ASM_BX,%_ASM_CX
jae .Lbad_put_user jae .Lbad_put_user
ASM_STAC ASM_STAC
1: movb %al,(%_ASM_CX) 1: movb %al,(%_ASM_CX)
...@@ -47,9 +54,7 @@ SYM_FUNC_END(__put_user_1) ...@@ -47,9 +54,7 @@ SYM_FUNC_END(__put_user_1)
EXPORT_SYMBOL(__put_user_1) EXPORT_SYMBOL(__put_user_1)
SYM_FUNC_START(__put_user_2) SYM_FUNC_START(__put_user_2)
ENTER LOAD_TASK_SIZE_MINUS_N(1)
mov TASK_addr_limit(%_ASM_BX),%_ASM_BX
sub $1,%_ASM_BX
cmp %_ASM_BX,%_ASM_CX cmp %_ASM_BX,%_ASM_CX
jae .Lbad_put_user jae .Lbad_put_user
ASM_STAC ASM_STAC
...@@ -61,9 +66,7 @@ SYM_FUNC_END(__put_user_2) ...@@ -61,9 +66,7 @@ SYM_FUNC_END(__put_user_2)
EXPORT_SYMBOL(__put_user_2) EXPORT_SYMBOL(__put_user_2)
SYM_FUNC_START(__put_user_4) SYM_FUNC_START(__put_user_4)
ENTER LOAD_TASK_SIZE_MINUS_N(3)
mov TASK_addr_limit(%_ASM_BX),%_ASM_BX
sub $3,%_ASM_BX
cmp %_ASM_BX,%_ASM_CX cmp %_ASM_BX,%_ASM_CX
jae .Lbad_put_user jae .Lbad_put_user
ASM_STAC ASM_STAC
...@@ -75,9 +78,7 @@ SYM_FUNC_END(__put_user_4) ...@@ -75,9 +78,7 @@ SYM_FUNC_END(__put_user_4)
EXPORT_SYMBOL(__put_user_4) EXPORT_SYMBOL(__put_user_4)
SYM_FUNC_START(__put_user_8) SYM_FUNC_START(__put_user_8)
ENTER LOAD_TASK_SIZE_MINUS_N(7)
mov TASK_addr_limit(%_ASM_BX),%_ASM_BX
sub $7,%_ASM_BX
cmp %_ASM_BX,%_ASM_CX cmp %_ASM_BX,%_ASM_CX
jae .Lbad_put_user jae .Lbad_put_user
ASM_STAC ASM_STAC
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment