Commit 7fb983b4 authored by Andy Lutomirski, committed by Ingo Molnar

x86/entry: Fix assumptions that the HW TSS is at the beginning of cpu_tss

A future patch will move SYSENTER_stack to the beginning of cpu_tss
to help detect overflow.  Before this can happen, fix several code
paths that hardcode assumptions about the old layout.
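
[Illustrative aside, not part of the commit] A minimal userspace C sketch of the layout assumption being removed. The struct members below are simplified stand-ins for the kernel's types, and SYSENTER_stack_stub is a hypothetical placeholder for the stack that will be moved. The hardware resolves io_bitmap_base relative to the TSS base loaded into TR, i.e. relative to &cpu_tss.x86_tss, so once x86_tss is no longer the first member of cpu_tss, an offset computed from the start of cpu_tss no longer matches what the CPU uses:

#include <stddef.h>
#include <stdio.h>

struct x86_hw_tss {                     /* hardware-defined TSS (simplified) */
        unsigned long sp0;
        unsigned short io_bitmap_base;
};

struct SYSENTER_stack_stub {            /* hypothetical stand-in for the moved stack */
        char stack[64];
};

struct tss_struct {                     /* layout after the planned reordering */
        struct SYSENTER_stack_stub SYSENTER_stack;
        struct x86_hw_tss x86_tss;      /* no longer at offset 0 */
        unsigned long io_bitmap[4];
};

int main(void)
{
        /* Offset from the start of the whole structure: wrong once
         * x86_tss moves away from offset 0. */
        printf("offsetof(tss_struct, io_bitmap)        = %zu\n",
               offsetof(struct tss_struct, io_bitmap));

        /* Offset relative to the hardware TSS itself: what the CPU
         * actually adds to the TR base when it walks the I/O bitmap. */
        printf("same, relative to x86_tss (CPU's view) = %zu\n",
               offsetof(struct tss_struct, io_bitmap) -
               offsetof(struct tss_struct, x86_tss));

        return 0;
}

This is the same arithmetic the patch folds into IO_BITMAP_OFFSET below.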
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Dave Hansen <dave.hansen@intel.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Borislav Petkov <bpetkov@suse.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Laight <David.Laight@aculab.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Eduardo Valentin <eduval@amazon.com>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: aliguori@amazon.com
Cc: daniel.gruss@iaik.tugraz.at
Cc: hughd@google.com
Cc: keescook@google.com
Link: https://lkml.kernel.org/r/20171204150605.722425540@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 21506525
@@ -178,7 +178,7 @@ static inline void set_tssldt_descriptor(void *d, unsigned long addr,
 #endif
 }
 
-static inline void __set_tss_desc(unsigned cpu, unsigned int entry, void *addr)
+static inline void __set_tss_desc(unsigned cpu, unsigned int entry, struct x86_hw_tss *addr)
 {
         struct desc_struct *d = get_cpu_gdt_rw(cpu);
         tss_desc tss;
......
@@ -162,7 +162,7 @@ enum cpuid_regs_idx {
 extern struct cpuinfo_x86 boot_cpu_data;
 extern struct cpuinfo_x86 new_cpu_data;
 
-extern struct tss_struct doublefault_tss;
+extern struct x86_hw_tss doublefault_tss;
 extern __u32 cpu_caps_cleared[NCAPINTS];
 extern __u32 cpu_caps_set[NCAPINTS];
@@ -252,6 +252,11 @@ static inline void load_cr3(pgd_t *pgdir)
         write_cr3(__sme_pa(pgdir));
 }
 
+/*
+ * Note that while the legacy 'TSS' name comes from 'Task State Segment',
+ * on modern x86 CPUs the TSS also holds information important to 64-bit mode,
+ * unrelated to the task-switch mechanism:
+ */
 #ifdef CONFIG_X86_32
 /* This is the TSS defined by the hardware. */
 struct x86_hw_tss {
@@ -322,7 +327,7 @@ struct x86_hw_tss {
 #define IO_BITMAP_BITS                  65536
 #define IO_BITMAP_BYTES                 (IO_BITMAP_BITS/8)
 #define IO_BITMAP_LONGS                 (IO_BITMAP_BYTES/sizeof(long))
-#define IO_BITMAP_OFFSET                offsetof(struct tss_struct, io_bitmap)
+#define IO_BITMAP_OFFSET                (offsetof(struct tss_struct, io_bitmap) - offsetof(struct tss_struct, x86_tss))
 #define INVALID_IO_BITMAP_OFFSET        0x8000
 
 struct tss_struct {
......
@@ -1557,7 +1557,7 @@ void cpu_init(void)
                 }
         }
 
-        t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
+        t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
 
         /*
          * <= is required because the CPU will access up to
@@ -1576,7 +1576,7 @@ void cpu_init(void)
          * Initialize the TSS.  Don't bother initializing sp0, as the initial
          * task never enters user mode.
          */
-        set_tss_desc(cpu, t);
+        set_tss_desc(cpu, &t->x86_tss);
         load_TR_desc();
         load_mm_ldt(&init_mm);
@@ -1634,12 +1634,12 @@ void cpu_init(void)
          * Initialize the TSS.  Don't bother initializing sp0, as the initial
          * task never enters user mode.
          */
-        set_tss_desc(cpu, t);
+        set_tss_desc(cpu, &t->x86_tss);
         load_TR_desc();
         load_mm_ldt(&init_mm);
 
-        t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
+        t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
 
 #ifdef CONFIG_DOUBLEFAULT
         /* Set up doublefault TSS pointer in the GDT */
......
@@ -50,25 +50,23 @@ static void doublefault_fn(void)
                 cpu_relax();
 }
 
-struct tss_struct doublefault_tss __cacheline_aligned = {
-        .x86_tss = {
-                .sp0            = STACK_START,
-                .ss0            = __KERNEL_DS,
-                .ldt            = 0,
-                .io_bitmap_base = INVALID_IO_BITMAP_OFFSET,
-
-                .ip             = (unsigned long) doublefault_fn,
-                /* 0x2 bit is always set */
-                .flags          = X86_EFLAGS_SF | 0x2,
-                .sp             = STACK_START,
-                .es             = __USER_DS,
-                .cs             = __KERNEL_CS,
-                .ss             = __KERNEL_DS,
-                .ds             = __USER_DS,
-                .fs             = __KERNEL_PERCPU,
-
-                .__cr3          = __pa_nodebug(swapper_pg_dir),
-        }
+struct x86_hw_tss doublefault_tss __cacheline_aligned = {
+        .sp0            = STACK_START,
+        .ss0            = __KERNEL_DS,
+        .ldt            = 0,
+        .io_bitmap_base = INVALID_IO_BITMAP_OFFSET,
+
+        .ip             = (unsigned long) doublefault_fn,
+        /* 0x2 bit is always set */
+        .flags          = X86_EFLAGS_SF | 0x2,
+        .sp             = STACK_START,
+        .es             = __USER_DS,
+        .cs             = __KERNEL_CS,
+        .ss             = __KERNEL_DS,
+        .ds             = __USER_DS,
+        .fs             = __KERNEL_PERCPU,
+
+        .__cr3          = __pa_nodebug(swapper_pg_dir),
 };
 
 /* dummy for do_double_fault() call */
......
@@ -2291,7 +2291,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
          * processors.  See 22.2.4.
          */
         vmcs_writel(HOST_TR_BASE,
-                    (unsigned long)this_cpu_ptr(&cpu_tss));
+                    (unsigned long)this_cpu_ptr(&cpu_tss.x86_tss));
         vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt);   /* 22.2.4 */
 
         /*
......
@@ -165,12 +165,13 @@ static void fix_processor_context(void)
         struct desc_struct *desc = get_cpu_gdt_rw(cpu);
         tss_desc tss;
 #endif
-        set_tss_desc(cpu, t);   /*
-                                 * This just modifies memory; should not be
-                                 * necessary. But... This is necessary, because
-                                 * 386 hardware has concept of busy TSS or some
-                                 * similar stupidity.
-                                 */
+
+        /*
+         * This just modifies memory; should not be necessary. But... This is
+         * necessary, because 386 hardware has concept of busy TSS or some
+         * similar stupidity.
+         */
+        set_tss_desc(cpu, &t->x86_tss);
 
 #ifdef CONFIG_X86_64
         memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
......