Commit 4fe2d8b1 authored by Dave Hansen, committed by Ingo Molnar

x86/entry: Rename SYSENTER_stack to CPU_ENTRY_AREA_entry_stack

If the kernel oopses while on the trampoline stack, it will print
"<SYSENTER>" even if SYSENTER is not involved.  That is rather confusing.

The "SYSENTER" stack is used for a lot more than SYSENTER now.  Give it a
better string to display in stack dumps, and rename the kernel code to
match.

Also move the 32-bit code over to the new naming even though it still uses
the entry stack only for SYSENTER.
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Borislav Petkov <bp@suse.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent e8ffe96e
...@@ -942,9 +942,9 @@ ENTRY(debug) ...@@ -942,9 +942,9 @@ ENTRY(debug)
/* Are we currently on the SYSENTER stack? */ /* Are we currently on the SYSENTER stack? */
movl PER_CPU_VAR(cpu_entry_area), %ecx movl PER_CPU_VAR(cpu_entry_area), %ecx
addl $CPU_ENTRY_AREA_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx addl $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
subl %eax, %ecx /* ecx = (end of SYSENTER_stack) - esp */ subl %eax, %ecx /* ecx = (end of entry_stack) - esp */
cmpl $SIZEOF_SYSENTER_stack, %ecx cmpl $SIZEOF_entry_stack, %ecx
jb .Ldebug_from_sysenter_stack jb .Ldebug_from_sysenter_stack
TRACE_IRQS_OFF TRACE_IRQS_OFF
...@@ -986,9 +986,9 @@ ENTRY(nmi) ...@@ -986,9 +986,9 @@ ENTRY(nmi)
/* Are we currently on the SYSENTER stack? */ /* Are we currently on the SYSENTER stack? */
movl PER_CPU_VAR(cpu_entry_area), %ecx movl PER_CPU_VAR(cpu_entry_area), %ecx
addl $CPU_ENTRY_AREA_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx addl $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
subl %eax, %ecx /* ecx = (end of SYSENTER_stack) - esp */ subl %eax, %ecx /* ecx = (end of entry_stack) - esp */
cmpl $SIZEOF_SYSENTER_stack, %ecx cmpl $SIZEOF_entry_stack, %ecx
jb .Lnmi_from_sysenter_stack jb .Lnmi_from_sysenter_stack
/* Not on SYSENTER stack. */ /* Not on SYSENTER stack. */
......
...@@ -154,8 +154,8 @@ END(native_usergs_sysret64) ...@@ -154,8 +154,8 @@ END(native_usergs_sysret64)
_entry_trampoline - CPU_ENTRY_AREA_entry_trampoline(%rip) _entry_trampoline - CPU_ENTRY_AREA_entry_trampoline(%rip)
/* The top word of the SYSENTER stack is hot and is usable as scratch space. */ /* The top word of the SYSENTER stack is hot and is usable as scratch space. */
#define RSP_SCRATCH CPU_ENTRY_AREA_SYSENTER_stack + \ #define RSP_SCRATCH CPU_ENTRY_AREA_entry_stack + \
SIZEOF_SYSENTER_stack - 8 + CPU_ENTRY_AREA SIZEOF_entry_stack - 8 + CPU_ENTRY_AREA
ENTRY(entry_SYSCALL_64_trampoline) ENTRY(entry_SYSCALL_64_trampoline)
UNWIND_HINT_EMPTY UNWIND_HINT_EMPTY
......
...@@ -56,10 +56,10 @@ struct cpu_entry_area { ...@@ -56,10 +56,10 @@ struct cpu_entry_area {
char gdt[PAGE_SIZE]; char gdt[PAGE_SIZE];
/* /*
* The GDT is just below SYSENTER_stack and thus serves (on x86_64) as * The GDT is just below entry_stack and thus serves (on x86_64) as
* a a read-only guard page. * a a read-only guard page.
*/ */
struct SYSENTER_stack_page SYSENTER_stack_page; struct entry_stack_page entry_stack_page;
/* /*
* On x86_64, the TSS is mapped RO. On x86_32, it's mapped RW because * On x86_64, the TSS is mapped RO. On x86_32, it's mapped RW because
...@@ -250,9 +250,9 @@ static inline struct cpu_entry_area *get_cpu_entry_area(int cpu) ...@@ -250,9 +250,9 @@ static inline struct cpu_entry_area *get_cpu_entry_area(int cpu)
return (struct cpu_entry_area *)__fix_to_virt(__get_cpu_entry_area_page_index(cpu, 0)); return (struct cpu_entry_area *)__fix_to_virt(__get_cpu_entry_area_page_index(cpu, 0));
} }
static inline struct SYSENTER_stack *cpu_SYSENTER_stack(int cpu) static inline struct entry_stack *cpu_entry_stack(int cpu)
{ {
return &get_cpu_entry_area(cpu)->SYSENTER_stack_page.stack; return &get_cpu_entry_area(cpu)->entry_stack_page.stack;
} }
#endif /* !__ASSEMBLY__ */ #endif /* !__ASSEMBLY__ */
......
...@@ -336,12 +336,12 @@ struct x86_hw_tss { ...@@ -336,12 +336,12 @@ struct x86_hw_tss {
#define IO_BITMAP_OFFSET (offsetof(struct tss_struct, io_bitmap) - offsetof(struct tss_struct, x86_tss)) #define IO_BITMAP_OFFSET (offsetof(struct tss_struct, io_bitmap) - offsetof(struct tss_struct, x86_tss))
#define INVALID_IO_BITMAP_OFFSET 0x8000 #define INVALID_IO_BITMAP_OFFSET 0x8000
struct SYSENTER_stack { struct entry_stack {
unsigned long words[64]; unsigned long words[64];
}; };
struct SYSENTER_stack_page { struct entry_stack_page {
struct SYSENTER_stack stack; struct entry_stack stack;
} __aligned(PAGE_SIZE); } __aligned(PAGE_SIZE);
struct tss_struct { struct tss_struct {
......
...@@ -16,7 +16,7 @@ enum stack_type { ...@@ -16,7 +16,7 @@ enum stack_type {
STACK_TYPE_TASK, STACK_TYPE_TASK,
STACK_TYPE_IRQ, STACK_TYPE_IRQ,
STACK_TYPE_SOFTIRQ, STACK_TYPE_SOFTIRQ,
STACK_TYPE_SYSENTER, STACK_TYPE_ENTRY,
STACK_TYPE_EXCEPTION, STACK_TYPE_EXCEPTION,
STACK_TYPE_EXCEPTION_LAST = STACK_TYPE_EXCEPTION + N_EXCEPTION_STACKS-1, STACK_TYPE_EXCEPTION_LAST = STACK_TYPE_EXCEPTION + N_EXCEPTION_STACKS-1,
}; };
...@@ -29,7 +29,7 @@ struct stack_info { ...@@ -29,7 +29,7 @@ struct stack_info {
bool in_task_stack(unsigned long *stack, struct task_struct *task, bool in_task_stack(unsigned long *stack, struct task_struct *task,
struct stack_info *info); struct stack_info *info);
bool in_sysenter_stack(unsigned long *stack, struct stack_info *info); bool in_entry_stack(unsigned long *stack, struct stack_info *info);
int get_stack_info(unsigned long *stack, struct task_struct *task, int get_stack_info(unsigned long *stack, struct task_struct *task,
struct stack_info *info, unsigned long *visit_mask); struct stack_info *info, unsigned long *visit_mask);
......
...@@ -97,6 +97,6 @@ void common(void) { ...@@ -97,6 +97,6 @@ void common(void) {
/* Layout info for cpu_entry_area */ /* Layout info for cpu_entry_area */
OFFSET(CPU_ENTRY_AREA_tss, cpu_entry_area, tss); OFFSET(CPU_ENTRY_AREA_tss, cpu_entry_area, tss);
OFFSET(CPU_ENTRY_AREA_entry_trampoline, cpu_entry_area, entry_trampoline); OFFSET(CPU_ENTRY_AREA_entry_trampoline, cpu_entry_area, entry_trampoline);
OFFSET(CPU_ENTRY_AREA_SYSENTER_stack, cpu_entry_area, SYSENTER_stack_page); OFFSET(CPU_ENTRY_AREA_entry_stack, cpu_entry_area, entry_stack_page);
DEFINE(SIZEOF_SYSENTER_stack, sizeof(struct SYSENTER_stack)); DEFINE(SIZEOF_entry_stack, sizeof(struct entry_stack));
} }
...@@ -48,7 +48,7 @@ void foo(void) ...@@ -48,7 +48,7 @@ void foo(void)
/* Offset from the sysenter stack to tss.sp0 */ /* Offset from the sysenter stack to tss.sp0 */
DEFINE(TSS_sysenter_sp0, offsetof(struct cpu_entry_area, tss.x86_tss.sp0) - DEFINE(TSS_sysenter_sp0, offsetof(struct cpu_entry_area, tss.x86_tss.sp0) -
offsetofend(struct cpu_entry_area, SYSENTER_stack_page.stack)); offsetofend(struct cpu_entry_area, entry_stack_page.stack));
#ifdef CONFIG_CC_STACKPROTECTOR #ifdef CONFIG_CC_STACKPROTECTOR
BLANK(); BLANK();
......
...@@ -487,8 +487,8 @@ static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks ...@@ -487,8 +487,8 @@ static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]); [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
#endif #endif
static DEFINE_PER_CPU_PAGE_ALIGNED(struct SYSENTER_stack_page, static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page,
SYSENTER_stack_storage); entry_stack_storage);
static void __init static void __init
set_percpu_fixmap_pages(int idx, void *ptr, int pages, pgprot_t prot) set_percpu_fixmap_pages(int idx, void *ptr, int pages, pgprot_t prot)
...@@ -523,8 +523,8 @@ static void __init setup_cpu_entry_area(int cpu) ...@@ -523,8 +523,8 @@ static void __init setup_cpu_entry_area(int cpu)
#endif #endif
__set_fixmap(get_cpu_entry_area_index(cpu, gdt), get_cpu_gdt_paddr(cpu), gdt_prot); __set_fixmap(get_cpu_entry_area_index(cpu, gdt), get_cpu_gdt_paddr(cpu), gdt_prot);
set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, SYSENTER_stack_page), set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, entry_stack_page),
per_cpu_ptr(&SYSENTER_stack_storage, cpu), 1, per_cpu_ptr(&entry_stack_storage, cpu), 1,
PAGE_KERNEL); PAGE_KERNEL);
/* /*
...@@ -1323,7 +1323,7 @@ void enable_sep_cpu(void) ...@@ -1323,7 +1323,7 @@ void enable_sep_cpu(void)
tss->x86_tss.ss1 = __KERNEL_CS; tss->x86_tss.ss1 = __KERNEL_CS;
wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0); wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0);
wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_SYSENTER_stack(cpu) + 1), 0); wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1), 0);
wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0); wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);
put_cpu(); put_cpu();
...@@ -1440,7 +1440,7 @@ void syscall_init(void) ...@@ -1440,7 +1440,7 @@ void syscall_init(void)
* AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit). * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit).
*/ */
wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS); wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
wrmsrl_safe(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_SYSENTER_stack(cpu) + 1)); wrmsrl_safe(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1));
wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat); wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
#else #else
wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret); wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret);
...@@ -1655,7 +1655,7 @@ void cpu_init(void) ...@@ -1655,7 +1655,7 @@ void cpu_init(void)
*/ */
set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss); set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
load_TR_desc(); load_TR_desc();
load_sp0((unsigned long)(cpu_SYSENTER_stack(cpu) + 1)); load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));
load_mm_ldt(&init_mm); load_mm_ldt(&init_mm);
......
...@@ -43,9 +43,9 @@ bool in_task_stack(unsigned long *stack, struct task_struct *task, ...@@ -43,9 +43,9 @@ bool in_task_stack(unsigned long *stack, struct task_struct *task,
return true; return true;
} }
bool in_sysenter_stack(unsigned long *stack, struct stack_info *info) bool in_entry_stack(unsigned long *stack, struct stack_info *info)
{ {
struct SYSENTER_stack *ss = cpu_SYSENTER_stack(smp_processor_id()); struct entry_stack *ss = cpu_entry_stack(smp_processor_id());
void *begin = ss; void *begin = ss;
void *end = ss + 1; void *end = ss + 1;
...@@ -53,7 +53,7 @@ bool in_sysenter_stack(unsigned long *stack, struct stack_info *info) ...@@ -53,7 +53,7 @@ bool in_sysenter_stack(unsigned long *stack, struct stack_info *info)
if ((void *)stack < begin || (void *)stack >= end) if ((void *)stack < begin || (void *)stack >= end)
return false; return false;
info->type = STACK_TYPE_SYSENTER; info->type = STACK_TYPE_ENTRY;
info->begin = begin; info->begin = begin;
info->end = end; info->end = end;
info->next_sp = NULL; info->next_sp = NULL;
...@@ -111,13 +111,13 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, ...@@ -111,13 +111,13 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
* - task stack * - task stack
* - interrupt stack * - interrupt stack
* - HW exception stacks (double fault, nmi, debug, mce) * - HW exception stacks (double fault, nmi, debug, mce)
* - SYSENTER stack * - entry stack
* *
* x86-32 can have up to four stacks: * x86-32 can have up to four stacks:
* - task stack * - task stack
* - softirq stack * - softirq stack
* - hardirq stack * - hardirq stack
* - SYSENTER stack * - entry stack
*/ */
for (regs = NULL; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) { for (regs = NULL; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
const char *stack_name; const char *stack_name;
......
...@@ -26,8 +26,8 @@ const char *stack_type_name(enum stack_type type) ...@@ -26,8 +26,8 @@ const char *stack_type_name(enum stack_type type)
if (type == STACK_TYPE_SOFTIRQ) if (type == STACK_TYPE_SOFTIRQ)
return "SOFTIRQ"; return "SOFTIRQ";
if (type == STACK_TYPE_SYSENTER) if (type == STACK_TYPE_ENTRY)
return "SYSENTER"; return "ENTRY_TRAMPOLINE";
return NULL; return NULL;
} }
...@@ -96,7 +96,7 @@ int get_stack_info(unsigned long *stack, struct task_struct *task, ...@@ -96,7 +96,7 @@ int get_stack_info(unsigned long *stack, struct task_struct *task,
if (task != current) if (task != current)
goto unknown; goto unknown;
if (in_sysenter_stack(stack, info)) if (in_entry_stack(stack, info))
goto recursion_check; goto recursion_check;
if (in_hardirq_stack(stack, info)) if (in_hardirq_stack(stack, info))
......
...@@ -37,8 +37,14 @@ const char *stack_type_name(enum stack_type type) ...@@ -37,8 +37,14 @@ const char *stack_type_name(enum stack_type type)
if (type == STACK_TYPE_IRQ) if (type == STACK_TYPE_IRQ)
return "IRQ"; return "IRQ";
if (type == STACK_TYPE_SYSENTER) if (type == STACK_TYPE_ENTRY) {
return "SYSENTER"; /*
* On 64-bit, we have a generic entry stack that we
* use for all the kernel entry points, including
* SYSENTER.
*/
return "ENTRY_TRAMPOLINE";
}
if (type >= STACK_TYPE_EXCEPTION && type <= STACK_TYPE_EXCEPTION_LAST) if (type >= STACK_TYPE_EXCEPTION && type <= STACK_TYPE_EXCEPTION_LAST)
return exception_stack_names[type - STACK_TYPE_EXCEPTION]; return exception_stack_names[type - STACK_TYPE_EXCEPTION];
...@@ -118,7 +124,7 @@ int get_stack_info(unsigned long *stack, struct task_struct *task, ...@@ -118,7 +124,7 @@ int get_stack_info(unsigned long *stack, struct task_struct *task,
if (in_irq_stack(stack, info)) if (in_irq_stack(stack, info))
goto recursion_check; goto recursion_check;
if (in_sysenter_stack(stack, info)) if (in_entry_stack(stack, info))
goto recursion_check; goto recursion_check;
goto unknown; goto unknown;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment