Commit 6271fef0 authored by Thomas Gleixner

x86/entry: Convert NMI to IDTENTRY_NMI

Convert #NMI to IDTENTRY_NMI:
  - Implement the C entry point with DEFINE_IDTENTRY_NMI
  - Fixup the XEN/PV code
  - Remove the old prototypes

No functional change.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Andy Lutomirski <luto@kernel.org>
Link: https://lkml.kernel.org/r/20200505135314.609932306@linutronix.de


parent 9cce81cf
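The diff reads more easily with the shape of the conversion in mind: the IDT gate now points at an assembly stub named asm_exc_nmi, which hands a struct pt_regs pointer to the C entry point exc_nmi(), and the old do_nmi(regs, error_code) convention goes away. Below is a minimal, hedged sketch of that shape in plain C, not the kernel's actual macro expansion; the stand-in pt_regs, the printf() body and main() exist only to make the sketch self-contained and runnable.

    #include <stdio.h>

    struct pt_regs { unsigned long ip, sp; };   /* stand-in for the real pt_regs            */

    /* roughly what DECLARE_IDTENTRY_NMI(X86_TRAP_NMI, exc_nmi) provides */
    void asm_exc_nmi(void);                     /* low-level ASM stub; the IDT points here  */
    void exc_nmi(struct pt_regs *regs);         /* C entry point, called from the stub      */

    /* roughly what DEFINE_IDTENTRY_NMI(exc_nmi) expands to in the nmi.c hunk below */
    void exc_nmi(struct pt_regs *regs)
    {
            /* the real body is the former do_nmi() logic; placeholder only */
            printf("NMI while at ip=%#lx\n", regs->ip);
    }

    void asm_exc_nmi(void)                      /* dummy so the sketch links; really asm    */
    {
            struct pt_regs fake = { .ip = 0xffffffff81000000UL, .sp = 0 };
            exc_nmi(&fake);                     /* the stub hands pt_regs to the C handler  */
    }

    int main(void)
    {
            asm_exc_nmi();                      /* pretend the CPU delivered vector 2, #NMI */
            return 0;
    }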
@@ -1545,7 +1545,7 @@ SYM_CODE_END(double_fault)
* switched stacks. We handle both conditions by simply checking whether we
* interrupted kernel code running on the SYSENTER stack.
*/
-SYM_CODE_START(nmi)
+SYM_CODE_START(asm_exc_nmi)
ASM_CLAC
#ifdef CONFIG_X86_ESPFIX32
@@ -1574,7 +1574,7 @@ SYM_CODE_START(nmi)
jb .Lnmi_from_sysenter_stack
/* Not on SYSENTER stack. */
-call do_nmi
+call exc_nmi
jmp .Lnmi_return
.Lnmi_from_sysenter_stack:
@@ -1584,7 +1584,7 @@ SYM_CODE_START(nmi)
*/
movl %esp, %ebx
movl PER_CPU_VAR(cpu_current_top_of_stack), %esp
-call do_nmi
+call exc_nmi
movl %ebx, %esp
.Lnmi_return:
@@ -1638,7 +1638,7 @@ SYM_CODE_START(nmi)
lss (1+5+6)*4(%esp), %esp # back to espfix stack
jmp .Lirq_return
#endif
-SYM_CODE_END(nmi)
+SYM_CODE_END(asm_exc_nmi)
.pushsection .text, "ax"
SYM_CODE_START(rewind_stack_do_exit)
@@ -1079,7 +1079,6 @@ idtentry_df X86_TRAP_DF double_fault do_double_fault
#ifdef CONFIG_XEN_PV
idtentry 512 /* dummy */ hypervisor_callback xen_do_hypervisor_callback has_error_code=0
-idtentry X86_TRAP_NMI xennmi do_nmi has_error_code=0
idtentry X86_TRAP_DB xendebug do_debug has_error_code=0
#endif
@@ -1414,7 +1413,7 @@ SYM_CODE_END(error_return)
* %r14: Used to save/restore the CR3 of the interrupted context
* when PAGE_TABLE_ISOLATION is in use. Do not clobber.
*/
-SYM_CODE_START(nmi)
+SYM_CODE_START(asm_exc_nmi)
UNWIND_HINT_IRET_REGS
/*
@@ -1499,7 +1498,7 @@ SYM_CODE_START(nmi)
movq %rsp, %rdi
movq $-1, %rsi
-call do_nmi
+call exc_nmi
/*
* Return back to user mode. We must *not* do the normal exit
@@ -1556,7 +1555,7 @@ SYM_CODE_START(nmi)
* end_repeat_nmi, then we are a nested NMI. We must not
* modify the "iret" frame because it's being written by
* the outer NMI. That's okay; the outer NMI handler is
-* about to about to call do_nmi anyway, so we can just
+* about to about to call exc_nmi() anyway, so we can just
* resume the outer NMI.
*/
@@ -1675,7 +1674,7 @@ repeat_nmi:
* RSP is pointing to "outermost RIP". gsbase is unknown, but, if
* we're repeating an NMI, gsbase has the same value that it had on
* the first iteration. paranoid_entry will load the kernel
-* gsbase if needed before we call do_nmi. "NMI executing"
+* gsbase if needed before we call exc_nmi(). "NMI executing"
* is zero.
*/
movq $1, 10*8(%rsp) /* Set "NMI executing". */
@@ -1709,10 +1708,10 @@ end_repeat_nmi:
call paranoid_entry
UNWIND_HINT_REGS
-/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
+/* paranoidentry exc_nmi(), 0; without TRACE_IRQS_OFF */
movq %rsp, %rdi
movq $-1, %rsi
-call do_nmi
+call exc_nmi
/* Always restore stashed CR3 value (see paranoid_entry) */
RESTORE_CR3 scratch_reg=%r15 save_reg=%r14
@@ -1749,7 +1748,7 @@ nmi_restore:
* about espfix64 on the way back to kernel mode.
*/
iretq
-SYM_CODE_END(nmi)
+SYM_CODE_END(asm_exc_nmi)
#ifndef CONFIG_IA32_EMULATION
/*
@@ -258,4 +258,8 @@ DECLARE_IDTENTRY_RAW(X86_TRAP_BP, exc_int3);
DECLARE_IDTENTRY_MCE(X86_TRAP_MC, exc_machine_check);
#endif
+/* NMI */
+DECLARE_IDTENTRY_NMI(X86_TRAP_NMI, exc_nmi);
+DECLARE_IDTENTRY_XEN(X86_TRAP_NMI, nmi);
#endif
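The exact expansion of these DECLARE_* macros is not part of this diff. Judging from how the names are used in the hunks that follow (the IDT table, nmi.c and the XEN/PV changes), the two added lines are expected to declare roughly the symbols below; this is a hedged reconstruction, and the opaque struct pt_regs forward declaration only keeps the snippet self-contained.

    struct pt_regs;                             /* opaque stand-in for this sketch            */

    /* DECLARE_IDTENTRY_NMI(X86_TRAP_NMI, exc_nmi): the native NMI entry pair */
    void asm_exc_nmi(void);                     /* ASM stub installed in the IDT              */
    void exc_nmi(struct pt_regs *regs);         /* C handler, defined via DEFINE_IDTENTRY_NMI */

    /* DECLARE_IDTENTRY_XEN(X86_TRAP_NMI, nmi): the XEN/PV flavour (assumed naming,
     * inferred from the xen_pv_trap and TRAP_ENTRY_REDIR hunks further down) */
    void asm_exc_xennmi(void);                  /* native-format entry used by the PV path    */
    void xen_asm_exc_xennmi(void);              /* PV stub that Xen's trap table points at    */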
@@ -12,7 +12,6 @@
#define dotraplinkage __visible
asmlinkage void debug(void);
-asmlinkage void nmi(void);
#ifdef CONFIG_X86_64
asmlinkage void double_fault(void);
#endif
@@ -20,14 +19,12 @@ asmlinkage void page_fault(void);
asmlinkage void async_page_fault(void);
#if defined(CONFIG_X86_64) && defined(CONFIG_XEN_PV)
-asmlinkage void xen_xennmi(void);
asmlinkage void xen_xendebug(void);
asmlinkage void xen_double_fault(void);
asmlinkage void xen_page_fault(void);
#endif
dotraplinkage void do_debug(struct pt_regs *regs, long error_code);
-dotraplinkage void do_nmi(struct pt_regs *regs, long error_code);
dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code, unsigned long cr2);
dotraplinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
@@ -74,7 +74,7 @@ static const __initconst struct idt_data early_idts[] = {
*/
static const __initconst struct idt_data def_idts[] = {
INTG(X86_TRAP_DE, asm_exc_divide_error),
-INTG(X86_TRAP_NMI, nmi),
+INTG(X86_TRAP_NMI, asm_exc_nmi),
INTG(X86_TRAP_BR, asm_exc_bounds),
INTG(X86_TRAP_UD, asm_exc_invalid_op),
INTG(X86_TRAP_NM, asm_exc_device_not_available),
@@ -186,7 +186,7 @@ gate_desc debug_idt_table[IDT_ENTRIES] __page_aligned_bss;
*/
static const __initconst struct idt_data ist_idts[] = {
ISTG(X86_TRAP_DB, debug, IST_INDEX_DB),
-ISTG(X86_TRAP_NMI, nmi, IST_INDEX_NMI),
+ISTG(X86_TRAP_NMI, asm_exc_nmi, IST_INDEX_NMI),
ISTG(X86_TRAP_DF, double_fault, IST_INDEX_DF),
#ifdef CONFIG_X86_MCE
ISTG(X86_TRAP_MC, asm_exc_machine_check, IST_INDEX_MCE),
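In these tables, INTG() installs a plain interrupt gate and ISTG() additionally selects an IST stack, so vector 2 (X86_TRAP_NMI) now points directly at the asm_exc_nmi stub. The toy model below only illustrates what such an entry expresses; it is not the kernel's struct idt_data, and the IST index value is an assumption made for the sketch.

    #include <stdio.h>

    #define X86_TRAP_NMI   2                    /* hardware vector number of #NMI            */
    #define IST_INDEX_NMI  1                    /* placeholder value, assumed for the sketch */

    void asm_exc_nmi(void) { }                  /* stand-in for the real assembly entry      */

    struct idt_entry_model {                    /* simplified stand-in for struct idt_data   */
            unsigned int vector;                /* vector the gate is installed at           */
            void (*entry)(void);                /* low-level entry point the gate targets    */
            int ist;                            /* <0: default stack, >=0: IST slot          */
    };

    static const struct idt_entry_model nmi_gate = {
            .vector = X86_TRAP_NMI,
            .entry  = asm_exc_nmi,
            .ist    = IST_INDEX_NMI,            /* NMI runs on its own IST stack on 64-bit   */
    };

    int main(void)
    {
            printf("vector %u uses IST slot %d\n", nmi_gate.vector, nmi_gate.ist);
            return 0;
    }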
@@ -503,8 +503,7 @@ static bool notrace is_debug_stack(unsigned long addr)
NOKPROBE_SYMBOL(is_debug_stack);
#endif
-dotraplinkage notrace void
-do_nmi(struct pt_regs *regs, long error_code)
+DEFINE_IDTENTRY_NMI(exc_nmi)
{
if (IS_ENABLED(CONFIG_SMP) && cpu_is_offline(smp_processor_id()))
return;
@@ -554,7 +553,6 @@ do_nmi(struct pt_regs *regs, long error_code)
if (user_mode(regs))
mds_user_clear_cpu_buffers();
}
-NOKPROBE_SYMBOL(do_nmi);
void stop_nmi(void)
{
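The visible effect of this hunk is the handler's calling convention: the old dotraplinkage do_nmi() took an error code even though #NMI pushes none (the 64-bit entry code above passes a dummy -1 via movq $-1, %rsi), whereas the form generated by DEFINE_IDTENTRY_NMI() takes only the register frame, and the explicit NOKPROBE_SYMBOL(do_nmi) annotation is presumably subsumed by the idtentry machinery. A minimal before/after sketch of just the two signatures, with an opaque pt_regs stand-in:

    struct pt_regs;                             /* opaque stand-in                           */

    /* before: classic trap-handler signature, dummy error code supplied by asm   */
    void do_nmi(struct pt_regs *regs, long error_code);

    /* after: the C entry point generated by DEFINE_IDTENTRY_NMI(exc_nmi)         */
    void exc_nmi(struct pt_regs *regs);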
@@ -609,13 +609,18 @@ struct trap_array_entry {
.xen = xen_asm_##func, \
.ist_okay = ist_ok }
+#define TRAP_ENTRY_REDIR(func, xenfunc, ist_ok) { \
+.orig = asm_##func, \
+.xen = xen_asm_##xenfunc, \
+.ist_okay = ist_ok }
static struct trap_array_entry trap_array[] = {
{ debug, xen_xendebug, true },
{ double_fault, xen_double_fault, true },
#ifdef CONFIG_X86_MCE
TRAP_ENTRY(exc_machine_check, true ),
#endif
-{ nmi, xen_xennmi, true },
+TRAP_ENTRY_REDIR(exc_nmi, exc_xennmi, true ),
TRAP_ENTRY(exc_int3, false ),
TRAP_ENTRY(exc_overflow, false ),
#ifdef CONFIG_IA32_EMULATION
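Expanding the new TRAP_ENTRY_REDIR() initializer by hand shows why a separate macro is needed: the native entry asm_exc_nmi must be paired with a differently named XEN/PV stub, xen_asm_exc_xennmi, rather than with xen_asm_exc_nmi as the plain TRAP_ENTRY() macro would produce. The expansion follows mechanically from the macro added in this hunk; the struct below is a simplified stand-in for trap_array_entry, with dummy function bodies so the snippet builds:

    #include <stdbool.h>
    #include <stdio.h>

    struct trap_array_entry_model {             /* simplified stand-in for trap_array_entry  */
            void (*orig)(void);                 /* native entry installed in the IDT         */
            void (*xen)(void);                  /* PV entry registered with Xen instead      */
            bool ist_okay;                      /* whether an IST-based entry is acceptable  */
    };

    void asm_exc_nmi(void)        { }           /* stand-ins for the real assembly entries   */
    void xen_asm_exc_xennmi(void) { }

    /* TRAP_ENTRY_REDIR(exc_nmi, exc_xennmi, true) written out: */
    static const struct trap_array_entry_model nmi_redir = {
            .orig     = asm_exc_nmi,
            .xen      = xen_asm_exc_xennmi,
            .ist_okay = true,
    };

    int main(void)
    {
            printf("ist_okay=%d\n", (int)nmi_redir.ist_okay);
            return 0;
    }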
@@ -32,7 +32,7 @@ xen_pv_trap asm_exc_divide_error
xen_pv_trap debug
xen_pv_trap xendebug
xen_pv_trap asm_exc_int3
-xen_pv_trap xennmi
+xen_pv_trap asm_exc_xennmi
xen_pv_trap asm_exc_overflow
xen_pv_trap asm_exc_bounds
xen_pv_trap asm_exc_invalid_op