Commit 69ea03b5 authored by Peter Zijlstra, committed by Thomas Gleixner

hardirq/nmi: Allow nested nmi_enter()

Since there are already a number of sites (ARM64, PowerPC) that effectively
nest nmi_enter(), make the primitive support this before adding even more.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Marc Zyngier <maz@kernel.org>
Acked-by: Will Deacon <will@kernel.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lkml.kernel.org/r/20200505134100.864179229@linutronix.de
parent 28f6bf9e
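
The mechanics behind the change: NMI context moves from a single flag bit to a 4-bit counter inside the preempt count, so nmi_enter()/nmi_exit() pair correctly even when a second NMI-class exception lands inside the first. Below is a minimal user-space sketch of that accounting, not kernel code: the constants mirror include/linux/preempt.h after this patch, while the global preempt_count, the one-line helpers and main() are illustrative scaffolding. The real nmi_enter() also adds HARDIRQ_OFFSET and maintains printk, RCU, lockdep and ftrace state, all of which the sketch omits.

	#include <stdio.h>

	#define NMI_SHIFT	20
	#define NMI_OFFSET	(1UL << NMI_SHIFT)
	#define NMI_MASK	(0xfUL << NMI_SHIFT)	/* 0x00f00000 after this patch */

	static unsigned long preempt_count;	/* stand-in for the kernel's count */

	#define in_nmi()	(preempt_count & NMI_MASK)

	static void nmi_enter(void) { preempt_count += NMI_OFFSET; }
	static void nmi_exit(void)  { preempt_count -= NMI_OFFSET; }

	int main(void)
	{
		nmi_enter();				/* a normal NMI-class event */
		nmi_enter();				/* a critical event interrupting it */
		printf("depth %lu\n", in_nmi() >> NMI_SHIFT);	/* prints "depth 2" */
		nmi_exit();				/* back to depth 1: still in_nmi() */
		nmi_exit();				/* depth 0: fully exited */
		return 0;
	}
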
--- a/arch/arm64/kernel/sdei.c
+++ b/arch/arm64/kernel/sdei.c
@@ -251,22 +251,12 @@ asmlinkage __kprobes notrace unsigned long
 __sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
 {
 	unsigned long ret;
-	bool do_nmi_exit = false;
 
-	/*
-	 * nmi_enter() deals with printk() re-entrance and use of RCU when
-	 * RCU believed this CPU was idle. Because critical events can
-	 * interrupt normal events, we may already be in_nmi().
-	 */
-	if (!in_nmi()) {
-		nmi_enter();
-		do_nmi_exit = true;
-	}
+	nmi_enter();
 
 	ret = _sdei_handler(regs, arg);
 
-	if (do_nmi_exit)
-		nmi_exit();
+	nmi_exit();
 
 	return ret;
 }
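
The comment deleted above captured why the guard existed: critical SDEI events can interrupt normal ones, so the handler may already be in_nmi(), and a second nmi_enter() used to trip its BUG_ON(). With nesting counted in the primitive itself, every level now runs the full enter/exit sequence (printk, RCU, lockdep; see the hardirq.h hunk below), and the same mechanical simplification repeats in the arm64 SError and powerpc hunks that follow. The before/after caller pattern, abstracted from any one file and padded with toy stubs so it compiles stand-alone:

	#include <stdbool.h>

	/* Minimal stubs for illustration; not the kernel's definitions. */
	static unsigned long preempt_count;
	#define NMI_MASK	0x00f00000UL
	#define NMI_OFFSET	0x00100000UL
	static bool in_nmi(void)       { return preempt_count & NMI_MASK; }
	static void nmi_enter(void)    { preempt_count += NMI_OFFSET; }
	static void nmi_exit(void)     { preempt_count -= NMI_OFFSET; }
	static void handle_event(void) { }

	/* Before: each nesting-capable handler open-coded a guard. */
	static void handler_old(void)
	{
		bool nested = in_nmi();

		if (!nested)
			nmi_enter();
		handle_event();
		if (!nested)
			nmi_exit();
	}

	/* After: the primitive counts nesting; callers pair unconditionally. */
	static void handler_new(void)
	{
		nmi_enter();
		handle_event();
		nmi_exit();
	}

	int main(void) { handler_old(); handler_new(); return 0; }
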
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -906,17 +906,13 @@ bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr)
 
 asmlinkage void do_serror(struct pt_regs *regs, unsigned int esr)
 {
-	const bool was_in_nmi = in_nmi();
-
-	if (!was_in_nmi)
-		nmi_enter();
+	nmi_enter();
 
 	/* non-RAS errors are not containable */
 	if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr))
 		arm64_serror_panic(regs, esr);
 
-	if (!was_in_nmi)
-		nmi_exit();
+	nmi_exit();
 }
 
 asmlinkage void enter_from_user_mode(void)
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -441,15 +441,9 @@ void hv_nmi_check_nonrecoverable(struct pt_regs *regs)
 void system_reset_exception(struct pt_regs *regs)
 {
 	unsigned long hsrr0, hsrr1;
-	bool nested = in_nmi();
 	bool saved_hsrrs = false;
 
-	/*
-	 * Avoid crashes in case of nested NMI exceptions. Recoverability
-	 * is determined by RI and in_nmi
-	 */
-	if (!nested)
-		nmi_enter();
+	nmi_enter();
 
 	/*
 	 * System reset can interrupt code where HSRRs are live and MSR[RI]=1.
@@ -521,8 +515,7 @@ void system_reset_exception(struct pt_regs *regs)
 		mtspr(SPRN_HSRR1, hsrr1);
 	}
 
-	if (!nested)
-		nmi_exit();
+	nmi_exit();
 
 	/* What should we do here? We could issue a shutdown or hard reset. */
 }
@@ -823,9 +816,8 @@ int machine_check_generic(struct pt_regs *regs)
 void machine_check_exception(struct pt_regs *regs)
 {
 	int recover = 0;
-	bool nested = in_nmi();
 
-	if (!nested)
-		nmi_enter();
+	nmi_enter();
 
 	__this_cpu_inc(irq_stat.mce_exceptions);
@@ -851,8 +843,7 @@ void machine_check_exception(struct pt_regs *regs)
 	if (check_io_access(regs))
 		goto bail;
 
-	if (!nested)
-		nmi_exit();
+	nmi_exit();
 
 	die("Machine check", regs, SIGBUS);
 
@@ -863,8 +854,7 @@ void machine_check_exception(struct pt_regs *regs)
 	return;
 
 bail:
-	if (!nested)
-		nmi_exit();
+	nmi_exit();
 }
 
 void SMIException(struct pt_regs *regs)
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -65,13 +65,16 @@ extern void irq_exit(void);
 #define arch_nmi_exit()		do { } while (0)
 #endif
 
+/*
+ * nmi_enter() can nest up to 15 times; see NMI_BITS.
+ */
 #define nmi_enter()						\
 	do {							\
 		arch_nmi_enter();				\
 		printk_nmi_enter();				\
 		lockdep_off();					\
 		ftrace_nmi_enter();				\
-		BUG_ON(in_nmi());				\
+		BUG_ON(in_nmi() == NMI_MASK);			\
 		preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET);	\
 		rcu_nmi_enter();				\
 		lockdep_hardirq_enter();			\
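
The relaxed BUG_ON() is the only functional change in the macro: in_nmi() returns the whole masked NMI field, so it equals NMI_MASK exactly when all four bits are set, i.e. 15 levels are already active and a 16th nmi_enter() would overflow the field. A quick stand-alone check of that arithmetic (the loop scaffolding is illustrative; the mask values match preempt.h after this patch):

	#include <assert.h>

	#define NMI_MASK	0x00f00000UL
	#define NMI_OFFSET	0x00100000UL

	int main(void)
	{
		unsigned long pc = 0;	/* toy preempt count, NMI bits only */
		int depth;

		/* The first 15 nmi_enter()s keep the BUG_ON() condition false... */
		for (depth = 0; depth < 15; depth++) {
			assert((pc & NMI_MASK) != NMI_MASK);
			pc += NMI_OFFSET;
		}

		/* ...at depth 15 all four bits are set: one more enter would BUG(). */
		assert((pc & NMI_MASK) == NMI_MASK);
		return 0;
	}
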
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -26,13 +26,13 @@
  * PREEMPT_MASK:	0x000000ff
  * SOFTIRQ_MASK:	0x0000ff00
  * HARDIRQ_MASK:	0x000f0000
- * NMI_MASK:		0x00100000
+ * NMI_MASK:		0x00f00000
  * PREEMPT_NEED_RESCHED:	0x80000000
  */
 #define PREEMPT_BITS	8
 #define SOFTIRQ_BITS	8
 #define HARDIRQ_BITS	4
-#define NMI_BITS	1
+#define NMI_BITS	4
 
 #define PREEMPT_SHIFT	0
 #define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
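
Growing NMI_BITS from 1 to 4 widens the field without disturbing its neighbours: the NMI bits sit directly above HARDIRQ at bit 20, and even the full field (0x00f00000) stays well clear of PREEMPT_NEED_RESCHED at bit 31. The mask column of the comment can be derived from the BITS/SHIFT definitions, restated here as a stand-alone program; the MASK() helper is illustrative, though preempt.h builds its masks with the same shift-and-subtract idiom:

	#include <stdio.h>

	#define PREEMPT_BITS	8
	#define SOFTIRQ_BITS	8
	#define HARDIRQ_BITS	4
	#define NMI_BITS	4	/* was 1 before this patch */

	#define PREEMPT_SHIFT	0
	#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)	/*  8 */
	#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)	/* 16 */
	#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)	/* 20 */

	#define MASK(bits, shift)	(((1UL << (bits)) - 1) << (shift))

	int main(void)
	{
		printf("PREEMPT_MASK: 0x%08lx\n", MASK(PREEMPT_BITS, PREEMPT_SHIFT));	/* 0x000000ff */
		printf("SOFTIRQ_MASK: 0x%08lx\n", MASK(SOFTIRQ_BITS, SOFTIRQ_SHIFT));	/* 0x0000ff00 */
		printf("HARDIRQ_MASK: 0x%08lx\n", MASK(HARDIRQ_BITS, HARDIRQ_SHIFT));	/* 0x000f0000 */
		printf("NMI_MASK:     0x%08lx\n", MASK(NMI_BITS, NMI_SHIFT));		/* 0x00f00000 */
		return 0;
	}
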