Commit 9c48f1c6 authored by Don Zickus, committed by Ingo Molnar

x86, nmi: Wire up NMI handlers to new routines

Just convert all the files that have an nmi handler to the new routines.
Most of it is straightforward conversion.  A couple of places needed some
tweaking, like kgdb, which separates the debug notifier from the nmi handler,
and mce, which removes a call to notify_die.

[Thanks to Ying for finding out the history behind that mce call

https://lkml.org/lkml/2010/5/27/114

And to Boris for responding that he would like to remove that call because of it

https://lkml.org/lkml/2011/9/21/163]

The things that get converted are the registration/unregistration routines,
and the nmi handlers themselves have their arguments changed along with the
removal of the code that checked which list the handler was on (most handlers
are on one NMI list, except for kgdb, which has both an NMI routine and an
NMI Unknown routine).
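
In API terms, the conversion follows this pattern (a minimal before/after
sketch; the foo_* names are illustrative and not part of the patch):

    #include <asm/nmi.h>

    /* Before: a die-notifier that had to filter for DIE_NMI itself */
    static int foo_nmi_notify(struct notifier_block *self,
                              unsigned long cmd, void *data)
    {
            struct die_args *args = data;

            if (cmd != DIE_NMI)
                    return NOTIFY_DONE;
            if (!foo_check(args->regs))  /* foo_check() is hypothetical */
                    return NOTIFY_DONE;  /* not ours, let others look */
            return NOTIFY_STOP;
    }

    static struct notifier_block foo_nb = {
            .notifier_call = foo_nmi_notify,
            .priority      = NMI_LOCAL_LOW_PRIOR,
    };
    /* register_die_notifier(&foo_nb); / unregister_die_notifier(&foo_nb); */

    /* After: registered on exactly one NMI list, handed pt_regs directly */
    static int foo_nmi_handler(unsigned int cmd, struct pt_regs *regs)
    {
            if (!foo_check(regs))
                    return NMI_DONE;     /* not ours, let others look */
            return NMI_HANDLED;
    }
    /* register_nmi_handler(NMI_LOCAL, foo_nmi_handler, 0, "foo"); */
    /* unregister_nmi_handler(NMI_LOCAL, "foo"); */
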
Signed-off-by: Don Zickus <dzickus@redhat.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Corey Minyard <minyard@acm.org>
Cc: Jason Wessel <jason.wessel@windriver.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Huang Ying <ying.huang@intel.com>
Cc: Corey Minyard <minyard@acm.org>
Cc: Jack Steiner <steiner@sgi.com>
Link: http://lkml.kernel.org/r/1317409584-23662-4-git-send-email-dzickus@redhat.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent c9126b2e
@@ -22,26 +22,6 @@ void arch_trigger_all_cpu_backtrace(void);
 #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
 #endif
 
-/*
- * Define some priorities for the nmi notifier call chain.
- *
- * Create a local nmi bit that has a higher priority than
- * external nmis, because the local ones are more frequent.
- *
- * Also setup some default high/normal/low settings for
- * subsystems to registers with.  Using 4 bits to separate
- * the priorities.  This can go a lot higher if needed be.
- */
-#define NMI_LOCAL_SHIFT         16      /* randomly picked */
-#define NMI_LOCAL_BIT           (1ULL << NMI_LOCAL_SHIFT)
-#define NMI_HIGH_PRIOR          (1ULL << 8)
-#define NMI_NORMAL_PRIOR        (1ULL << 4)
-#define NMI_LOW_PRIOR           (1ULL << 0)
-#define NMI_LOCAL_HIGH_PRIOR    (NMI_LOCAL_BIT | NMI_HIGH_PRIOR)
-#define NMI_LOCAL_NORMAL_PRIOR  (NMI_LOCAL_BIT | NMI_NORMAL_PRIOR)
-#define NMI_LOCAL_LOW_PRIOR     (NMI_LOCAL_BIT | NMI_LOW_PRIOR)
-
 #define NMI_FLAG_FIRST  1
 
 enum {
......
@@ -23,7 +23,7 @@ void machine_real_restart(unsigned int type);
 #define MRR_BIOS        0
 #define MRR_APM         1
 
-typedef void (*nmi_shootdown_cb)(int, struct die_args*);
+typedef void (*nmi_shootdown_cb)(int, struct pt_regs*);
 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
 
 #endif /* _ASM_X86_REBOOT_H */
@@ -60,22 +60,10 @@ void arch_trigger_all_cpu_backtrace(void)
 }
 
 static int __kprobes
-arch_trigger_all_cpu_backtrace_handler(struct notifier_block *self,
-                         unsigned long cmd, void *__args)
+arch_trigger_all_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs)
 {
-        struct die_args *args = __args;
-        struct pt_regs *regs;
         int cpu;
 
-        switch (cmd) {
-        case DIE_NMI:
-                break;
-
-        default:
-                return NOTIFY_DONE;
-        }
-
-        regs = args->regs;
         cpu = smp_processor_id();
 
         if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
@@ -86,21 +74,16 @@ arch_trigger_all_cpu_backtrace_handler(struct notifier_block *self,
                 show_regs(regs);
                 arch_spin_unlock(&lock);
                 cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
-                return NOTIFY_STOP;
+                return NMI_HANDLED;
         }
 
-        return NOTIFY_DONE;
+        return NMI_DONE;
 }
 
-static __read_mostly struct notifier_block backtrace_notifier = {
-        .notifier_call          = arch_trigger_all_cpu_backtrace_handler,
-        .next                   = NULL,
-        .priority               = NMI_LOCAL_LOW_PRIOR,
-};
-
 static int __init register_trigger_all_cpu_backtrace(void)
 {
-        register_die_notifier(&backtrace_notifier);
+        register_nmi_handler(NMI_LOCAL, arch_trigger_all_cpu_backtrace_handler,
+                                0, "arch_bt");
         return 0;
 }
 early_initcall(register_trigger_all_cpu_backtrace);
......
@@ -672,18 +672,11 @@ void __cpuinit uv_cpu_init(void)
 /*
  * When NMI is received, print a stack trace.
  */
-int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
+int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
 {
         unsigned long real_uv_nmi;
         int bid;
 
-        if (reason != DIE_NMIUNKNOWN)
-                return NOTIFY_OK;
-
-        if (in_crash_kexec)
-                /* do nothing if entering the crash kernel */
-                return NOTIFY_OK;
-
         /*
          * Each blade has an MMR that indicates when an NMI has been sent
          * to cpus on the blade. If an NMI is detected, atomically
@@ -704,7 +697,7 @@ int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
         }
 
         if (likely(__get_cpu_var(cpu_last_nmi_count) == uv_blade_info[bid].nmi_count))
-                return NOTIFY_DONE;
+                return NMI_DONE;
 
         __get_cpu_var(cpu_last_nmi_count) = uv_blade_info[bid].nmi_count;
 
@@ -717,17 +710,12 @@ int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
         dump_stack();
         spin_unlock(&uv_nmi_lock);
 
-        return NOTIFY_STOP;
+        return NMI_HANDLED;
 }
 
-static struct notifier_block uv_dump_stack_nmi_nb = {
-        .notifier_call  = uv_handle_nmi,
-        .priority = NMI_LOCAL_LOW_PRIOR - 1,
-};
-
 void uv_register_nmi_notifier(void)
 {
-        if (register_die_notifier(&uv_dump_stack_nmi_nb))
+        if (register_nmi_handler(NMI_UNKNOWN, uv_handle_nmi, 0, "uv"))
                 printk(KERN_WARNING "UV NMI handler failed to register\n");
 }
......
@@ -78,27 +78,20 @@ static void raise_exception(struct mce *m, struct pt_regs *pregs)
 
 static cpumask_var_t mce_inject_cpumask;
 
-static int mce_raise_notify(struct notifier_block *self,
-                            unsigned long val, void *data)
+static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs)
 {
-        struct die_args *args = (struct die_args *)data;
         int cpu = smp_processor_id();
         struct mce *m = &__get_cpu_var(injectm);
-        if (val != DIE_NMI || !cpumask_test_cpu(cpu, mce_inject_cpumask))
-                return NOTIFY_DONE;
+        if (!cpumask_test_cpu(cpu, mce_inject_cpumask))
+                return NMI_DONE;
         cpumask_clear_cpu(cpu, mce_inject_cpumask);
         if (m->inject_flags & MCJ_EXCEPTION)
-                raise_exception(m, args->regs);
+                raise_exception(m, regs);
         else if (m->status)
                 raise_poll(m);
-        return NOTIFY_STOP;
+        return NMI_HANDLED;
 }
 
-static struct notifier_block mce_raise_nb = {
-        .notifier_call = mce_raise_notify,
-        .priority = NMI_LOCAL_NORMAL_PRIOR,
-};
-
 /* Inject mce on current CPU */
 static int raise_local(void)
 {
@@ -216,7 +209,8 @@ static int inject_init(void)
                 return -ENOMEM;
         printk(KERN_INFO "Machine check injector initialized\n");
         mce_chrdev_ops.write = mce_write;
-        register_die_notifier(&mce_raise_nb);
+        register_nmi_handler(NMI_LOCAL, mce_raise_notify, 0,
+                        "mce_notify");
         return 0;
 }
......
@@ -908,9 +908,6 @@ void do_machine_check(struct pt_regs *regs, long error_code)
 
         percpu_inc(mce_exception_count);
 
-        if (notify_die(DIE_NMI, "machine check", regs, error_code,
-                           18, SIGKILL) == NOTIFY_STOP)
-                goto out;
         if (!banks)
                 goto out;
......
@@ -1058,76 +1058,15 @@ void perf_events_lapic_init(void)
         apic_write(APIC_LVTPC, APIC_DM_NMI);
 }
 
-struct pmu_nmi_state {
-        unsigned int    marked;
-        int             handled;
-};
-
-static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);
-
 static int __kprobes
-perf_event_nmi_handler(struct notifier_block *self,
-                         unsigned long cmd, void *__args)
+perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
 {
-        struct die_args *args = __args;
-        unsigned int this_nmi;
-        int handled;
-
         if (!atomic_read(&active_events))
-                return NOTIFY_DONE;
+                return NMI_DONE;
 
-        switch (cmd) {
-        case DIE_NMI:
-                break;
-        case DIE_NMIUNKNOWN:
-                this_nmi = percpu_read(irq_stat.__nmi_count);
-                if (this_nmi != __this_cpu_read(pmu_nmi.marked))
-                        /* let the kernel handle the unknown nmi */
-                        return NOTIFY_DONE;
-                /*
-                 * This one is a PMU back-to-back nmi. Two events
-                 * trigger 'simultaneously' raising two back-to-back
-                 * NMIs. If the first NMI handles both, the latter
-                 * will be empty and daze the CPU. So, we drop it to
-                 * avoid false-positive 'unknown nmi' messages.
-                 */
-                return NOTIFY_STOP;
-        default:
-                return NOTIFY_DONE;
-        }
-
-        handled = x86_pmu.handle_irq(args->regs);
-        if (!handled)
-                return NOTIFY_DONE;
-
-        this_nmi = percpu_read(irq_stat.__nmi_count);
-        if ((handled > 1) ||
-                /* the next nmi could be a back-to-back nmi */
-            ((__this_cpu_read(pmu_nmi.marked) == this_nmi) &&
-             (__this_cpu_read(pmu_nmi.handled) > 1))) {
-                /*
-                 * We could have two subsequent back-to-back nmis: The
-                 * first handles more than one counter, the 2nd
-                 * handles only one counter and the 3rd handles no
-                 * counter.
-                 *
-                 * This is the 2nd nmi because the previous was
-                 * handling more than one counter. We will mark the
-                 * next (3rd) and then drop it if unhandled.
-                 */
-                __this_cpu_write(pmu_nmi.marked, this_nmi + 1);
-                __this_cpu_write(pmu_nmi.handled, handled);
-        }
-
-        return NOTIFY_STOP;
+        return x86_pmu.handle_irq(regs);
 }
 
-static __read_mostly struct notifier_block perf_event_nmi_notifier = {
-        .notifier_call          = perf_event_nmi_handler,
-        .next                   = NULL,
-        .priority               = NMI_LOCAL_LOW_PRIOR,
-};
-
 struct event_constraint emptyconstraint;
 struct event_constraint unconstrained;
 
@@ -1232,7 +1171,7 @@ static int __init init_hw_perf_events(void)
                 ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
 
         perf_events_lapic_init();
-        register_die_notifier(&perf_event_nmi_notifier);
+        register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");
 
         unconstrained = (struct event_constraint)
                 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
......
@@ -32,15 +32,12 @@ int in_crash_kexec;
 
 #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
 
-static void kdump_nmi_callback(int cpu, struct die_args *args)
+static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
 {
-        struct pt_regs *regs;
 #ifdef CONFIG_X86_32
         struct pt_regs fixed_regs;
 #endif
 
-        regs = args->regs;
-
 #ifdef CONFIG_X86_32
         if (!user_mode_vm(regs)) {
                 crash_fixup_ss_esp(&fixed_regs, regs);
......
@@ -511,28 +511,37 @@ single_step_cont(struct pt_regs *regs, struct die_args *args)
 
 static int was_in_debug_nmi[NR_CPUS];
 
-static int __kgdb_notify(struct die_args *args, unsigned long cmd)
+static int kgdb_nmi_handler(unsigned int cmd, struct pt_regs *regs)
 {
-        struct pt_regs *regs = args->regs;
-
         switch (cmd) {
-        case DIE_NMI:
+        case NMI_LOCAL:
                 if (atomic_read(&kgdb_active) != -1) {
                         /* KGDB CPU roundup */
                         kgdb_nmicallback(raw_smp_processor_id(), regs);
                         was_in_debug_nmi[raw_smp_processor_id()] = 1;
                         touch_nmi_watchdog();
-                        return NOTIFY_STOP;
+                        return NMI_HANDLED;
                 }
-                return NOTIFY_DONE;
+                break;
 
-        case DIE_NMIUNKNOWN:
+        case NMI_UNKNOWN:
                 if (was_in_debug_nmi[raw_smp_processor_id()]) {
                         was_in_debug_nmi[raw_smp_processor_id()] = 0;
-                        return NOTIFY_STOP;
+                        return NMI_HANDLED;
                 }
-                return NOTIFY_DONE;
+                break;
+        default:
+                /* do nothing */
+                break;
+        }
+        return NMI_DONE;
+}
+
+static int __kgdb_notify(struct die_args *args, unsigned long cmd)
+{
+        struct pt_regs *regs = args->regs;
 
+        switch (cmd) {
         case DIE_DEBUG:
                 if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
                         if (user_mode(regs))
@@ -590,11 +599,6 @@ kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
 
 static struct notifier_block kgdb_notifier = {
         .notifier_call  = kgdb_notify,
-
-        /*
-         * Lowest-prio notifier priority, we want to be notified last:
-         */
-        .priority       = NMI_LOCAL_LOW_PRIOR,
 };
 
 /**
@@ -605,7 +609,31 @@ static struct notifier_block kgdb_notifier = {
  */
 int kgdb_arch_init(void)
 {
-        return register_die_notifier(&kgdb_notifier);
+        int retval;
+
+        retval = register_die_notifier(&kgdb_notifier);
+        if (retval)
+                goto out;
+
+        retval = register_nmi_handler(NMI_LOCAL, kgdb_nmi_handler,
+                                        0, "kgdb");
+        if (retval)
+                goto out1;
+
+        retval = register_nmi_handler(NMI_UNKNOWN, kgdb_nmi_handler,
+                                        0, "kgdb");
+        if (retval)
+                goto out2;
+
+        return retval;
+
+out2:
+        unregister_nmi_handler(NMI_LOCAL, "kgdb");
+out1:
+        unregister_die_notifier(&kgdb_notifier);
+out:
+        return retval;
 }
 
 static void kgdb_hw_overflow_handler(struct perf_event *event,
@@ -673,6 +701,8 @@ void kgdb_arch_exit(void)
                         breakinfo[i].pev = NULL;
                 }
         }
+        unregister_nmi_handler(NMI_UNKNOWN, "kgdb");
+        unregister_nmi_handler(NMI_LOCAL, "kgdb");
         unregister_die_notifier(&kgdb_notifier);
 }
......
@@ -1,6 +1,7 @@
 /*
  *  Copyright (C) 1991, 1992  Linus Torvalds
  *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
+ *  Copyright (C) 2011 Don Zickus Red Hat, Inc.
  *
  *  Pentium III FXSR, SSE support
  *      Gareth Hughes <gareth@valinux.com>, May 2000
@@ -248,8 +249,10 @@ io_check_error(unsigned char reason, struct pt_regs *regs)
 static notrace __kprobes void
 unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
 {
-        if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) ==
-                        NOTIFY_STOP)
+        int handled;
+
+        handled = nmi_handle(NMI_UNKNOWN, regs);
+        if (handled)
                 return;
 #ifdef CONFIG_MCA
         /*
@@ -274,13 +277,15 @@ unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
 static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
 {
         unsigned char reason = 0;
+        int handled;
 
         /*
          * CPU-specific NMI must be processed before non-CPU-specific
          * NMI, otherwise we may lose it, because the CPU-specific
          * NMI can not be detected/processed on other CPUs.
          */
-        if (notify_die(DIE_NMI, "nmi", regs, 0, 2, SIGINT) == NOTIFY_STOP)
+        handled = nmi_handle(NMI_LOCAL, regs);
+        if (handled)
                 return;
 
         /* Non-CPU-specific NMI: NMI sources can be processed on any CPU */
......
@@ -464,7 +464,7 @@ static inline void kb_wait(void)
         }
 }
 
-static void vmxoff_nmi(int cpu, struct die_args *args)
+static void vmxoff_nmi(int cpu, struct pt_regs *regs)
 {
         cpu_emergency_vmxoff();
 }
@@ -736,14 +736,10 @@ static nmi_shootdown_cb shootdown_callback;
 
 static atomic_t waiting_for_crash_ipi;
 
-static int crash_nmi_callback(struct notifier_block *self,
-                        unsigned long val, void *data)
+static int crash_nmi_callback(unsigned int val, struct pt_regs *regs)
 {
         int cpu;
 
-        if (val != DIE_NMI)
-                return NOTIFY_OK;
-
         cpu = raw_smp_processor_id();
 
         /* Don't do anything if this handler is invoked on crashing cpu.
@@ -751,10 +747,10 @@ static int crash_nmi_callback(struct notifier_block *self,
          * an NMI if system was initially booted with nmi_watchdog parameter.
          */
         if (cpu == crashing_cpu)
-                return NOTIFY_STOP;
+                return NMI_HANDLED;
         local_irq_disable();
 
-        shootdown_callback(cpu, (struct die_args *)data);
+        shootdown_callback(cpu, regs);
 
         atomic_dec(&waiting_for_crash_ipi);
         /* Assume hlt works */
@@ -762,7 +758,7 @@ static int crash_nmi_callback(struct notifier_block *self,
         for (;;)
                 cpu_relax();
 
-        return 1;
+        return NMI_HANDLED;
 }
 
 static void smp_send_nmi_allbutself(void)
@@ -770,12 +766,6 @@ static void smp_send_nmi_allbutself(void)
         apic->send_IPI_allbutself(NMI_VECTOR);
 }
 
-static struct notifier_block crash_nmi_nb = {
-        .notifier_call = crash_nmi_callback,
-        /* we want to be the first one called */
-        .priority = NMI_LOCAL_HIGH_PRIOR+1,
-};
-
 /* Halt all other CPUs, calling the specified function on each of them
  *
  * This function can be used to halt all other CPUs on crash
@@ -794,7 +784,8 @@ void nmi_shootdown_cpus(nmi_shootdown_cb callback)
 
         atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
         /* Would it be better to replace the trap vector here? */
-        if (register_die_notifier(&crash_nmi_nb))
+        if (register_nmi_handler(NMI_LOCAL, crash_nmi_callback,
+                                 NMI_FLAG_FIRST, "crash"))
                 return;         /* return what? */
         /* Ensure the new callback function is set before sending
          * out the NMI
......
@@ -61,26 +61,15 @@ u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
 }
 
 
-static int profile_exceptions_notify(struct notifier_block *self,
-                                     unsigned long val, void *data)
+static int profile_exceptions_notify(unsigned int val, struct pt_regs *regs)
 {
-        struct die_args *args = (struct die_args *)data;
-        int ret = NOTIFY_DONE;
-
-        switch (val) {
-        case DIE_NMI:
-                if (ctr_running)
-                        model->check_ctrs(args->regs, &__get_cpu_var(cpu_msrs));
-                else if (!nmi_enabled)
-                        break;
-                else
-                        model->stop(&__get_cpu_var(cpu_msrs));
-                ret = NOTIFY_STOP;
-                break;
-        default:
-                break;
-        }
-        return ret;
+        if (ctr_running)
+                model->check_ctrs(regs, &__get_cpu_var(cpu_msrs));
+        else if (!nmi_enabled)
+                return NMI_DONE;
+        else
+                model->stop(&__get_cpu_var(cpu_msrs));
+        return NMI_HANDLED;
 }
 
 static void nmi_cpu_save_registers(struct op_msrs *msrs)
@@ -363,12 +352,6 @@ static void nmi_cpu_setup(void *dummy)
         apic_write(APIC_LVTPC, APIC_DM_NMI);
 }
 
-static struct notifier_block profile_exceptions_nb = {
-        .notifier_call = profile_exceptions_notify,
-        .next = NULL,
-        .priority = NMI_LOCAL_LOW_PRIOR,
-};
-
 static void nmi_cpu_restore_registers(struct op_msrs *msrs)
 {
         struct op_msr *counters = msrs->counters;
@@ -508,7 +491,8 @@ static int nmi_setup(void)
         ctr_running = 0;
         /* make variables visible to the nmi handler: */
         smp_mb();
-        err = register_die_notifier(&profile_exceptions_nb);
+        err = register_nmi_handler(NMI_LOCAL, profile_exceptions_notify,
+                                        0, "oprofile");
         if (err)
                 goto fail;
 
@@ -538,7 +522,7 @@ static void nmi_shutdown(void)
         put_online_cpus();
         /* make variables visible to the nmi handler: */
         smp_mb();
-        unregister_die_notifier(&profile_exceptions_nb);
+        unregister_nmi_handler(NMI_LOCAL, "oprofile");
         msrs = &get_cpu_var(cpu_msrs);
         model->shutdown(msrs);
         free_msrs();
......
@@ -18,32 +18,16 @@
 #include <asm/apic.h>
 #include <asm/ptrace.h>
 
-static int profile_timer_exceptions_notify(struct notifier_block *self,
-                                           unsigned long val, void *data)
+static int profile_timer_exceptions_notify(unsigned int val, struct pt_regs *regs)
 {
-        struct die_args *args = (struct die_args *)data;
-        int ret = NOTIFY_DONE;
-
-        switch (val) {
-        case DIE_NMI:
-                oprofile_add_sample(args->regs, 0);
-                ret = NOTIFY_STOP;
-                break;
-        default:
-                break;
-        }
-        return ret;
+        oprofile_add_sample(regs, 0);
+        return NMI_HANDLED;
 }
 
-static struct notifier_block profile_timer_exceptions_nb = {
-        .notifier_call = profile_timer_exceptions_notify,
-        .next = NULL,
-        .priority = NMI_LOW_PRIOR,
-};
-
 static int timer_start(void)
 {
-        if (register_die_notifier(&profile_timer_exceptions_nb))
+        if (register_nmi_handler(NMI_LOCAL, profile_timer_exceptions_notify,
+                                        0, "oprofile-timer"))
                 return 1;
         return 0;
 }
@@ -51,7 +35,7 @@ static int timer_start(void)
 
 static void timer_stop(void)
 {
-        unregister_die_notifier(&profile_timer_exceptions_nb);
+        unregister_nmi_handler(NMI_LOCAL, "oprofile-timer");
         synchronize_sched();  /* Allow already-started NMIs to complete. */
 }
......
@@ -50,6 +50,7 @@
 #include <acpi/hed.h>
 #include <asm/mce.h>
 #include <asm/tlbflush.h>
+#include <asm/nmi.h>
 
 #include "apei-internal.h"
 
@@ -749,15 +750,11 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
         }
 }
 
-static int ghes_notify_nmi(struct notifier_block *this,
-                                  unsigned long cmd, void *data)
+static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
 {
         struct ghes *ghes, *ghes_global = NULL;
         int sev, sev_global = -1;
-        int ret = NOTIFY_DONE;
-
-        if (cmd != DIE_NMI)
-                return ret;
+        int ret = NMI_DONE;
 
         raw_spin_lock(&ghes_nmi_lock);
         list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
@@ -770,10 +767,10 @@ static int ghes_notify_nmi(struct notifier_block *this,
                         sev_global = sev;
                         ghes_global = ghes;
                 }
-                ret = NOTIFY_STOP;
+                ret = NMI_HANDLED;
         }
 
-        if (ret == NOTIFY_DONE)
+        if (ret == NMI_DONE)
                 goto out;
 
         if (sev_global >= GHES_SEV_PANIC) {
@@ -825,10 +822,6 @@ static struct notifier_block ghes_notifier_sci = {
         .notifier_call = ghes_notify_sci,
 };
 
-static struct notifier_block ghes_notifier_nmi = {
-        .notifier_call = ghes_notify_nmi,
-};
-
 static unsigned long ghes_esource_prealloc_size(
         const struct acpi_hest_generic *generic)
 {
@@ -918,7 +911,8 @@ static int __devinit ghes_probe(struct platform_device *ghes_dev)
                 ghes_estatus_pool_expand(len);
                 mutex_lock(&ghes_list_mutex);
                 if (list_empty(&ghes_nmi))
-                        register_die_notifier(&ghes_notifier_nmi);
+                        register_nmi_handler(NMI_LOCAL, ghes_notify_nmi, 0,
+                                                "ghes");
                 list_add_rcu(&ghes->list, &ghes_nmi);
                 mutex_unlock(&ghes_list_mutex);
                 break;
@@ -964,7 +958,7 @@ static int __devexit ghes_remove(struct platform_device *ghes_dev)
                 mutex_lock(&ghes_list_mutex);
                 list_del_rcu(&ghes->list);
                 if (list_empty(&ghes_nmi))
-                        unregister_die_notifier(&ghes_notifier_nmi);
+                        unregister_nmi_handler(NMI_LOCAL, "ghes");
                 mutex_unlock(&ghes_list_mutex);
                 /*
                  * To synchronize with NMI handler, ghes can only be
......
@@ -65,6 +65,7 @@
  * mechanism for it at that time.
  */
 #include <asm/kdebug.h>
+#include <asm/nmi.h>
 #define HAVE_DIE_NMI
 #endif
 
@@ -1077,17 +1078,8 @@ static void ipmi_unregister_watchdog(int ipmi_intf)
 
 #ifdef HAVE_DIE_NMI
 static int
-ipmi_nmi(struct notifier_block *self, unsigned long val, void *data)
+ipmi_nmi(unsigned int val, struct pt_regs *regs)
 {
-        struct die_args *args = data;
-
-        if (val != DIE_NMIUNKNOWN)
-                return NOTIFY_OK;
-
-        /* Hack, if it's a memory or I/O error, ignore it. */
-        if (args->err & 0xc0)
-                return NOTIFY_OK;
-
         /*
          * If we get here, it's an NMI that's not a memory or I/O
          * error.  We can't truly tell if it's from IPMI or not
@@ -1097,15 +1089,15 @@ ipmi_nmi(struct notifier_block *self, unsigned long val, void *data)
 
         if (testing_nmi) {
                 testing_nmi = 2;
-                return NOTIFY_STOP;
+                return NMI_HANDLED;
         }
 
         /* If we are not expecting a timeout, ignore it. */
         if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE)
-                return NOTIFY_OK;
+                return NMI_DONE;
 
         if (preaction_val != WDOG_PRETIMEOUT_NMI)
-                return NOTIFY_OK;
+                return NMI_DONE;
 
         /*
          * If no one else handled the NMI, we assume it was the IPMI
@@ -1120,12 +1112,8 @@ ipmi_nmi(struct notifier_block *self, unsigned long val, void *data)
                 panic(PFX "pre-timeout");
         }
 
-        return NOTIFY_STOP;
+        return NMI_HANDLED;
 }
 
-static struct notifier_block ipmi_nmi_handler = {
-        .notifier_call = ipmi_nmi
-};
 #endif
 
 static int wdog_reboot_handler(struct notifier_block *this,
@@ -1290,7 +1278,8 @@ static void check_parms(void)
                 }
         }
         if (do_nmi && !nmi_handler_registered) {
-                rv = register_die_notifier(&ipmi_nmi_handler);
+                rv = register_nmi_handler(NMI_UNKNOWN, ipmi_nmi, 0,
+                                                "ipmi");
                 if (rv) {
                         printk(KERN_WARNING PFX
                                "Can't register nmi handler\n");
@@ -1298,7 +1287,7 @@ static void check_parms(void)
                 } else
                         nmi_handler_registered = 1;
         } else if (!do_nmi && nmi_handler_registered) {
-                unregister_die_notifier(&ipmi_nmi_handler);
+                unregister_nmi_handler(NMI_UNKNOWN, "ipmi");
                 nmi_handler_registered = 0;
         }
 #endif
@@ -1336,7 +1325,7 @@ static int __init ipmi_wdog_init(void)
         if (rv) {
 #ifdef HAVE_DIE_NMI
                 if (nmi_handler_registered)
-                        unregister_die_notifier(&ipmi_nmi_handler);
+                        unregister_nmi_handler(NMI_UNKNOWN, "ipmi");
 #endif
                 atomic_notifier_chain_unregister(&panic_notifier_list,
                                                  &wdog_panic_notifier);
@@ -1357,7 +1346,7 @@ static void __exit ipmi_wdog_exit(void)
 #ifdef HAVE_DIE_NMI
         if (nmi_handler_registered)
-                unregister_die_notifier(&ipmi_nmi_handler);
+                unregister_nmi_handler(NMI_UNKNOWN, "ipmi");
 #endif
         atomic_notifier_chain_unregister(&panic_notifier_list,
......
@@ -477,15 +477,11 @@ static int hpwdt_time_left(void)
 /*
  *      NMI Handler
  */
-static int hpwdt_pretimeout(struct notifier_block *nb, unsigned long ulReason,
-                                void *data)
+static int hpwdt_pretimeout(unsigned int ulReason, struct pt_regs *regs)
 {
         unsigned long rom_pl;
         static int die_nmi_called;
 
-        if (ulReason != DIE_NMIUNKNOWN)
-                goto out;
-
         if (!hpwdt_nmi_decoding)
                 goto out;
 
@@ -508,7 +504,7 @@ static int hpwdt_pretimeout(struct notifier_block *nb, unsigned long ulReason,
                         "Management Log for details.\n");
 
 out:
-        return NOTIFY_OK;
+        return NMI_DONE;
 }
 #endif /* CONFIG_HPWDT_NMI_DECODING */
 
@@ -648,13 +644,6 @@ static struct miscdevice hpwdt_miscdev = {
         .fops   = &hpwdt_fops,
 };
 
-#ifdef CONFIG_HPWDT_NMI_DECODING
-static struct notifier_block die_notifier = {
-        .notifier_call = hpwdt_pretimeout,
-        .priority = 0,
-};
-#endif /* CONFIG_HPWDT_NMI_DECODING */
-
 /*
  *      Init & Exit
  */
@@ -740,10 +729,9 @@ static int __devinit hpwdt_init_nmi_decoding(struct pci_dev *dev)
          * die notify list to handle a critical NMI. The default is to
          * be last so other users of the NMI signal can function.
          */
-        if (priority)
-                die_notifier.priority = 0x7FFFFFFF;
-
-        retval = register_die_notifier(&die_notifier);
+        retval = register_nmi_handler(NMI_UNKNOWN, hpwdt_pretimeout,
+                                        (priority) ? NMI_FLAG_FIRST : 0,
+                                        "hpwdt");
         if (retval != 0) {
                 dev_warn(&dev->dev,
                         "Unable to register a die notifier (err=%d).\n",
@@ -763,7 +751,7 @@ static int __devinit hpwdt_init_nmi_decoding(struct pci_dev *dev)
 static void hpwdt_exit_nmi_decoding(void)
 {
-        unregister_die_notifier(&die_notifier);
+        unregister_nmi_handler(NMI_UNKNOWN, "hpwdt");
         if (cru_rom_addr)
                 iounmap(cru_rom_addr);
 }
......