Commit 2335a836 authored by Linus Torvalds

Merge branch 'x86-reboot-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 reboot changes from Ingo Molnar:
 "The biggest change is a gentler method of rebooting/stopping via IRQs
  first and then via NMIs.  There are several cleanups in the tree as
  well."

* 'x86-reboot-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/reboot: Update nonmi_ipi parameter
  x86/reboot: Use NMI to assist in shutting down if IRQ fails
  Revert "x86, reboot: Use NMI instead of REBOOT_VECTOR to stop cpus"
  x86/reboot: Clean up coding style
  x86/reboot: Reduce to a single DMI table for reboot quirks
parents 44bc40e1 3aac27ab
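
The sequence Ingo describes — stop the other CPUs with a maskable IRQ first, and fall back to an NMI only for CPUs that never respond — reduces to the two-stage pattern sketched below. This is a minimal illustrative sketch, not the kernel code: the helpers cpus_online(), send_reboot_ipi() and send_nmi_ipi() are hypothetical stand-ins for num_online_cpus() and apic->send_IPI_allbutself() with REBOOT_VECTOR/NMI_VECTOR, and the wait-flag handling of the real native_stop_other_cpus() is omitted.

/*
 * Illustrative sketch of the two-stage CPU shutdown in this merge.
 * All extern helpers are hypothetical stand-ins, not kernel APIs.
 */
#include <stdbool.h>

extern int  cpus_online(void);        /* stand-in for num_online_cpus() */
extern void send_reboot_ipi(void);    /* stand-in for send_IPI_allbutself(REBOOT_VECTOR) */
extern void send_nmi_ipi(void);       /* stand-in for send_IPI_allbutself(NMI_VECTOR) */
extern void udelay(unsigned long usecs);

static bool smp_no_nmi_ipi;           /* set when the nonmi_ipi boot parameter is given */

static void stop_other_cpus(void)
{
	unsigned long timeout;

	/*
	 * Stage 1: a maskable IRQ acts as a sync point, giving the other
	 * CPUs up to one second to drop spinlocks and re-enable irqs.
	 */
	if (cpus_online() > 1) {
		send_reboot_ipi();
		timeout = 1000 * 1000;           /* 1 s, in usecs */
		while (cpus_online() > 1 && timeout--)
			udelay(1);
	}

	/*
	 * Stage 2: an NMI reaches CPUs stuck with interrupts disabled;
	 * wait only 10 ms, since they either die quickly or not at all.
	 */
	if (cpus_online() > 1 && !smp_no_nmi_ipi) {
		send_nmi_ipi();
		timeout = 10 * 1000;             /* 10 ms, in usecs */
		while (cpus_online() > 1 && timeout--)
			udelay(1);
	}
}

Jumping straight to the NMI would risk deadlocking against CPUs that still hold spinlocks in shutdown/panic paths; the IRQ stage lets them unwind first, which is exactly the reasoning given in the new comment block in the diff below.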
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -109,6 +109,9 @@
  * about nothing of note with C stepping upwards.
  */
 
+static atomic_t stopping_cpu = ATOMIC_INIT(-1);
+static bool smp_no_nmi_ipi = false;
+
 /*
  * this function sends a 'reschedule' IPI to another CPU.
  * it goes straight through and wastes no time serializing
@@ -149,8 +152,6 @@ void native_send_call_func_ipi(const struct cpumask *mask)
 	free_cpumask_var(allbutself);
 }
 
-static atomic_t stopping_cpu = ATOMIC_INIT(-1);
-
 static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
 {
 	/* We are registered on stopping cpu too, avoid spurious NMI */
@@ -162,7 +163,19 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
 	return NMI_HANDLED;
 }
 
-static void native_nmi_stop_other_cpus(int wait)
+/*
+ * this function calls the 'stop' function on all other CPUs in the system.
+ */
+asmlinkage void smp_reboot_interrupt(void)
+{
+	ack_APIC_irq();
+	irq_enter();
+	stop_this_cpu(NULL);
+	irq_exit();
+}
+
+static void native_stop_other_cpus(int wait)
 {
 	unsigned long flags;
 	unsigned long timeout;
@@ -174,20 +187,25 @@ static void native_nmi_stop_other_cpus(int wait)
 	 * Use an own vector here because smp_call_function
 	 * does lots of things not suitable in a panic situation.
 	 */
+
+	/*
+	 * We start by using the REBOOT_VECTOR irq.
+	 * The irq is treated as a sync point to allow critical
+	 * regions of code on other cpus to release their spin locks
+	 * and re-enable irqs. Jumping straight to an NMI might
+	 * accidentally cause deadlocks with further shutdown/panic
+	 * code. By syncing, we give the cpus up to one second to
+	 * finish their work before we force them off with the NMI.
+	 */
 	if (num_online_cpus() > 1) {
 		/* did someone beat us here? */
 		if (atomic_cmpxchg(&stopping_cpu, -1, safe_smp_processor_id()) != -1)
 			return;
 
-		if (register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback,
-					 NMI_FLAG_FIRST, "smp_stop"))
-			/* Note: we ignore failures here */
-			return;
-
-		/* sync above data before sending NMI */
+		/* sync above data before sending IRQ */
 		wmb();
 
-		apic->send_IPI_allbutself(NMI_VECTOR);
+		apic->send_IPI_allbutself(REBOOT_VECTOR);
 
 		/*
 		 * Don't wait longer than a second if the caller
@@ -198,62 +216,36 @@ static void native_nmi_stop_other_cpus(int wait)
 			udelay(1);
 	}
 
-	local_irq_save(flags);
-	disable_local_APIC();
-	local_irq_restore(flags);
-}
-
-/*
- * this function calls the 'stop' function on all other CPUs in the system.
- */
-asmlinkage void smp_reboot_interrupt(void)
-{
-	ack_APIC_irq();
-	irq_enter();
-	stop_this_cpu(NULL);
-	irq_exit();
-}
-
-static void native_irq_stop_other_cpus(int wait)
-{
-	unsigned long flags;
-	unsigned long timeout;
-
-	if (reboot_force)
-		return;
-
-	/*
-	 * Use an own vector here because smp_call_function
-	 * does lots of things not suitable in a panic situation.
-	 * On most systems we could also use an NMI here,
-	 * but there are a few systems around where NMI
-	 * is problematic so stay with an non NMI for now
-	 * (this implies we cannot stop CPUs spinning with irq off
-	 * currently)
-	 */
-	if (num_online_cpus() > 1) {
-		apic->send_IPI_allbutself(REBOOT_VECTOR);
+	/* if the REBOOT_VECTOR didn't work, try with the NMI */
+	if ((num_online_cpus() > 1) && (!smp_no_nmi_ipi)) {
+		if (register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback,
+					 NMI_FLAG_FIRST, "smp_stop"))
+			/* Note: we ignore failures here */
+			/* Hope the REBOOT_IRQ is good enough */
+			goto finish;
+
+		/* sync above data before sending IRQ */
+		wmb();
+
+		pr_emerg("Shutting down cpus with NMI\n");
+
+		apic->send_IPI_allbutself(NMI_VECTOR);
 
 		/*
-		 * Don't wait longer than a second if the caller
+		 * Don't wait longer than a 10 ms if the caller
 		 * didn't ask us to wait.
 		 */
-		timeout = USEC_PER_SEC;
+		timeout = USEC_PER_MSEC * 10;
 		while (num_online_cpus() > 1 && (wait || timeout--))
 			udelay(1);
 	}
 
+finish:
 	local_irq_save(flags);
 	disable_local_APIC();
 	local_irq_restore(flags);
 }
 
-static void native_smp_disable_nmi_ipi(void)
-{
-	smp_ops.stop_other_cpus = native_irq_stop_other_cpus;
-}
-
 /*
  * Reschedule call back.
  */
@@ -287,7 +279,7 @@ void smp_call_function_single_interrupt(struct pt_regs *regs)
 static int __init nonmi_ipi_setup(char *str)
 {
-	native_smp_disable_nmi_ipi();
+	smp_no_nmi_ipi = true;
 	return 1;
 }
@@ -298,7 +290,7 @@ struct smp_ops smp_ops = {
 	.smp_prepare_cpus = native_smp_prepare_cpus,
 	.smp_cpus_done = native_smp_cpus_done,
-	.stop_other_cpus = native_nmi_stop_other_cpus,
+	.stop_other_cpus = native_stop_other_cpus,
 	.smp_send_reschedule = native_smp_send_reschedule,
 	.cpu_up = native_cpu_up,
...