Commit d303e9e9 authored by Tony Luck

Fix initialization of CMCI/CMCP interrupts

Back in 2010, during a revamp of the irq code, some initializations
were moved from ia64_mca_init() to ia64_mca_late_init() in

	commit c75f2aa1
	Cannot use register_percpu_irq() from ia64_mca_init()

But this was hideously wrong. First of all, these initializations
are now done far too late. Specifically, they happen after all the other
cpus have been brought up and have initialized their own CMC vectors from
smp_callin(). Also, ia64_mca_late_init() may be called from any cpu,
so the line:
	ia64_mca_cmc_vector_setup();       /* Setup vector on BSP */
is generally not executed on the BSP, and so the CMC vector isn't
set up at all on that processor.

Make use of the arch_early_irq_init() hook to get this code executed
at just the right moment: not too early, not too late.
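
For reference, the generic side of this hook is a weak no-op defined in
kernel/irq/irqdesc.c; an architecture opts in simply by supplying a strong
definition of the same name, which is what the irq.c hunk below does:

	/* Generic no-op default; an arch's strong definition wins at link time. */
	int __init __weak arch_early_irq_init(void)
	{
		return 0;
	}

start_kernel() invokes early_irq_init() (and through it this hook) after
setup_arch() but before the secondary cpus are brought online, which is
exactly the window this fix needs.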
Reported-by: Fred Hartnett <fred.hartnett@hp.com>
Tested-by: Fred Hartnett <fred.hartnett@hp.com>
Cc: stable@kernel.org # v2.6.37+
Signed-off-by: Tony Luck <tony.luck@intel.com>
parent 96edc754
arch/ia64/include/asm/mca.h
@@ -143,6 +143,7 @@ extern unsigned long __per_cpu_mca[NR_CPUS];
 extern int cpe_vector;
 extern int ia64_cpe_irq;
 extern void ia64_mca_init(void);
+extern void ia64_mca_irq_init(void);
 extern void ia64_mca_cpu_init(void *);
 extern void ia64_os_mca_dispatch(void);
 extern void ia64_os_mca_dispatch_end(void);
...
arch/ia64/kernel/irq.c
@@ -23,6 +23,8 @@
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
 
+#include <asm/mca.h>
+
 /*
  * 'what should we do if we get a hw irq event on an illegal vector'.
  * each architecture has to answer this themselves.
@@ -83,6 +85,12 @@ bool is_affinity_mask_valid(const struct cpumask *cpumask)
 #endif /* CONFIG_SMP */
 
+int __init arch_early_irq_init(void)
+{
+	ia64_mca_irq_init();
+	return 0;
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 unsigned int vectors_in_migration[NR_IRQS];
...
arch/ia64/kernel/mca.c
@@ -2074,22 +2074,16 @@ ia64_mca_init(void)
 	printk(KERN_INFO "MCA related initialization done\n");
 }
 
 /*
- * ia64_mca_late_init
- *
- * Opportunity to setup things that require initialization later
- * than ia64_mca_init. Setup a timer to poll for CPEs if the
- * platform doesn't support an interrupt driven mechanism.
- *
- * Inputs  : None
- * Outputs : Status
+ * These pieces cannot be done in ia64_mca_init() because it is called before
+ * early_irq_init() which would wipe out our percpu irq registrations. But we
+ * cannot leave them until ia64_mca_late_init() because by then all the other
+ * processors have been brought online and have set their own CMC vectors to
+ * point at a non-existent action. Called from arch_early_irq_init().
  */
-static int __init
-ia64_mca_late_init(void)
+void __init ia64_mca_irq_init(void)
 {
-	if (!mca_init)
-		return 0;
-
 	/*
 	 * Configure the CMCI/P vector and handler. Interrupts for CMC are
 	 * per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c).
@@ -2108,6 +2102,23 @@ ia64_mca_late_init(void)
 	/* Setup the CPEI/P handler */
 	register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
 #endif
+}
+
+/*
+ * ia64_mca_late_init
+ *
+ * Opportunity to setup things that require initialization later
+ * than ia64_mca_init. Setup a timer to poll for CPEs if the
+ * platform doesn't support an interrupt driven mechanism.
+ *
+ * Inputs  : None
+ * Outputs : Status
+ */
+static int __init
+ia64_mca_late_init(void)
+{
+	if (!mca_init)
+		return 0;
+
 	register_hotcpu_notifier(&mca_cpu_notifier);
...
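
The rationale in the new comment block can be illustrated with a toy model.
The following is a standalone sketch (my own illustration, not kernel code;
the function names mimic the kernel's but the bodies are stand-ins):
registrations made before early_irq_init() are wiped when the irq
descriptors are initialized, so the percpu registrations must happen in the
hook that runs just after it.

	#include <stdio.h>
	#include <string.h>

	#define NR_VECTORS 4
	static const char *handler[NR_VECTORS];	/* stand-in for irq_desc actions */

	static void register_percpu_irq(int vec, const char *name)
	{
		handler[vec] = name;			/* hang an "irqaction" off the vector */
	}

	static void early_irq_init(void)
	{
		memset(handler, 0, sizeof(handler));	/* descriptors (re)initialized */
	}

	int main(void)
	{
		/* Old, broken ordering: registering from ia64_mca_init() is too early. */
		register_percpu_irq(1, "cmc_irqaction");
		early_irq_init();			/* ...and the registration is gone */
		printf("too early: %s\n", handler[1] ? handler[1] : "(lost)");

		/* Fixed ordering: register from arch_early_irq_init(), i.e. just after. */
		early_irq_init();
		register_percpu_irq(1, "cmc_irqaction");
		printf("fixed:     %s\n", handler[1]);
		return 0;
	}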