Commit 061120ae authored by Chen, Gong, committed by Ingo Molnar

x86/mce: Don't use percpu workqueues

An MCE is a rare event. Therefore, there's no need to have
per-CPU instances of both normal and IRQ workqueues. Make them
both global.
Signed-off-by: Chen, Gong <gong.chen@linux.intel.com>
[ Fold in subsequent patch from Rui/Boris/Tony for early boot logging. ]
Signed-off-by: Tony Luck <tony.luck@intel.com>
[ Massage commit message. ]
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1439396985-12812-4-git-send-email-bp@alien8.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 648ed940
@@ -110,7 +110,8 @@ DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
  */
 mce_banks_t mce_banks_ce_disabled;
 
-static DEFINE_PER_CPU(struct work_struct, mce_work);
+static struct work_struct mce_work;
+static struct irq_work mce_irq_work;
 
 static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
 
@@ -526,11 +527,9 @@ int mce_available(struct cpuinfo_x86 *c)
 static void mce_schedule_work(void)
 {
 	if (!mce_ring_empty())
-		schedule_work(this_cpu_ptr(&mce_work));
+		schedule_work(&mce_work);
 }
 
-static DEFINE_PER_CPU(struct irq_work, mce_irq_work);
-
 static void mce_irq_work_cb(struct irq_work *entry)
 {
 	mce_notify_irq();
@@ -551,7 +550,7 @@ static void mce_report_event(struct pt_regs *regs)
 		return;
 	}
 
-	irq_work_queue(this_cpu_ptr(&mce_irq_work));
+	irq_work_queue(&mce_irq_work);
 }
 
 /*
@@ -1742,8 +1741,6 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
 	__mcheck_cpu_init_generic();
 	__mcheck_cpu_init_vendor(c);
 	__mcheck_cpu_init_timer();
-	INIT_WORK(this_cpu_ptr(&mce_work), mce_process_work);
-	init_irq_work(this_cpu_ptr(&mce_irq_work), &mce_irq_work_cb);
 }
 
 /*
@@ -2064,6 +2061,9 @@ int __init mcheck_init(void)
 	mcheck_intel_therm_init();
 	mcheck_vendor_init_severity();
 
+	INIT_WORK(&mce_work, mce_process_work);
+	init_irq_work(&mce_irq_work, mce_irq_work_cb);
+
 	return 0;
 }
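
For context, here is a minimal, self-contained sketch of the pattern the patch switches to: a single global work_struct plus a single global irq_work, initialized once and queued on demand instead of keeping per-CPU copies. This is not code from the commit; the demo_* names and the module wrapper are purely illustrative.

/*
 * Sketch only: one global work_struct and one global irq_work,
 * analogous to the mce_work/mce_irq_work pair after this patch.
 */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/irq_work.h>
#include <linux/printk.h>

static struct work_struct demo_work;
static struct irq_work demo_irq_work;

/* Process-context handler, run by the system workqueue. */
static void demo_work_fn(struct work_struct *work)
{
	pr_info("demo: deferred work running\n");
}

/* IRQ-work callback: safe place to kick the regular work item. */
static void demo_irq_work_fn(struct irq_work *entry)
{
	schedule_work(&demo_work);
}

static int __init demo_init(void)
{
	/* One-time initialization, as mcheck_init() now does for MCE. */
	INIT_WORK(&demo_work, demo_work_fn);
	init_irq_work(&demo_irq_work, demo_irq_work_fn);

	/* From interrupt-like context one would queue the irq_work: */
	irq_work_queue(&demo_irq_work);
	return 0;
}

static void __exit demo_exit(void)
{
	irq_work_sync(&demo_irq_work);
	flush_work(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Since an MCE is rare, a single global instance of each object is sufficient; concurrent queueing is harmless because schedule_work() and irq_work_queue() simply ignore an item that is already pending.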