Commit 9029a5e3 authored by Ingo Molnar

perf_counter: x86: Protect against infinite loops in intel_pmu_handle_irq()

intel_pmu_handle_irq() can lock up in an infinite loop if the hardware
does not allow the acking of irqs. Alas, this happened in testing, so
make the handler robust against it and emit a warning if it happens again.

Also, clean up the IRQ handlers a bit.

[ Impact: improve perfcounter irq/nmi handling robustness ]
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 1c80f4b5
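For readers outside the kernel tree, the shape of the fix is a bounded retry loop around the ack/handle cycle. Below is a minimal userspace sketch of the same pattern, not the kernel code itself: read_status() is a hypothetical stand-in for intel_pmu_get_status(), and fprintf() stands in for WARN_ONCE(). The bound of 100 iterations mirrors the check in the diff; it only needs to be large enough that a healthy PMU never reaches it.

#include <stdio.h>

/*
 * Hypothetical stand-in for intel_pmu_get_status(): in the kernel this
 * reads the PMU's global overflow status.  Here it simulates the failure
 * mode the commit guards against: status bits that never clear because
 * the hardware refuses the ack.
 */
static unsigned long long read_status(void)
{
	return 0x1ULL;		/* counter 0 permanently flagged */
}

static int handle_irq(void)
{
	unsigned long long status;
	int loops = 0;

	status = read_status();
	if (!status)
		return 0;	/* nothing pending */

again:
	/*
	 * Bound the retry loop: if the status never clears, give up with
	 * a warning instead of spinning forever in interrupt context.
	 */
	if (++loops > 100) {
		fprintf(stderr, "perfcounters: irq loop stuck!\n");
		return 1;
	}

	/* ... service and ack the flagged counters here ... */

	status = read_status();
	if (status)
		goto again;

	return 1;
}

int main(void)
{
	return handle_irq() ? 0 : 1;
}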
@@ -722,9 +722,13 @@ static void intel_pmu_save_and_restart(struct perf_counter *counter)
  */
 static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
 {
-	int bit, cpu = smp_processor_id();
+	struct cpu_hw_counters *cpuc;
+	struct cpu_hw_counters;
+	int bit, cpu, loops;
 	u64 ack, status;
-	struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu);
+
+	cpu = smp_processor_id();
+	cpuc = &per_cpu(cpu_hw_counters, cpu);
 
 	perf_disable();
 	status = intel_pmu_get_status();
@@ -733,7 +737,13 @@ static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
 		return 0;
 	}
 
+	loops = 0;
 again:
+	if (++loops > 100) {
+		WARN_ONCE(1, "perfcounters: irq loop stuck!\n");
+		return 1;
+	}
+
 	inc_irq_stat(apic_perf_irqs);
 	ack = status;
 	for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
@@ -765,13 +775,14 @@ static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
 
 static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
 {
-	int cpu = smp_processor_id();
-	struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu);
-	u64 val;
-	int handled = 0;
+	int cpu, idx, throttle = 0, handled = 0;
+	struct cpu_hw_counters *cpuc;
 	struct perf_counter *counter;
 	struct hw_perf_counter *hwc;
-	int idx, throttle = 0;
+	u64 val;
+
+	cpu = smp_processor_id();
+	cpuc = &per_cpu(cpu_hw_counters, cpu);
 
 	if (++cpuc->interrupts == PERFMON_MAX_INTERRUPTS) {
 		throttle = 1;
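A note on WARN_ONCE(): the kernel macro emits the warning (with a stack trace) only the first time its condition is hit, so a PMU that keeps misbehaving does not flood the log even though the handler may bail out repeatedly. A rough userspace analogue of that once-only behavior, using a static guard (warn_once() here is a hypothetical helper, not a kernel API):

#include <stdio.h>

/*
 * Report the condition the first time it is seen, then stay quiet
 * on later hits -- loosely modeled on the kernel's WARN_ONCE().
 */
static void warn_once(int condition, const char *msg)
{
	static int warned;

	if (condition && !warned) {
		warned = 1;
		fprintf(stderr, "WARNING: %s", msg);
	}
}

int main(void)
{
	int i;

	/* Only the first call prints; the remaining 99 are silent. */
	for (i = 0; i < 100; i++)
		warn_once(1, "perfcounters: irq loop stuck!\n");

	return 0;
}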