Commit 451d24d1 authored by Peter Zijlstra's avatar Peter Zijlstra Committed by Ingo Molnar

perf/core: Fix crash in perf_event_read()

Alexei had his box explode because doing read() on a package
(rapl/uncore) event that isn't currently scheduled in ends up doing an
out-of-bounds load.

Rework the code to more explicitly deal with event->oncpu being -1.
Reported-by: Alexei Starovoitov <alexei.starovoitov@gmail.com>
Tested-by: Alexei Starovoitov <ast@kernel.org>
Tested-by: David Carrillo-Cisneros <davidcc@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: eranian@google.com
Fixes: d6a2f903 ("perf/core: Introduce PMU_EV_CAP_READ_ACTIVE_PKG")
Link: http://lkml.kernel.org/r/20170131102710.GL6515@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 53e74a11
@@ -3487,14 +3487,15 @@ struct perf_read_data {
 	int ret;
 };
 
-static int find_cpu_to_read(struct perf_event *event, int local_cpu)
+static int __perf_event_read_cpu(struct perf_event *event, int event_cpu)
 {
-	int event_cpu = event->oncpu;
 	u16 local_pkg, event_pkg;
 
 	if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) {
-		event_pkg = topology_physical_package_id(event_cpu);
-		local_pkg = topology_physical_package_id(local_cpu);
+		int local_cpu = smp_processor_id();
+
+		event_pkg = topology_physical_package_id(event_cpu);
+		local_pkg = topology_physical_package_id(local_cpu);
 
 		if (event_pkg == local_pkg)
 			return local_cpu;
@@ -3624,7 +3625,7 @@ u64 perf_event_read_local(struct perf_event *event)
 
 static int perf_event_read(struct perf_event *event, bool group)
 {
-	int ret = 0, cpu_to_read, local_cpu;
+	int event_cpu, ret = 0;
 
 	/*
 	 * If event is enabled and currently active on a CPU, update the
@@ -3637,21 +3638,25 @@ static int perf_event_read(struct perf_event *event, bool group)
 			.ret = 0,
 		};
 
-		local_cpu = get_cpu();
-		cpu_to_read = find_cpu_to_read(event, local_cpu);
-		put_cpu();
+		event_cpu = READ_ONCE(event->oncpu);
+		if ((unsigned)event_cpu >= nr_cpu_ids)
+			return 0;
+
+		preempt_disable();
+		event_cpu = __perf_event_read_cpu(event, event_cpu);
 
 		/*
 		 * Purposely ignore the smp_call_function_single() return
 		 * value.
 		 *
-		 * If event->oncpu isn't a valid CPU it means the event got
+		 * If event_cpu isn't a valid CPU it means the event got
 		 * scheduled out and that will have updated the event count.
 		 *
 		 * Therefore, either way, we'll have an up-to-date event count
 		 * after this.
 		 */
-		(void)smp_call_function_single(cpu_to_read, __perf_event_read, &data, 1);
+		(void)smp_call_function_single(event_cpu, __perf_event_read, &data, 1);
+		preempt_enable();
+
 		ret = data.ret;
 	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
 		struct perf_event_context *ctx = event->ctx;
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment