Commit 44377277 authored by Alexander Shishkin, committed by Ingo Molnar

perf: Disable all pmus on unthrottling and rescheduling

Currently, only one PMU in a context gets disabled during unthrottling
and event_sched_{out,in}(); however, events in one context may belong to
different PMUs, which results in a PMU being reprogrammed while it is
still enabled.

This means that mixed PMU use (which is rare in itself) could produce
completely unreliable results: corrupted events, bogus counts, and so on.

This patch temporarily disables the PMU that corresponds to each event
in the context while that event is being modified.
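For illustration, the resulting pattern looks roughly like the sketch
below. This is a condensed, hypothetical helper, not verbatim kernel
code: the stub declarations stand in for the real definitions in
<linux/perf_event.h>, and modify_event() does not exist in the kernel.

	/*
	 * Condensed sketch of the bracketing pattern this patch applies.
	 * perf_pmu_disable()/perf_pmu_enable() are the real perf-core API
	 * (they appear in the diff below); everything else is a stub.
	 */
	struct pmu;
	struct perf_event { struct pmu *pmu; };

	void perf_pmu_disable(struct pmu *pmu);
	void perf_pmu_enable(struct pmu *pmu);

	static void modify_event(struct perf_event *event)
	{
		/*
		 * Disable the PMU the event actually belongs to: events in
		 * one context may come from different PMUs, so disabling
		 * only ctx->pmu is not enough.
		 */
		perf_pmu_disable(event->pmu);

		/* ... stop the event, adjust its period, restart it ... */

		perf_pmu_enable(event->pmu);
	}

Since perf_pmu_disable() keeps a per-PMU disable count and only calls
into the hardware on the 0->1 transition, bracketing each event's own
PMU this way nests safely even where ctx->pmu is already disabled
around the surrounding loop.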
Signed-off-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Link: http://lkml.kernel.org/r/1387196256-8030-1-git-send-email-alexander.shishkin@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent cf30d52e
@@ -1396,6 +1396,8 @@ event_sched_out(struct perf_event *event,
 	if (event->state != PERF_EVENT_STATE_ACTIVE)
 		return;
 
+	perf_pmu_disable(event->pmu);
+
 	event->state = PERF_EVENT_STATE_INACTIVE;
 	if (event->pending_disable) {
 		event->pending_disable = 0;
@@ -1412,6 +1414,8 @@ event_sched_out(struct perf_event *event,
 		ctx->nr_freq--;
 	if (event->attr.exclusive || !cpuctx->active_oncpu)
 		cpuctx->exclusive = 0;
+
+	perf_pmu_enable(event->pmu);
 }
 
 static void
@@ -1652,6 +1656,7 @@ event_sched_in(struct perf_event *event,
 		 struct perf_event_context *ctx)
 {
 	u64 tstamp = perf_event_time(event);
+	int ret = 0;
 
 	if (event->state <= PERF_EVENT_STATE_OFF)
 		return 0;
@@ -1674,10 +1679,13 @@ event_sched_in(struct perf_event *event,
 	 */
 	smp_wmb();
 
+	perf_pmu_disable(event->pmu);
+
 	if (event->pmu->add(event, PERF_EF_START)) {
 		event->state = PERF_EVENT_STATE_INACTIVE;
 		event->oncpu = -1;
-		return -EAGAIN;
+		ret = -EAGAIN;
+		goto out;
 	}
 
 	event->tstamp_running += tstamp - event->tstamp_stopped;
@@ -1693,7 +1701,10 @@ event_sched_in(struct perf_event *event,
 	if (event->attr.exclusive)
 		cpuctx->exclusive = 1;
 
-	return 0;
+out:
+	perf_pmu_enable(event->pmu);
+
+	return ret;
 }
 
 static int
@@ -2743,6 +2754,8 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
 		if (!event_filter_match(event))
 			continue;
 
+		perf_pmu_disable(event->pmu);
+
 		hwc = &event->hw;
 
 		if (hwc->interrupts == MAX_INTERRUPTS) {
@@ -2752,7 +2765,7 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
 		}
 
 		if (!event->attr.freq || !event->attr.sample_freq)
-			continue;
+			goto next;
 
 		/*
 		 * stop the event and update event->count
@@ -2774,6 +2787,8 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
 			perf_adjust_period(event, period, delta, false);
 
 		event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
+	next:
+		perf_pmu_enable(event->pmu);
 	}
 
 	perf_pmu_enable(ctx->pmu);