Commit 4fe757dd authored by Peter Zijlstra's avatar Peter Zijlstra Committed by Ingo Molnar

perf: Fix throttle logic

It was possible to call pmu::start() on an already running event. In
particular this led to some wreckage, as the hrtimer events would
re-initialize active timers.

This was due to throttled events being activated again by scheduling.
Scheduling in a context would add and force start events, resulting in
running events with a possible throttle status. The next tick to hit
that task will then try to unthrottle the event and call ->start() on
an already running event.
Reported-by: Jeff Moyer <jmoyer@redhat.com>
Cc: <stable@kernel.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 7d44ec19
@@ -782,6 +782,10 @@ void perf_event_disable(struct perf_event *event)
	raw_spin_unlock_irq(&ctx->lock);
}
#define MAX_INTERRUPTS (~0ULL)
static void perf_log_throttle(struct perf_event *event, int enable);
static int
event_sched_in(struct perf_event *event,
	       struct perf_cpu_context *cpuctx,
@@ -794,6 +798,17 @@ event_sched_in(struct perf_event *event,
	event->state = PERF_EVENT_STATE_ACTIVE;
	event->oncpu = smp_processor_id();
/*
* Unthrottle events, since we scheduled we might have missed several
* ticks already, also for a heavily scheduling task there is little
* guarantee it'll get a tick in a timely manner.
*/
if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
perf_log_throttle(event, 1);
event->hw.interrupts = 0;
}
	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
@@ -1596,10 +1611,6 @@ void __perf_event_task_sched_in(struct task_struct *task)
	}
}
#define MAX_INTERRUPTS (~0ULL)
static void perf_log_throttle(struct perf_event *event, int enable);
static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
{
	u64 frequency = event->attr.sample_freq;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment