Commit f80747a4 authored by Peter Zijlstra, committed by Greg Kroah-Hartman

perf: Fix hotplug splat

commit e3703f8c upstream.

Drew Richardson reported that he could make the kernel go *boom* when hotplugging
while having perf events active.

It turned out that when you have a group event, the code in
__perf_event_exit_context() fails to remove the group siblings from
the context.

We then proceed with destroying and freeing the event, and when you
re-plug the CPU and try and add another event to that CPU, things go
*boom* because you've still got dead entries there.
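For context, here is a simplified sketch (not the real kernel definitions; the fields are trimmed to the ones relevant here and struct list_head is a stand-in) of how events hang off a context in kernels of this era: only group leaders sit on ctx->pinned_groups / ctx->flexible_groups via group_entry, while every event, siblings included, is also linked into ctx->event_list via event_entry. Walking only the two group lists therefore never visits the siblings.

struct list_head { struct list_head *next, *prev; };	/* stand-in for <linux/list.h> */

struct perf_event {
	struct list_head	group_entry;	/* leader: on a ctx group list; sibling: on the leader's sibling_list */
	struct list_head	event_entry;	/* always on ctx->event_list */
	struct list_head	sibling_list;	/* the group's siblings, if this event is the leader */
	struct perf_event	*group_leader;
};

struct perf_event_context {
	struct list_head	pinned_groups;		/* group leaders only */
	struct list_head	flexible_groups;	/* group leaders only */
	struct list_head	event_list;		/* every event, leaders and siblings */
};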
Reported-by: Drew Richardson <drew.richardson@arm.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will.deacon@arm.com>
Link: http://lkml.kernel.org/n/tip-k6v5wundvusvcseqj1si0oz0@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 23f0913c
@@ -7149,14 +7149,14 @@ static void perf_pmu_rotate_stop(struct pmu *pmu)
 static void __perf_event_exit_context(void *__info)
 {
 	struct perf_event_context *ctx = __info;
-	struct perf_event *event, *tmp;
+	struct perf_event *event;
 
 	perf_pmu_rotate_stop(ctx->pmu);
 
-	list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
-		__perf_remove_from_context(event);
-	list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
-		__perf_remove_from_context(event);
+	rcu_read_lock();
+	list_for_each_entry_rcu(event, &ctx->event_list, event_entry)
+		__perf_remove_from_context(event);
+	rcu_read_unlock();
 }
 
 static void perf_event_exit_cpu_context(int cpu)
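Read straight through (reconstructed from the hunk above, so purely illustrative), the function after the patch walks the complete event list under RCU instead of the two group lists:

static void __perf_event_exit_context(void *__info)
{
	struct perf_event_context *ctx = __info;
	struct perf_event *event;

	perf_pmu_rotate_stop(ctx->pmu);

	/*
	 * ctx->event_list contains every event, group siblings included,
	 * so nothing is left behind in the context when the CPU goes away.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(event, &ctx->event_list, event_entry)
		__perf_remove_from_context(event);
	rcu_read_unlock();
}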
@@ -7180,11 +7180,11 @@ static void perf_event_exit_cpu(int cpu)
 {
 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
 
+	perf_event_exit_cpu_context(cpu);
+
 	mutex_lock(&swhash->hlist_mutex);
 	swevent_hlist_release(swhash);
 	mutex_unlock(&swhash->hlist_mutex);
-
-	perf_event_exit_cpu_context(cpu);
 }
 #else
 static inline void perf_event_exit_cpu(int cpu) { }