Commit 27a96c4f authored by Linus Torvalds

Merge tag 'perf_urgent_for_v5.17_rc2_p2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fixes from Borislav Petkov:

 - Prevent accesses to the per-CPU cgroup context list from any CPU
   other than the one it belongs to, to avoid list corruption (see the
   is_cgroup_event() sketch before the diff below)

 - Make sure parent events are always woken up, to avoid indefinite
   hangs in the traced workload (see the note after the diff)

* tag 'perf_urgent_for_v5.17_rc2_p2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/core: Fix cgroup event list management
  perf: Always wake the parent event
parents 24f4db1f c5de60cd
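
Both guards added in the first two hunks below key off is_cgroup_event(). For context, a minimal sketch of that helper as I believe it reads in kernel/events/core.c around v5.17-rc (it is not part of this diff; the !CONFIG_CGROUP_PERF build uses a stub that always returns 0):

static inline int is_cgroup_event(struct perf_event *event)
{
	/* A cgroup-scoped event carries a non-NULL perf_cgroup pointer. */
	return event->cgrp != NULL;
}

Because cgrp_cpuctx_list lives in the per-CPU struct perf_cpu_context, an event for which this returns true must always take the event_function_call() IPI path; the diff therefore makes both no-IPI fast paths skip cgroup events.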
kernel/events/core.c
@@ -2462,7 +2462,11 @@ static void perf_remove_from_context(struct perf_event *event, unsigned long fla
 	 * event_function_call() user.
 	 */
 	raw_spin_lock_irq(&ctx->lock);
-	if (!ctx->is_active) {
+	/*
+	 * Cgroup events are per-cpu events, and must IPI because of
+	 * cgrp_cpuctx_list.
+	 */
+	if (!ctx->is_active && !is_cgroup_event(event)) {
 		__perf_remove_from_context(event, __get_cpu_context(ctx),
 					   ctx, (void *)flags);
 		raw_spin_unlock_irq(&ctx->lock);
@@ -2895,11 +2899,14 @@ perf_install_in_context(struct perf_event_context *ctx,
 	 * perf_event_attr::disabled events will not run and can be initialized
 	 * without IPI. Except when this is the first event for the context, in
 	 * that case we need the magic of the IPI to set ctx->is_active.
+	 * Similarly, cgroup events for the context also needs the IPI to
+	 * manipulate the cgrp_cpuctx_list.
 	 *
 	 * The IOC_ENABLE that is sure to follow the creation of a disabled
 	 * event will issue the IPI and reprogram the hardware.
 	 */
-	if (__perf_effective_state(event) == PERF_EVENT_STATE_OFF && ctx->nr_events) {
+	if (__perf_effective_state(event) == PERF_EVENT_STATE_OFF &&
+	    ctx->nr_events && !is_cgroup_event(event)) {
 		raw_spin_lock_irq(&ctx->lock);
 		if (ctx->task == TASK_TOMBSTONE) {
 			raw_spin_unlock_irq(&ctx->lock);
@@ -5985,6 +5992,8 @@ static void ring_buffer_attach(struct perf_event *event,
 	struct perf_buffer *old_rb = NULL;
 	unsigned long flags;
 
+	WARN_ON_ONCE(event->parent);
+
 	if (event->rb) {
 		/*
 		 * Should be impossible, we set this when removing
@@ -6042,6 +6051,9 @@ static void ring_buffer_wakeup(struct perf_event *event)
 {
 	struct perf_buffer *rb;
 
+	if (event->parent)
+		event = event->parent;
+
 	rcu_read_lock();
 	rb = rcu_dereference(event->rb);
 	if (rb) {
@@ -6055,6 +6067,9 @@ struct perf_buffer *ring_buffer_get(struct perf_event *event)
 {
 	struct perf_buffer *rb;
 
+	if (event->parent)
+		event = event->parent;
+
 	rcu_read_lock();
 	rb = rcu_dereference(event->rb);
 	if (rb) {
@@ -6763,7 +6778,7 @@ static unsigned long perf_prepare_sample_aux(struct perf_event *event,
 	if (WARN_ON_ONCE(READ_ONCE(sampler->oncpu) != smp_processor_id()))
 		goto out;
 
-	rb = ring_buffer_get(sampler->parent ? sampler->parent : sampler);
+	rb = ring_buffer_get(sampler);
 	if (!rb)
 		goto out;
@@ -6829,7 +6844,7 @@ static void perf_aux_sample_output(struct perf_event *event,
 	if (WARN_ON_ONCE(!sampler || !data->aux_size))
 		return;
 
-	rb = ring_buffer_get(sampler->parent ? sampler->parent : sampler);
+	rb = ring_buffer_get(sampler);
 	if (!rb)
 		return;
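The last four hunks implement the second fix. A ring buffer is only ever attached to a parent event (hence the new WARN_ON_ONCE() in ring_buffer_attach()), so buffer lookups and wakeups issued on an inherited child event must be redirected to the parent; otherwise the wakeup can be lost and, per the pull message above, the traced workload hangs indefinitely. Moving the redirection into ring_buffer_get() and ring_buffer_wakeup() also lets callers such as perf_prepare_sample_aux() drop the open-coded ternary. A sketch of the resulting helper, assuming the v5.17-rc layout of kernel/events/core.c:

struct perf_buffer *ring_buffer_get(struct perf_event *event)
{
	struct perf_buffer *rb;

	/* Only parent events own a ring buffer; children borrow it. */
	if (event->parent)
		event = event->parent;

	rcu_read_lock();
	rb = rcu_dereference(event->rb);
	if (rb) {
		/* Skip a buffer whose last reference is already being dropped. */
		if (!refcount_inc_not_zero(&rb->refcount))
			rb = NULL;
	}
	rcu_read_unlock();

	return rb;
}

Callers pair this with ring_buffer_put() once they are done with the buffer.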