Commit 12522227 authored by Peter Zijlstra, committed by Greg Kroah-Hartman

perf: Fix AUX buffer refcounting

commit 57ffc5ca upstream.

It's currently possible to drop the last refcount to the AUX buffer
from NMI context, which results in the expected fireworks.

The refcounting needs a bigger overhaul, but to cure the immediate
problem, delay the freeing by using an irq_work.

Reviewed-and-tested-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Reported-by: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20150618103249.GK19282@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 41e324d7
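
A note on the mechanism before the diff: irq_work_queue() is one of the few
things that is safe to call from NMI context, and the queued handler runs
shortly afterwards in hard-IRQ context, where freeing memory is permitted.
The standalone sketch below shows the shape of that deferred-free pattern;
it is not part of the patch, and "struct foo" with its helpers is a
hypothetical stand-in for the ring buffer:

/*
 * Minimal sketch of a deferred free via irq_work, assuming the kernel's
 * <linux/irq_work.h> API; "struct foo" and its helpers are made up.
 */
#include <linux/atomic.h>
#include <linux/irq_work.h>
#include <linux/slab.h>

struct foo {
	atomic_t refcount;
	struct irq_work irq_work;
};

/* Runs later, in hard-IRQ context, where kfree() is allowed. */
static void foo_free_work(struct irq_work *work)
{
	struct foo *f = container_of(work, struct foo, irq_work);

	kfree(f);
}

static void foo_init(struct foo *f)
{
	atomic_set(&f->refcount, 1);
	init_irq_work(&f->irq_work, foo_free_work);
}

/*
 * NMI-safe put: dropping the last reference only queues the irq_work;
 * the actual free happens once the handler runs.
 */
static void foo_put(struct foo *f)
{
	if (atomic_dec_and_test(&f->refcount))
		irq_work_queue(&f->irq_work);
}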
kernel/events/core.c
@@ -4411,14 +4411,6 @@ static void ring_buffer_wakeup(struct perf_event *event)
 	rcu_read_unlock();
 }
 
-static void rb_free_rcu(struct rcu_head *rcu_head)
-{
-	struct ring_buffer *rb;
-
-	rb = container_of(rcu_head, struct ring_buffer, rcu_head);
-	rb_free(rb);
-}
-
 struct ring_buffer *ring_buffer_get(struct perf_event *event)
 {
 	struct ring_buffer *rb;
kernel/events/internal.h
@@ -11,6 +11,7 @@
 struct ring_buffer {
 	atomic_t		refcount;
 	struct rcu_head		rcu_head;
+	struct irq_work		irq_work;
 #ifdef CONFIG_PERF_USE_VMALLOC
 	struct work_struct	work;
 	int			page_order;	/* allocation order */
@@ -55,6 +56,15 @@ struct ring_buffer {
 };
 
 extern void rb_free(struct ring_buffer *rb);
+
+static inline void rb_free_rcu(struct rcu_head *rcu_head)
+{
+	struct ring_buffer *rb;
+
+	rb = container_of(rcu_head, struct ring_buffer, rcu_head);
+	rb_free(rb);
+}
+
 extern struct ring_buffer *
 rb_alloc(int nr_pages, long watermark, int cpu, int flags);
 extern void perf_event_wakeup(struct perf_event *event);
kernel/events/ring_buffer.c
@@ -221,6 +221,8 @@ void perf_output_end(struct perf_output_handle *handle)
 	rcu_read_unlock();
 }
 
+static void rb_irq_work(struct irq_work *work);
+
 static void
 ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
 {
@@ -241,6 +243,16 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
 
 	INIT_LIST_HEAD(&rb->event_list);
 	spin_lock_init(&rb->event_lock);
+	init_irq_work(&rb->irq_work, rb_irq_work);
+}
+
+static void ring_buffer_put_async(struct ring_buffer *rb)
+{
+	if (!atomic_dec_and_test(&rb->refcount))
+		return;
+
+	rb->rcu_head.next = (void *)rb;
+	irq_work_queue(&rb->irq_work);
 }
 
 /*
@@ -319,7 +331,7 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
 	rb_free_aux(rb);
 
 err:
-	ring_buffer_put(rb);
+	ring_buffer_put_async(rb);
 	handle->event = NULL;
 
 	return NULL;
@@ -370,7 +382,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
 	local_set(&rb->aux_nest, 0);
 
 	rb_free_aux(rb);
-	ring_buffer_put(rb);
+	ring_buffer_put_async(rb);
 }
 
 /*
@@ -559,7 +571,18 @@ static void __rb_free_aux(struct ring_buffer *rb)
 void rb_free_aux(struct ring_buffer *rb)
 {
 	if (atomic_dec_and_test(&rb->aux_refcount))
+		irq_work_queue(&rb->irq_work);
+}
+
+static void rb_irq_work(struct irq_work *work)
+{
+	struct ring_buffer *rb = container_of(work, struct ring_buffer, irq_work);
+
+	if (!atomic_read(&rb->aux_refcount))
 		__rb_free_aux(rb);
+
+	if (rb->rcu_head.next == (void *)rb)
+		call_rcu(&rb->rcu_head, rb_free_rcu);
 }
 
 #ifndef CONFIG_PERF_USE_VMALLOC
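
Worth noting from the diff: a single rb_irq_work() handler serves both
refcounts. rb_free_aux() queues it when the AUX refcount hits zero, while
ring_buffer_put_async() additionally stores the self-pointer rb into
rb->rcu_head.next before queueing; the handler treats that self-pointer as
the signal that the ring buffer itself must go, and hands the final free to
call_rcu(), which reuses (and overwrites) the rcu_head. The handler
re-checks aux_refcount with atomic_read() because the same irq_work can
also be queued by ring_buffer_put_async() while AUX references remain.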