Commit 5322ea58 authored by Peter Zijlstra's avatar Peter Zijlstra Committed by Ingo Molnar

perf/ring-buffer: Use regular variables for nesting

While the IRQ/NMI will nest, the nest-count will be invariant over the
actual exception, since it will decrement equal to increment.

This means we can -- carefully -- use a regular variable since the
typical LOAD-STORE race doesn't exist (similar to preempt_count).

This optimizes the ring-buffer for all LOAD-STORE architectures, since
they need to use atomic ops to implement local_t.
Suggested-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: acme@kernel.org
Cc: mark.rutland@arm.com
Cc: namhyung@kernel.org
Cc: yabinc@google.com
Link: http://lkml.kernel.org/r/20190517115418.481392777@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 4d839dd9
...@@ -24,7 +24,7 @@ struct ring_buffer { ...@@ -24,7 +24,7 @@ struct ring_buffer {
atomic_t poll; /* POLL_ for wakeups */ atomic_t poll; /* POLL_ for wakeups */
local_t head; /* write position */ local_t head; /* write position */
local_t nest; /* nested writers */ unsigned int nest; /* nested writers */
local_t events; /* event limit */ local_t events; /* event limit */
local_t wakeup; /* wakeup stamp */ local_t wakeup; /* wakeup stamp */
local_t lost; /* nr records lost */ local_t lost; /* nr records lost */
...@@ -41,7 +41,7 @@ struct ring_buffer { ...@@ -41,7 +41,7 @@ struct ring_buffer {
/* AUX area */ /* AUX area */
long aux_head; long aux_head;
local_t aux_nest; unsigned int aux_nest;
long aux_wakeup; /* last aux_watermark boundary crossed by aux_head */ long aux_wakeup; /* last aux_watermark boundary crossed by aux_head */
unsigned long aux_pgoff; unsigned long aux_pgoff;
int aux_nr_pages; int aux_nr_pages;
......
...@@ -38,7 +38,12 @@ static void perf_output_get_handle(struct perf_output_handle *handle) ...@@ -38,7 +38,12 @@ static void perf_output_get_handle(struct perf_output_handle *handle)
struct ring_buffer *rb = handle->rb; struct ring_buffer *rb = handle->rb;
preempt_disable(); preempt_disable();
local_inc(&rb->nest);
/*
* Avoid an explicit LOAD/STORE such that architectures with memops
* can use them.
*/
(*(volatile unsigned int *)&rb->nest)++;
handle->wakeup = local_read(&rb->wakeup); handle->wakeup = local_read(&rb->wakeup);
} }
...@@ -46,6 +51,17 @@ static void perf_output_put_handle(struct perf_output_handle *handle) ...@@ -46,6 +51,17 @@ static void perf_output_put_handle(struct perf_output_handle *handle)
{ {
struct ring_buffer *rb = handle->rb; struct ring_buffer *rb = handle->rb;
unsigned long head; unsigned long head;
unsigned int nest;
/*
* If this isn't the outermost nesting, we don't have to update
* @rb->user_page->data_head.
*/
nest = READ_ONCE(rb->nest);
if (nest > 1) {
WRITE_ONCE(rb->nest, nest - 1);
goto out;
}
again: again:
/* /*
...@@ -64,15 +80,6 @@ static void perf_output_put_handle(struct perf_output_handle *handle) ...@@ -64,15 +80,6 @@ static void perf_output_put_handle(struct perf_output_handle *handle)
* load above to be stale. * load above to be stale.
*/ */
/*
* If this isn't the outermost nesting, we don't have to update
* @rb->user_page->data_head.
*/
if (local_read(&rb->nest) > 1) {
local_dec(&rb->nest);
goto out;
}
/* /*
* Since the mmap() consumer (userspace) can run on a different CPU: * Since the mmap() consumer (userspace) can run on a different CPU:
* *
...@@ -108,7 +115,7 @@ static void perf_output_put_handle(struct perf_output_handle *handle) ...@@ -108,7 +115,7 @@ static void perf_output_put_handle(struct perf_output_handle *handle)
* write will (temporarily) publish a stale value. * write will (temporarily) publish a stale value.
*/ */
barrier(); barrier();
local_set(&rb->nest, 0); WRITE_ONCE(rb->nest, 0);
/* /*
* Ensure we decrement @rb->nest before we validate the @rb->head. * Ensure we decrement @rb->nest before we validate the @rb->head.
...@@ -116,7 +123,7 @@ static void perf_output_put_handle(struct perf_output_handle *handle) ...@@ -116,7 +123,7 @@ static void perf_output_put_handle(struct perf_output_handle *handle)
*/ */
barrier(); barrier();
if (unlikely(head != local_read(&rb->head))) { if (unlikely(head != local_read(&rb->head))) {
local_inc(&rb->nest); WRITE_ONCE(rb->nest, 1);
goto again; goto again;
} }
...@@ -355,6 +362,7 @@ void *perf_aux_output_begin(struct perf_output_handle *handle, ...@@ -355,6 +362,7 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
struct perf_event *output_event = event; struct perf_event *output_event = event;
unsigned long aux_head, aux_tail; unsigned long aux_head, aux_tail;
struct ring_buffer *rb; struct ring_buffer *rb;
unsigned int nest;
if (output_event->parent) if (output_event->parent)
output_event = output_event->parent; output_event = output_event->parent;
...@@ -385,13 +393,16 @@ void *perf_aux_output_begin(struct perf_output_handle *handle, ...@@ -385,13 +393,16 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
if (!refcount_inc_not_zero(&rb->aux_refcount)) if (!refcount_inc_not_zero(&rb->aux_refcount))
goto err; goto err;
nest = READ_ONCE(rb->aux_nest);
/* /*
* Nesting is not supported for AUX area, make sure nested * Nesting is not supported for AUX area, make sure nested
* writers are caught early * writers are caught early
*/ */
if (WARN_ON_ONCE(local_xchg(&rb->aux_nest, 1))) if (WARN_ON_ONCE(nest))
goto err_put; goto err_put;
WRITE_ONCE(rb->aux_nest, nest + 1);
aux_head = rb->aux_head; aux_head = rb->aux_head;
handle->rb = rb; handle->rb = rb;
...@@ -419,7 +430,7 @@ void *perf_aux_output_begin(struct perf_output_handle *handle, ...@@ -419,7 +430,7 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
if (!handle->size) { /* A, matches D */ if (!handle->size) { /* A, matches D */
event->pending_disable = smp_processor_id(); event->pending_disable = smp_processor_id();
perf_output_wakeup(handle); perf_output_wakeup(handle);
local_set(&rb->aux_nest, 0); WRITE_ONCE(rb->aux_nest, 0);
goto err_put; goto err_put;
} }
} }
...@@ -508,7 +519,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size) ...@@ -508,7 +519,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
handle->event = NULL; handle->event = NULL;
local_set(&rb->aux_nest, 0); WRITE_ONCE(rb->aux_nest, 0);
/* can't be last */ /* can't be last */
rb_free_aux(rb); rb_free_aux(rb);
ring_buffer_put(rb); ring_buffer_put(rb);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment