Commit de8548cf authored by Steven Rostedt (Red Hat), committed by Luis Henriques

ring-buffer: Do not wake up a splice waiter when page is not full

commit 1e0d6714 upstream.

When an application connects to the ring buffer via splice, it can only
read full pages. Splice does not work with partial pages. If there is
not enough data to fill a page, the splice command will either block
or return -EAGAIN (if set to nonblock).
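
For reference, this is roughly what the splice side looks like from user
space. The sketch below is not part of the commit; the tracefs path and the
4096-byte sub-buffer size are assumptions and may differ on a given system.

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Assumed path; older kernels expose it under /sys/kernel/debug/tracing. */
	int trace_fd = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
			    O_RDONLY);
	int pipefd[2];
	ssize_t ret;

	if (trace_fd < 0 || pipe(pipefd) < 0)
		return 1;

	/*
	 * Ask for one sub-buffer page (assumed to be 4096 bytes).  With
	 * SPLICE_F_NONBLOCK the call does not hand out a partial page;
	 * it returns -1/EAGAIN until a full page is available.
	 */
	ret = splice(trace_fd, NULL, pipefd[1], NULL, 4096, SPLICE_F_NONBLOCK);
	if (ret < 0 && errno == EAGAIN)
		fprintf(stderr, "no full page yet, try again later\n");

	close(trace_fd);
	return 0;
}

Without SPLICE_F_NONBLOCK the same call simply blocks until a full page
exists, which is the case the ring_buffer_wait() hunks below deal with.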

Code was added so that if the page is not full, the waiter just sleeps again.
The problem is, it will get woken up again on the next event. That
is, when something is written into the ring buffer, if there is a waiter
it will wake it up. The waiter would then check the buffer, see that
it still does not have enough data to fill a page and go back to sleep.
To make matters worse, the act of going back to sleep can itself cause
another event, which wakes the waiter back up again, only for it to find
there is still not enough data and go back to sleep. This produces
tremendous overhead and fills the ring buffer with noise.

For example, recording sched_switch on an idle system for 10 seconds
produces 25,350,475 events!!!

Create another wait queue for those waiters wanting full pages.
When an event is written, it only wakes up waiters if there's a full
page of data. It does not wake up the waiter if the page is not yet
full.
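
The shape of that change can be sketched outside the kernel as well. The
snippet below is only a rough user-space analogy, with pthread condition
variables standing in for the kernel wait queues; all of the names are
invented for the illustration and none of this code appears in the patch.

#include <pthread.h>
#include <stdbool.h>

struct rb_waiters {
	pthread_mutex_t lock;
	pthread_cond_t waiters;       /* woken on every new event */
	pthread_cond_t full_waiters;  /* woken only when a page has filled */
	bool full_waiters_pending;
};

/* Producer side: called after writing an event. */
static void wake_readers(struct rb_waiters *w, bool page_full)
{
	pthread_mutex_lock(&w->lock);
	/* "Any data" readers still get a wakeup on every event. */
	pthread_cond_broadcast(&w->waiters);
	/* Splice-style readers stay asleep until a full page exists. */
	if (page_full && w->full_waiters_pending) {
		w->full_waiters_pending = false;
		pthread_cond_broadcast(&w->full_waiters);
	}
	pthread_mutex_unlock(&w->lock);
}

In the actual patch the same decision is made in rb_wakeups(): the
full_waiters queue is only kicked when the reader page is no longer the
page being written to (the pagebusy check in the last hunk below).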

After this change, recording sched_switch on an idle system for 10
seconds produces only 800 events. Getting rid of 25,349,675 useless
events (99.9969% of events!!) is something to take seriously.

Cc: Rabin Vincent <rabin@rab.in>
Fixes: e30f53aa "tracing: Do not busy wait in buffer splice"
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Luis Henriques <luis.henriques@canonical.com>
parent ac627c9e
@@ -450,7 +450,10 @@ int ring_buffer_print_page_header(struct trace_seq *s)
 struct rb_irq_work {
 	struct irq_work work;
 	wait_queue_head_t waiters;
+	wait_queue_head_t full_waiters;
 	bool waiters_pending;
+	bool full_waiters_pending;
+	bool wakeup_full;
 };

 /*
@@ -532,6 +535,10 @@ static void rb_wake_up_waiters(struct irq_work *work)
 	struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);

 	wake_up_all(&rbwork->waiters);
+	if (rbwork->wakeup_full) {
+		rbwork->wakeup_full = false;
+		wake_up_all(&rbwork->full_waiters);
+	}
 }

 /**
@@ -556,9 +563,11 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
 	 * data in any cpu buffer, or a specific buffer, put the
 	 * caller on the appropriate wait queue.
 	 */
-	if (cpu == RING_BUFFER_ALL_CPUS)
+	if (cpu == RING_BUFFER_ALL_CPUS) {
 		work = &buffer->irq_work;
-	else {
+		/* Full only makes sense on per cpu reads */
+		full = false;
+	} else {
 		if (!cpumask_test_cpu(cpu, buffer->cpumask))
 			return -ENODEV;
 		cpu_buffer = buffer->buffers[cpu];
@@ -567,7 +576,10 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
 	while (true) {
-		prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
+		if (full)
+			prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
+		else
+			prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);

 		/*
 		 * The events can happen in critical sections where
@@ -589,7 +601,10 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
 		 * that is necessary is that the wake up happens after
 		 * a task has been queued. It's OK for spurious wake ups.
 		 */
-		work->waiters_pending = true;
+		if (full)
+			work->full_waiters_pending = true;
+		else
+			work->waiters_pending = true;

 		if (signal_pending(current)) {
 			ret = -EINTR;
@@ -618,7 +633,10 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
 		schedule();
 	}

-	finish_wait(&work->waiters, &wait);
+	if (full)
+		finish_wait(&work->full_waiters, &wait);
+	else
+		finish_wait(&work->waiters, &wait);

 	return ret;
 }
@@ -1233,6 +1251,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
 	init_completion(&cpu_buffer->update_done);

 	init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
 	init_waitqueue_head(&cpu_buffer->irq_work.waiters);
+	init_waitqueue_head(&cpu_buffer->irq_work.full_waiters);

 	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
 			    GFP_KERNEL, cpu_to_node(cpu));
@@ -2820,6 +2839,8 @@ static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
 static __always_inline void
 rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
 {
+	bool pagebusy;
+
 	if (buffer->irq_work.waiters_pending) {
 		buffer->irq_work.waiters_pending = false;
 		/* irq_work_queue() supplies it's own memory barriers */
@@ -2831,6 +2852,15 @@ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
 		/* irq_work_queue() supplies it's own memory barriers */
 		irq_work_queue(&cpu_buffer->irq_work.work);
 	}
+
+	pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
+
+	if (!pagebusy && cpu_buffer->irq_work.full_waiters_pending) {
+		cpu_buffer->irq_work.wakeup_full = true;
+		cpu_buffer->irq_work.full_waiters_pending = false;
+		/* irq_work_queue() supplies it's own memory barriers */
+		irq_work_queue(&cpu_buffer->irq_work.work);
+	}
 }

 /**