Commit d769041f authored by Steven Rostedt's avatar Steven Rostedt Committed by Ingo Molnar

ring_buffer: implement new locking

The old "lock always" scheme had issues with lockdep, and was not very
efficient anyway.

This patch does a new design to be partially lockless on writes.
Writes will add new entries to the per cpu pages by simply disabling
interrupts. When a write needs to go to another page, then it will
grab the lock.

A new "read page" has been added so that the reader can pull out a page
from the ring buffer to read without worrying about the writer writing over
it. This allows us to not take the lock for all reads. The lock is
now only taken when a read needs to go to a new page.

This is far from lockless, and interrupts still need to be disabled,
but it is a step towards a more lockless solution, and it also
solves a lot of the issues that were noticed by the first conversion
of ftrace to the ring buffers.

Note: the ring_buffer_{un}lock API has been removed.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 70255b5e
...@@ -63,9 +63,6 @@ ring_buffer_event_time_delta(struct ring_buffer_event *event) ...@@ -63,9 +63,6 @@ ring_buffer_event_time_delta(struct ring_buffer_event *event)
return event->time_delta; return event->time_delta;
} }
void ring_buffer_lock(struct ring_buffer *buffer, unsigned long *flags);
void ring_buffer_unlock(struct ring_buffer *buffer, unsigned long flags);
/* /*
* size is in bytes for each per CPU buffer. * size is in bytes for each per CPU buffer.
*/ */
......
This diff is collapsed.
...@@ -42,6 +42,20 @@ ...@@ -42,6 +42,20 @@
unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX; unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX;
unsigned long __read_mostly tracing_thresh; unsigned long __read_mostly tracing_thresh;
static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
static inline void ftrace_disable_cpu(void)
{
preempt_disable();
local_inc(&__get_cpu_var(ftrace_cpu_disabled));
}
static inline void ftrace_enable_cpu(void)
{
local_dec(&__get_cpu_var(ftrace_cpu_disabled));
preempt_enable();
}
static cpumask_t __read_mostly tracing_buffer_mask; static cpumask_t __read_mostly tracing_buffer_mask;
#define for_each_tracing_cpu(cpu) \ #define for_each_tracing_cpu(cpu) \
...@@ -406,7 +420,9 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) ...@@ -406,7 +420,9 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
tr->buffer = max_tr.buffer; tr->buffer = max_tr.buffer;
max_tr.buffer = buf; max_tr.buffer = buf;
ftrace_disable_cpu();
ring_buffer_reset(tr->buffer); ring_buffer_reset(tr->buffer);
ftrace_enable_cpu();
__update_max_tr(tr, tsk, cpu); __update_max_tr(tr, tsk, cpu);
__raw_spin_unlock(&ftrace_max_lock); __raw_spin_unlock(&ftrace_max_lock);
...@@ -428,9 +444,13 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) ...@@ -428,9 +444,13 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
WARN_ON_ONCE(!irqs_disabled()); WARN_ON_ONCE(!irqs_disabled());
__raw_spin_lock(&ftrace_max_lock); __raw_spin_lock(&ftrace_max_lock);
ftrace_disable_cpu();
ring_buffer_reset(max_tr.buffer); ring_buffer_reset(max_tr.buffer);
ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu); ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);
ftrace_enable_cpu();
WARN_ON_ONCE(ret); WARN_ON_ONCE(ret);
__update_max_tr(tr, tsk, cpu); __update_max_tr(tr, tsk, cpu);
...@@ -543,7 +563,9 @@ void unregister_tracer(struct tracer *type) ...@@ -543,7 +563,9 @@ void unregister_tracer(struct tracer *type)
void tracing_reset(struct trace_array *tr, int cpu) void tracing_reset(struct trace_array *tr, int cpu)
{ {
ftrace_disable_cpu();
ring_buffer_reset_cpu(tr->buffer, cpu); ring_buffer_reset_cpu(tr->buffer, cpu);
ftrace_enable_cpu();
} }
#define SAVED_CMDLINES 128 #define SAVED_CMDLINES 128
...@@ -654,6 +676,10 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data, ...@@ -654,6 +676,10 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data,
struct ftrace_entry *entry; struct ftrace_entry *entry;
unsigned long irq_flags; unsigned long irq_flags;
/* If we are reading the ring buffer, don't trace */
if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
return;
event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
&irq_flags); &irq_flags);
if (!event) if (!event)
...@@ -870,8 +896,14 @@ enum trace_file_type { ...@@ -870,8 +896,14 @@ enum trace_file_type {
static void trace_iterator_increment(struct trace_iterator *iter, int cpu) static void trace_iterator_increment(struct trace_iterator *iter, int cpu)
{ {
/* Don't allow ftrace to trace into the ring buffers */
ftrace_disable_cpu();
iter->idx++; iter->idx++;
if (iter->buffer_iter[iter->cpu])
ring_buffer_read(iter->buffer_iter[iter->cpu], NULL); ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
ftrace_enable_cpu();
} }
static struct trace_entry * static struct trace_entry *
...@@ -880,9 +912,19 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts) ...@@ -880,9 +912,19 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts)
struct ring_buffer_event *event; struct ring_buffer_event *event;
struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu]; struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];
/* Don't allow ftrace to trace into the ring buffers */
ftrace_disable_cpu();
if (buf_iter)
event = ring_buffer_iter_peek(buf_iter, ts); event = ring_buffer_iter_peek(buf_iter, ts);
else
event = ring_buffer_peek(iter->tr->buffer, cpu, ts);
ftrace_enable_cpu();
return event ? ring_buffer_event_data(event) : NULL; return event ? ring_buffer_event_data(event) : NULL;
} }
static struct trace_entry * static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts) __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
{ {
...@@ -938,7 +980,10 @@ static void *find_next_entry_inc(struct trace_iterator *iter) ...@@ -938,7 +980,10 @@ static void *find_next_entry_inc(struct trace_iterator *iter)
static void trace_consume(struct trace_iterator *iter) static void trace_consume(struct trace_iterator *iter)
{ {
/* Don't allow ftrace to trace into the ring buffers */
ftrace_disable_cpu();
ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts); ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts);
ftrace_enable_cpu();
} }
static void *s_next(struct seq_file *m, void *v, loff_t *pos) static void *s_next(struct seq_file *m, void *v, loff_t *pos)
...@@ -991,10 +1036,14 @@ static void *s_start(struct seq_file *m, loff_t *pos) ...@@ -991,10 +1036,14 @@ static void *s_start(struct seq_file *m, loff_t *pos)
iter->cpu = 0; iter->cpu = 0;
iter->idx = -1; iter->idx = -1;
ftrace_disable_cpu();
for_each_tracing_cpu(cpu) { for_each_tracing_cpu(cpu) {
ring_buffer_iter_reset(iter->buffer_iter[cpu]); ring_buffer_iter_reset(iter->buffer_iter[cpu]);
} }
ftrace_enable_cpu();
for (p = iter; p && l < *pos; p = s_next(m, p, &l)) for (p = iter; p && l < *pos; p = s_next(m, p, &l))
; ;
...@@ -1242,7 +1291,16 @@ void trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter) ...@@ -1242,7 +1291,16 @@ void trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter)
cont = (struct trace_field_cont *)ent; cont = (struct trace_field_cont *)ent;
if (ok) if (ok)
ok = (trace_seq_printf(s, "%s", cont->buf) > 0); ok = (trace_seq_printf(s, "%s", cont->buf) > 0);
ftrace_disable_cpu();
if (iter->buffer_iter[iter->cpu])
ring_buffer_read(iter->buffer_iter[iter->cpu], NULL); ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
else
ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
ftrace_enable_cpu();
ent = peek_next_entry(iter, iter->cpu, NULL); ent = peek_next_entry(iter, iter->cpu, NULL);
} while (ent && ent->type == TRACE_CONT); } while (ent && ent->type == TRACE_CONT);
...@@ -1683,9 +1741,15 @@ static int trace_empty(struct trace_iterator *iter) ...@@ -1683,9 +1741,15 @@ static int trace_empty(struct trace_iterator *iter)
int cpu; int cpu;
for_each_tracing_cpu(cpu) { for_each_tracing_cpu(cpu) {
if (iter->buffer_iter[cpu]) {
if (!ring_buffer_iter_empty(iter->buffer_iter[cpu])) if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
return 0; return 0;
} else {
if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
return 0;
}
} }
return TRACE_TYPE_HANDLED; return TRACE_TYPE_HANDLED;
} }
...@@ -1776,8 +1840,10 @@ __tracing_open(struct inode *inode, struct file *file, int *ret) ...@@ -1776,8 +1840,10 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
iter->pos = -1; iter->pos = -1;
for_each_tracing_cpu(cpu) { for_each_tracing_cpu(cpu) {
iter->buffer_iter[cpu] = iter->buffer_iter[cpu] =
ring_buffer_read_start(iter->tr->buffer, cpu); ring_buffer_read_start(iter->tr->buffer, cpu);
if (!iter->buffer_iter[cpu]) if (!iter->buffer_iter[cpu])
goto fail_buffer; goto fail_buffer;
} }
...@@ -2341,7 +2407,6 @@ static atomic_t tracing_reader; ...@@ -2341,7 +2407,6 @@ static atomic_t tracing_reader;
static int tracing_open_pipe(struct inode *inode, struct file *filp) static int tracing_open_pipe(struct inode *inode, struct file *filp)
{ {
struct trace_iterator *iter; struct trace_iterator *iter;
int cpu;
if (tracing_disabled) if (tracing_disabled)
return -ENODEV; return -ENODEV;
...@@ -2362,38 +2427,17 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp) ...@@ -2362,38 +2427,17 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
iter->trace = current_trace; iter->trace = current_trace;
filp->private_data = iter; filp->private_data = iter;
for_each_tracing_cpu(cpu) {
iter->buffer_iter[cpu] =
ring_buffer_read_start(iter->tr->buffer, cpu);
if (!iter->buffer_iter[cpu])
goto fail_buffer;
}
if (iter->trace->pipe_open) if (iter->trace->pipe_open)
iter->trace->pipe_open(iter); iter->trace->pipe_open(iter);
mutex_unlock(&trace_types_lock); mutex_unlock(&trace_types_lock);
return 0; return 0;
fail_buffer:
for_each_tracing_cpu(cpu) {
if (iter->buffer_iter[cpu])
ring_buffer_read_finish(iter->buffer_iter[cpu]);
}
mutex_unlock(&trace_types_lock);
return -ENOMEM;
} }
static int tracing_release_pipe(struct inode *inode, struct file *file) static int tracing_release_pipe(struct inode *inode, struct file *file)
{ {
struct trace_iterator *iter = file->private_data; struct trace_iterator *iter = file->private_data;
int cpu;
for_each_tracing_cpu(cpu) {
if (iter->buffer_iter[cpu])
ring_buffer_read_finish(iter->buffer_iter[cpu]);
}
kfree(iter); kfree(iter);
atomic_dec(&tracing_reader); atomic_dec(&tracing_reader);
...@@ -2429,7 +2473,6 @@ tracing_read_pipe(struct file *filp, char __user *ubuf, ...@@ -2429,7 +2473,6 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos) size_t cnt, loff_t *ppos)
{ {
struct trace_iterator *iter = filp->private_data; struct trace_iterator *iter = filp->private_data;
unsigned long flags;
#ifdef CONFIG_FTRACE #ifdef CONFIG_FTRACE
int ftrace_save; int ftrace_save;
#endif #endif
...@@ -2528,7 +2571,6 @@ tracing_read_pipe(struct file *filp, char __user *ubuf, ...@@ -2528,7 +2571,6 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
ftrace_enabled = 0; ftrace_enabled = 0;
#endif #endif
smp_wmb(); smp_wmb();
ring_buffer_lock(iter->tr->buffer, &flags);
while (find_next_entry_inc(iter) != NULL) { while (find_next_entry_inc(iter) != NULL) {
enum print_line_t ret; enum print_line_t ret;
...@@ -2547,7 +2589,6 @@ tracing_read_pipe(struct file *filp, char __user *ubuf, ...@@ -2547,7 +2589,6 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
break; break;
} }
ring_buffer_unlock(iter->tr->buffer, flags);
#ifdef CONFIG_FTRACE #ifdef CONFIG_FTRACE
ftrace_enabled = ftrace_save; ftrace_enabled = ftrace_save;
#endif #endif
...@@ -3010,8 +3051,8 @@ void ftrace_dump(void) ...@@ -3010,8 +3051,8 @@ void ftrace_dump(void)
static struct trace_iterator iter; static struct trace_iterator iter;
static cpumask_t mask; static cpumask_t mask;
static int dump_ran; static int dump_ran;
unsigned long flags, irq_flags; unsigned long flags;
int cnt = 0; int cnt = 0, cpu;
/* only one dump */ /* only one dump */
spin_lock_irqsave(&ftrace_dump_lock, flags); spin_lock_irqsave(&ftrace_dump_lock, flags);
...@@ -3023,6 +3064,10 @@ void ftrace_dump(void) ...@@ -3023,6 +3064,10 @@ void ftrace_dump(void)
/* No turning back! */ /* No turning back! */
ftrace_kill_atomic(); ftrace_kill_atomic();
for_each_tracing_cpu(cpu) {
atomic_inc(&global_trace.data[cpu]->disabled);
}
printk(KERN_TRACE "Dumping ftrace buffer:\n"); printk(KERN_TRACE "Dumping ftrace buffer:\n");
iter.tr = &global_trace; iter.tr = &global_trace;
...@@ -3037,8 +3082,6 @@ void ftrace_dump(void) ...@@ -3037,8 +3082,6 @@ void ftrace_dump(void)
cpus_clear(mask); cpus_clear(mask);
ring_buffer_lock(iter.tr->buffer, &irq_flags);
while (!trace_empty(&iter)) { while (!trace_empty(&iter)) {
if (!cnt) if (!cnt)
...@@ -3066,8 +3109,6 @@ void ftrace_dump(void) ...@@ -3066,8 +3109,6 @@ void ftrace_dump(void)
else else
printk(KERN_TRACE "---------------------------------\n"); printk(KERN_TRACE "---------------------------------\n");
ring_buffer_unlock(iter.tr->buffer, irq_flags);
out: out:
spin_unlock_irqrestore(&ftrace_dump_lock, flags); spin_unlock_irqrestore(&ftrace_dump_lock, flags);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment