Commit 10246fa3 authored by Steven Rostedt (Red Hat), committed by Steven Rostedt

tracing: Use flag buffer_disabled for irqsoff tracer

If the ring buffer is disabled and the irqsoff tracer records a trace it
will clear out its buffer and lose the data it had previously recorded.

Currently there's a callback when writing to the tracing_on file, but if
tracing is disabled via the function tracer trigger, it will not inform
the irqsoff tracer to stop recording.

By using the "mirror" flag (buffer_disabled) in the trace_array, that keeps
track of the status of the trace_array's buffer, it gives the irqsoff
tracer a fast way to know if it should record a new trace or not.
The flag may be a little behind the real state of the buffer, but it
should not affect the trace too much. It's more important for the irqsoff
tracer to be fast.
Reported-by: Dave Jones <davej@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
parent b04d52e3
...@@ -226,9 +226,24 @@ cycle_t ftrace_now(int cpu) ...@@ -226,9 +226,24 @@ cycle_t ftrace_now(int cpu)
return ts; return ts;
} }
/**
* tracing_is_enabled - Show if global_trace has been disabled
*
* Shows if the global trace has been enabled or not. It uses the
* mirror flag "buffer_disabled" to be used in fast paths such as for
* the irqsoff tracer. But it may be inaccurate due to races. If you
* need to know the accurate state, use tracing_is_on() which is a little
* slower, but accurate.
*/
int tracing_is_enabled(void) int tracing_is_enabled(void)
{ {
return tracing_is_on(); /*
* For quick access (irqsoff uses this in fast path), just
* return the mirror variable of the state of the ring buffer.
* It's a little racy, but we don't really care.
*/
smp_rmb();
return !global_trace.buffer_disabled;
} }
/* /*
...@@ -341,6 +356,23 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | ...@@ -341,6 +356,23 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION; TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;
/**
 * tracer_tracing_on - enable recording into @tr's ring buffer
 * @tr: the trace array whose buffer should be (re-)enabled
 *
 * Turns the ring buffer back on (if one is allocated) and clears the
 * mirror flag so fast-path readers like tracing_is_enabled() see the
 * buffer as enabled again.
 */
void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}
/** /**
* tracing_on - enable tracing buffers * tracing_on - enable tracing buffers
* *
...@@ -349,15 +381,7 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | ...@@ -349,15 +381,7 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
*/ */
void tracing_on(void) void tracing_on(void)
{ {
if (global_trace.trace_buffer.buffer) tracer_tracing_on(&global_trace);
ring_buffer_record_on(global_trace.trace_buffer.buffer);
/*
* This flag is only looked at when buffers haven't been
* allocated yet. We don't really care about the race
* between setting this flag and actually turning
* on the buffer.
*/
global_trace.buffer_disabled = 0;
} }
EXPORT_SYMBOL_GPL(tracing_on); EXPORT_SYMBOL_GPL(tracing_on);
...@@ -551,6 +575,23 @@ void tracing_snapshot_alloc(void) ...@@ -551,6 +575,23 @@ void tracing_snapshot_alloc(void)
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc); EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */ #endif /* CONFIG_TRACER_SNAPSHOT */
/**
 * tracer_tracing_off - disable recording into @tr's ring buffer
 * @tr: the trace array whose buffer should be disabled
 *
 * Stops the ring buffer from recording (if one is allocated) and sets
 * the mirror flag so fast-path readers like tracing_is_enabled() see
 * the buffer as disabled.
 */
void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}
/** /**
* tracing_off - turn off tracing buffers * tracing_off - turn off tracing buffers
* *
...@@ -561,15 +602,7 @@ EXPORT_SYMBOL_GPL(tracing_snapshot_alloc); ...@@ -561,15 +602,7 @@ EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
*/ */
void tracing_off(void) void tracing_off(void)
{ {
if (global_trace.trace_buffer.buffer) tracer_tracing_off(&global_trace);
ring_buffer_record_off(global_trace.trace_buffer.buffer);
/*
* This flag is only looked at when buffers haven't been
* allocated yet. We don't really care about the race
* between setting this flag and actually turning
* on the buffer.
*/
global_trace.buffer_disabled = 1;
} }
EXPORT_SYMBOL_GPL(tracing_off); EXPORT_SYMBOL_GPL(tracing_off);
...@@ -579,14 +612,25 @@ void disable_trace_on_warning(void) ...@@ -579,14 +612,25 @@ void disable_trace_on_warning(void)
tracing_off(); tracing_off();
} }
/**
 * tracer_tracing_is_on - report whether @tr's ring buffer is recording
 * @tr: the trace array to query
 *
 * Returns the authoritative recording state of the ring buffer when one
 * has been allocated; otherwise falls back to the mirror flag.
 */
int tracer_tracing_is_on(struct trace_array *tr)
{
	struct ring_buffer *buffer = tr->trace_buffer.buffer;

	return buffer ? ring_buffer_record_is_on(buffer) : !tr->buffer_disabled;
}
/** /**
* tracing_is_on - show state of ring buffers enabled * tracing_is_on - show state of ring buffers enabled
*/ */
int tracing_is_on(void) int tracing_is_on(void)
{ {
if (global_trace.trace_buffer.buffer) return tracer_tracing_is_on(&global_trace);
return ring_buffer_record_is_on(global_trace.trace_buffer.buffer);
return !global_trace.buffer_disabled;
} }
EXPORT_SYMBOL_GPL(tracing_is_on); EXPORT_SYMBOL_GPL(tracing_is_on);
...@@ -3958,7 +4002,7 @@ static int tracing_wait_pipe(struct file *filp) ...@@ -3958,7 +4002,7 @@ static int tracing_wait_pipe(struct file *filp)
* *
* iter->pos will be 0 if we haven't read anything. * iter->pos will be 0 if we haven't read anything.
*/ */
if (!tracing_is_enabled() && iter->pos) if (!tracing_is_on() && iter->pos)
break; break;
} }
...@@ -5631,15 +5675,10 @@ rb_simple_read(struct file *filp, char __user *ubuf, ...@@ -5631,15 +5675,10 @@ rb_simple_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos) size_t cnt, loff_t *ppos)
{ {
struct trace_array *tr = filp->private_data; struct trace_array *tr = filp->private_data;
struct ring_buffer *buffer = tr->trace_buffer.buffer;
char buf[64]; char buf[64];
int r; int r;
if (buffer) r = tracer_tracing_is_on(tr);
r = ring_buffer_record_is_on(buffer);
else
r = 0;
r = sprintf(buf, "%d\n", r); r = sprintf(buf, "%d\n", r);
return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
...@@ -5661,11 +5700,11 @@ rb_simple_write(struct file *filp, const char __user *ubuf, ...@@ -5661,11 +5700,11 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
if (buffer) { if (buffer) {
mutex_lock(&trace_types_lock); mutex_lock(&trace_types_lock);
if (val) { if (val) {
ring_buffer_record_on(buffer); tracer_tracing_on(tr);
if (tr->current_trace->start) if (tr->current_trace->start)
tr->current_trace->start(tr); tr->current_trace->start(tr);
} else { } else {
ring_buffer_record_off(buffer); tracer_tracing_off(tr);
if (tr->current_trace->stop) if (tr->current_trace->stop)
tr->current_trace->stop(tr); tr->current_trace->stop(tr);
} }
......
...@@ -373,7 +373,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip) ...@@ -373,7 +373,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
struct trace_array_cpu *data; struct trace_array_cpu *data;
unsigned long flags; unsigned long flags;
if (likely(!tracer_enabled)) if (!tracer_enabled || !tracing_is_enabled())
return; return;
cpu = raw_smp_processor_id(); cpu = raw_smp_processor_id();
...@@ -416,7 +416,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip) ...@@ -416,7 +416,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
else else
return; return;
if (!tracer_enabled) if (!tracer_enabled || !tracing_is_enabled())
return; return;
data = per_cpu_ptr(tr->trace_buffer.data, cpu); data = per_cpu_ptr(tr->trace_buffer.data, cpu);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment