Commit 8655e7e3 authored by Linus Torvalds


Merge branch 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  tracing: Do not record user stack trace from NMI context
  tracing: Disable buffer switching when starting or stopping trace
  tracing: Use same local variable when resetting the ring buffer
  function-graph: Init curr_ret_stack with ret_stack
  ring-buffer: Move disabled check into preempt disable section
  function-graph: Add tracing_thresh support to function_graph tracer
  tracing: Update the comm field in the right variable in update_max_tr
  function-graph: Use comment notation for func names of dangling '}'
  function-graph: Fix unused reference to ftrace_set_func()
  tracing: Fix warning in s_next of trace file ops
  tracing: Include irqflags headers from trace clock
parents 461d208c b6345879
...@@ -84,10 +84,6 @@ ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
-#endif
-
 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
 {
     struct ftrace_ops *op = ftrace_list;
...@@ -2276,6 +2272,8 @@ __setup("ftrace_filter=", set_ftrace_filter);
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
+static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
+
 static int __init set_graph_function(char *str)
 {
     strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
...@@ -3351,6 +3349,7 @@ void ftrace_graph_init_task(struct task_struct *t)
 {
     /* Make sure we do not use the parent ret_stack */
     t->ret_stack = NULL;
+    t->curr_ret_stack = -1;
 
     if (ftrace_graph_active) {
         struct ftrace_ret_stack *ret_stack;
...@@ -3360,7 +3359,6 @@ void ftrace_graph_init_task(struct task_struct *t)
                     GFP_KERNEL);
         if (!ret_stack)
             return;
-        t->curr_ret_stack = -1;
         atomic_set(&t->tracing_graph_pause, 0);
         atomic_set(&t->trace_overrun, 0);
         t->ftrace_timestamp = 0;
......
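The two `ftrace_graph_init_task` hunks above move the `curr_ret_stack = -1` reset ahead of everything else, so a forked task never exposes its parent's stale stack index while its own `ret_stack` is being set up. The general pattern is to initialize dependent fields before publishing the pointer that makes them reachable. A minimal userspace sketch of that idea, with hypothetical names and C11 atomics standing in for the kernel's barriers:

```c
#include <stdatomic.h>
#include <stddef.h>

/* Illustrative analog, not kernel code: reset the index field BEFORE
 * the pointer that "publishes" the structure, so a concurrent reader
 * never pairs a fresh pointer with an inherited, stale index. */
struct task_like {
    long curr_index;           /* analog of curr_ret_stack */
    _Atomic(void *) stack;     /* analog of ret_stack      */
};

void publish_stack(struct task_like *t, void *new_stack)
{
    t->curr_index = -1;        /* reset first */
    /* release: the index write is visible before the pointer */
    atomic_store_explicit(&t->stack, new_stack, memory_order_release);
}

int reader_sees_valid(struct task_like *t)
{
    void *s = atomic_load_explicit(&t->stack, memory_order_acquire);
    /* If s is non-NULL, curr_index is at least the -1 reset,
     * never the parent's leftover value. */
    return s != NULL && t->curr_index >= -1;
}
```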
...@@ -2233,12 +2233,12 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
     if (ring_buffer_flags != RB_BUFFERS_ON)
         return NULL;
 
-    if (atomic_read(&buffer->record_disabled))
-        return NULL;
-
     /* If we are tracing schedule, we don't want to recurse */
     resched = ftrace_preempt_disable();
 
+    if (atomic_read(&buffer->record_disabled))
+        goto out_nocheck;
+
     if (trace_recursive_lock())
         goto out_nocheck;
...@@ -2470,11 +2470,11 @@ int ring_buffer_write(struct ring_buffer *buffer,
     if (ring_buffer_flags != RB_BUFFERS_ON)
         return -EBUSY;
 
-    if (atomic_read(&buffer->record_disabled))
-        return -EBUSY;
-
     resched = ftrace_preempt_disable();
 
+    if (atomic_read(&buffer->record_disabled))
+        goto out;
+
     cpu = raw_smp_processor_id();
     if (!cpumask_test_cpu(cpu, buffer->cpumask))
......
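Both ring-buffer hunks move the `record_disabled` test from before `ftrace_preempt_disable()` to after it. That way a stopper can set the flag and then use `synchronize_sched()` to wait out every preempt-disabled region, certain that no writer passed the check and is still in flight. A rough userspace sketch of the pattern, with stub names in place of the kernel primitives:

```c
#include <stdatomic.h>
#include <stdbool.h>

atomic_int record_disabled;                     /* set by the stopper */

static inline void preempt_disable_stub(void) { /* no-op stand-in */ }
static inline void preempt_enable_stub(void)  { /* no-op stand-in */ }

bool try_write(void)
{
    preempt_disable_stub();
    /* Checking the flag here, inside the critical section, closes the
     * window where a writer passes an early check, is preempted, and
     * then writes after the stopper believes the buffer is quiet. */
    if (atomic_load(&record_disabled)) {
        preempt_enable_stub();
        return false;
    }
    /* ... reserve and commit the event ... */
    preempt_enable_stub();
    return true;
}
```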
...@@ -374,6 +374,21 @@ static int __init set_buf_size(char *str)
 }
 __setup("trace_buf_size=", set_buf_size);
 
+static int __init set_tracing_thresh(char *str)
+{
+    unsigned long threshhold;
+    int ret;
+
+    if (!str)
+        return 0;
+    ret = strict_strtoul(str, 0, &threshhold);
+    if (ret < 0)
+        return 0;
+    tracing_thresh = threshhold * 1000;
+    return 1;
+}
+__setup("tracing_thresh=", set_tracing_thresh);
+
 unsigned long nsecs_to_usecs(unsigned long nsecs)
 {
     return nsecs / 1000;
...@@ -579,9 +594,10 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
 static arch_spinlock_t ftrace_max_lock =
     (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
+unsigned long __read_mostly    tracing_thresh;
+
 #ifdef CONFIG_TRACER_MAX_TRACE
 unsigned long __read_mostly    tracing_max_latency;
-unsigned long __read_mostly    tracing_thresh;
 
 /*
  * Copy the new maximum trace into the separate maximum-trace
...@@ -592,7 +608,7 @@ static void
 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
     struct trace_array_cpu *data = tr->data[cpu];
-    struct trace_array_cpu *max_data = tr->data[cpu];
+    struct trace_array_cpu *max_data;
 
     max_tr.cpu = cpu;
     max_tr.time_start = data->preempt_timestamp;
...@@ -602,7 +618,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
     max_data->critical_start = data->critical_start;
     max_data->critical_end = data->critical_end;
 
-    memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
+    memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
     max_data->pid = tsk->pid;
     max_data->uid = task_uid(tsk);
     max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
...@@ -824,10 +840,10 @@ void unregister_tracer(struct tracer *type)
     mutex_unlock(&trace_types_lock);
 }
 
-static void __tracing_reset(struct trace_array *tr, int cpu)
+static void __tracing_reset(struct ring_buffer *buffer, int cpu)
 {
     ftrace_disable_cpu();
-    ring_buffer_reset_cpu(tr->buffer, cpu);
+    ring_buffer_reset_cpu(buffer, cpu);
     ftrace_enable_cpu();
 }
...@@ -839,7 +855,7 @@ void tracing_reset(struct trace_array *tr, int cpu)
     /* Make sure all commits have finished */
     synchronize_sched();
-    __tracing_reset(tr, cpu);
+    __tracing_reset(buffer, cpu);
     ring_buffer_record_enable(buffer);
 }
...@@ -857,7 +873,7 @@ void tracing_reset_online_cpus(struct trace_array *tr)
     tr->time_start = ftrace_now(tr->cpu);
 
     for_each_online_cpu(cpu)
-        __tracing_reset(tr, cpu);
+        __tracing_reset(buffer, cpu);
 
     ring_buffer_record_enable(buffer);
 }
...@@ -934,6 +950,8 @@ void tracing_start(void)
         goto out;
     }
 
+    /* Prevent the buffers from switching */
+    arch_spin_lock(&ftrace_max_lock);
     buffer = global_trace.buffer;
     if (buffer)
...@@ -943,6 +961,8 @@ void tracing_start(void)
     if (buffer)
         ring_buffer_record_enable(buffer);
 
+    arch_spin_unlock(&ftrace_max_lock);
+
     ftrace_start();
  out:
     spin_unlock_irqrestore(&tracing_start_lock, flags);
...@@ -964,6 +984,9 @@ void tracing_stop(void)
     if (trace_stop_count++)
         goto out;
 
+    /* Prevent the buffers from switching */
+    arch_spin_lock(&ftrace_max_lock);
+
     buffer = global_trace.buffer;
     if (buffer)
         ring_buffer_record_disable(buffer);
...@@ -972,6 +995,8 @@ void tracing_stop(void)
     if (buffer)
         ring_buffer_record_disable(buffer);
 
+    arch_spin_unlock(&ftrace_max_lock);
+
  out:
     spin_unlock_irqrestore(&tracing_start_lock, flags);
 }
...@@ -1259,6 +1284,13 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
     if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
         return;
 
+    /*
+     * NMIs can not handle page faults, even with fix ups.
+     * The save user stack can (and often does) fault.
+     */
+    if (unlikely(in_nmi()))
+        return;
+
     event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
                       sizeof(*entry), flags, pc);
     if (!event)
...@@ -1703,6 +1735,7 @@ static void *s_start(struct seq_file *m, loff_t *pos)
     ftrace_enable_cpu();
 
+    iter->leftover = 0;
     for (p = iter; p && l < *pos; p = s_next(m, p, &l))
         ;
...@@ -4248,10 +4281,10 @@ static __init int tracer_init_debugfs(void)
 #ifdef CONFIG_TRACER_MAX_TRACE
     trace_create_file("tracing_max_latency", 0644, d_tracer,
             &tracing_max_latency, &tracing_max_lat_fops);
+#endif
 
     trace_create_file("tracing_thresh", 0644, d_tracer,
             &tracing_thresh, &tracing_max_lat_fops);
-#endif
 
     trace_create_file("README", 0444, d_tracer,
             NULL, &tracing_readme_fops);
......
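Taken together, the trace.c hunks make `tracing_thresh` independent of CONFIG_TRACER_MAX_TRACE: it gains its own boot parameter (value given in microseconds, stored in nanoseconds, hence the `* 1000`), and its debugfs file is now created unconditionally. A minimal userspace sketch of the same parse-and-convert step, with `strtoul` standing in for the kernel's `strict_strtoul` and illustrative names:

```c
#include <stdio.h>
#include <stdlib.h>

static unsigned long tracing_thresh_ns;   /* illustrative global */

/* Parse "tracing_thresh=<usecs>" the way the boot hook does:
 * microseconds in, nanoseconds stored. */
static int set_tracing_thresh_demo(const char *str)
{
    char *end;
    unsigned long usecs;

    if (!str)
        return 0;
    usecs = strtoul(str, &end, 0);
    if (end == str)                        /* no digits parsed */
        return 0;
    tracing_thresh_ns = usecs * 1000;
    return 1;
}

int main(void)
{
    set_tracing_thresh_demo("100");        /* e.g. tracing_thresh=100 */
    printf("threshold = %lu ns\n", tracing_thresh_ns);  /* 100000 */
    return 0;
}
```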
...@@ -396,9 +396,10 @@ extern int process_new_ksym_entry(char *ksymname, int op, unsigned long addr);
 extern unsigned long nsecs_to_usecs(unsigned long nsecs);
 
+extern unsigned long tracing_thresh;
+
 #ifdef CONFIG_TRACER_MAX_TRACE
 extern unsigned long tracing_max_latency;
-extern unsigned long tracing_thresh;
 
 void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
 void update_max_tr_single(struct trace_array *tr,
......
...@@ -13,6 +13,7 @@
  * Tracer plugins will chose a default from these clocks.
  */
 #include <linux/spinlock.h>
+#include <linux/irqflags.h>
 #include <linux/hardirq.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
......
...@@ -237,6 +237,14 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
     return ret;
 }
 
+int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
+{
+    if (tracing_thresh)
+        return 1;
+    else
+        return trace_graph_entry(trace);
+}
+
 static void __trace_graph_return(struct trace_array *tr,
                 struct ftrace_graph_ret *trace,
                 unsigned long flags,
...@@ -290,13 +298,26 @@ void set_graph_array(struct trace_array *tr)
     smp_mb();
 }
 
+void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
+{
+    if (tracing_thresh &&
+        (trace->rettime - trace->calltime < tracing_thresh))
+        return;
+    else
+        trace_graph_return(trace);
+}
+
 static int graph_trace_init(struct trace_array *tr)
 {
     int ret;
 
     set_graph_array(tr);
-    ret = register_ftrace_graph(&trace_graph_return,
-                    &trace_graph_entry);
+    if (tracing_thresh)
+        ret = register_ftrace_graph(&trace_graph_thresh_return,
+                        &trace_graph_thresh_entry);
+    else
+        ret = register_ftrace_graph(&trace_graph_return,
+                        &trace_graph_entry);
     if (ret)
         return ret;
     tracing_start_cmdline_record();
...@@ -920,7 +941,7 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
         if (!ret)
             return TRACE_TYPE_PARTIAL_LINE;
     } else {
-        ret = trace_seq_printf(s, "} (%ps)\n", (void *)trace->func);
+        ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
         if (!ret)
             return TRACE_TYPE_PARTIAL_LINE;
     }
......
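With `tracing_thresh` set, the graph tracer registers the thresh variants: the entry callback returns 1 without recording anything (so the return hook still fires), and the return callback records only functions whose duration reaches the threshold. A self-contained sketch of that filtering decision, with hypothetical stand-ins for the kernel callbacks and types:

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel types/handlers. */
struct graph_ret {
    unsigned long long calltime;   /* ns */
    unsigned long long rettime;    /* ns */
};

static unsigned long long thresh_ns = 100000;   /* 100 us in ns */

/* Entry side: with a threshold set, emit no entry event but still
 * return "trace it" so the return hook fires; mirrors
 * trace_graph_thresh_entry returning 1. */
static int thresh_entry(void)
{
    return thresh_ns ? 1 : 0;
}

/* Return side: only functions at least thresh_ns long are recorded. */
static bool thresh_return_records(const struct graph_ret *g)
{
    return g->rettime - g->calltime >= thresh_ns;
}

int main(void)
{
    struct graph_ret fast = { .calltime = 0, .rettime = 50000 };
    struct graph_ret slow = { .calltime = 0, .rettime = 250000 };

    printf("entry event recorded: %s\n", thresh_entry() ? "no" : "yes");
    printf("fast recorded: %d\n", thresh_return_records(&fast));  /* 0 */
    printf("slow recorded: %d\n", thresh_return_records(&slow));  /* 1 */
    return 0;
}
```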