Commit f54b2fe4 authored by Linus Torvalds

Merge branch 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  tracing: limit the number of loops the ring buffer self test can make
  tracing: have function trace select kallsyms
  tracing: disable tracing while testing ring buffer
  tracing/function-graph-tracer: trace the idle tasks
parents 83ff1af9 ed4a2f37
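
Of the four fixes, the first and third both harden the ring-buffer self test against a misbehaving tracer: one bounds the consume loop, the other stops the producer while the test drains the buffer. Stripped of kernel specifics, the bounded-consume idea looks like the sketch below (illustrative user-space C; ring_consume(), BUF_ENTRIES and the stub body are invented for the example, not kernel API):

#include <stdio.h>
#include <stdbool.h>

#define BUF_ENTRIES 1024	/* most entries the buffer can possibly hold */

/* Stub consumer: pretend the buffer is already drained. */
static bool ring_consume(void)
{
	return false;
}

int main(void)
{
	unsigned int loops = 0;

	while (ring_consume()) {
		/*
		 * Consuming more entries than the buffer can hold means
		 * the producer is still writing (or the buffer is
		 * corrupt); bail out instead of spinning forever.
		 */
		if (loops++ > BUF_ENTRIES) {
			fprintf(stderr, ".. bad ring buffer\n");
			return 1;
		}
	}
	printf("buffer drained after %u entries\n", loops);
	return 0;
}

The guard turns a potential infinite loop into a detectable failure, which is exactly what a self test wants.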
@@ -52,6 +52,7 @@ config FUNCTION_TRACER
 	depends on HAVE_FUNCTION_TRACER
 	depends on DEBUG_KERNEL
 	select FRAME_POINTER
+	select KALLSYMS
 	select TRACING
 	select CONTEXT_SWITCH_TRACER
 	help
@@ -238,6 +239,7 @@ config STACK_TRACER
 	depends on DEBUG_KERNEL
 	select FUNCTION_TRACER
 	select STACKTRACE
+	select KALLSYMS
 	help
	  This special tracer records the maximum stack footprint of the
	  kernel and displays it in debugfs/tracing/stack_trace.
...
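
For context on the two new select KALLSYMS lines: both tracers resolve raw code addresses to function names in their output, which requires the kernel symbol table to be built in. An illustrative fragment (not part of this commit) of the kind of symbolic printing involved; with CONFIG_KALLSYMS=y the %pS printk format yields "name+0x.../0x...", without it only a bare pointer value:

#include <linux/kernel.h>

/* Illustrative only: tracer output is this kind of symbolic print.
 * %pS needs CONFIG_KALLSYMS=y to emit "name+0x.../0x..." rather
 * than a raw address. */
static void report_traced_function(unsigned long ip)
{
	printk(KERN_INFO "traced: %pS\n", (void *)ip);
}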
@@ -2033,7 +2033,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
 static int start_graph_tracing(void)
 {
 	struct ftrace_ret_stack **ret_stack_list;
-	int ret;
+	int ret, cpu;
 
 	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
 					sizeof(struct ftrace_ret_stack *),
@@ -2042,6 +2042,10 @@ static int start_graph_tracing(void)
 	if (!ret_stack_list)
 		return -ENOMEM;
 
+	/* The cpu_boot init_task->ret_stack will never be freed */
+	for_each_online_cpu(cpu)
+		ftrace_graph_init_task(idle_task(cpu));
+
 	do {
 		ret = alloc_retstack_tasklist(ret_stack_list);
 	} while (ret == -EAGAIN);
...
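
The loop added above matters because alloc_retstack_tasklist() only hands return stacks to tasks found on the global task list, and the per-CPU idle tasks never appear there; without it, functions running in the idle loop are silently skipped by the graph tracer for lack of a ret_stack. A rough sketch of what ftrace_graph_init_task() does for each idle task (approximate reconstruction for illustration, not the verbatim kernel source):

#include <linux/ftrace.h>
#include <linux/sched.h>
#include <linux/slab.h>

/* Approximate sketch, not verbatim kernel code: give the task its
 * own return-address stack so the graph tracer can temporarily
 * hijack return addresses of the functions it executes. */
void ftrace_graph_init_task(struct task_struct *t)
{
	t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH *
			       sizeof(struct ftrace_ret_stack),
			       GFP_KERNEL);
	if (!t->ret_stack)
		return;
	t->curr_ret_stack = -1;	/* stack starts empty */
}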
@@ -23,10 +23,20 @@ static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
 {
 	struct ring_buffer_event *event;
 	struct trace_entry *entry;
+	unsigned int loops = 0;
 
 	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
 		entry = ring_buffer_event_data(event);
 
+		/*
+		 * The ring buffer holds at most trace_buf_size entries;
+		 * if we loop more times than that, something is wrong
+		 * with the ring buffer.
+		 */
+		if (loops++ > trace_buf_size) {
+			printk(KERN_CONT ".. bad ring buffer ");
+			goto failed;
+		}
 		if (!trace_valid_entry(entry)) {
 			printk(KERN_CONT ".. invalid entry %d ",
 				entry->type);
@@ -57,11 +67,20 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
 	cnt = ring_buffer_entries(tr->buffer);
 
+	/*
+	 * trace_test_buffer_cpu() runs a while loop to consume all data.
+	 * If the calling tracer is broken and constantly fills the
+	 * buffer, that loop will run forever and hard lock the box.
+	 * Disable the ring buffer while the test runs to prevent such
+	 * a lockup.
+	 */
+	tracing_off();
 	for_each_possible_cpu(cpu) {
 		ret = trace_test_buffer_cpu(tr, cpu);
 		if (ret)
 			break;
 	}
+	tracing_on();
 	__raw_spin_unlock(&ftrace_max_lock);
 	local_irq_restore(flags);
...
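
A property the self test relies on: tracing_off() only inhibits new writes to the ring buffer, it does not discard entries already recorded, so the consume loop still sees everything the tracer produced before tracing_on() returns the buffer to service. The pattern in isolation (illustrative sketch; check_cpu_buffer() is a hypothetical helper standing in for trace_test_buffer_cpu(), not kernel API):

#include <linux/cpumask.h>
#include <linux/kernel.h>

/* Hypothetical helper, not kernel API. */
static int check_cpu_buffer(int cpu);

static int drain_and_verify(void)
{
	int cpu, ret = 0;

	tracing_off();			/* freeze the producer side */
	for_each_possible_cpu(cpu) {
		ret = check_cpu_buffer(cpu);
		if (ret)
			break;
	}
	tracing_on();			/* put the buffer back in service */
	return ret;
}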