Commit 931da613 authored by Ingo Molnar


Merge branch 'tip/perf/core-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into perf/core
parents 5d67be97 1fd8df2c
@@ -66,7 +66,7 @@ void save_stack_trace(struct stack_trace *trace)
 }
 EXPORT_SYMBOL_GPL(save_stack_trace);
-void save_stack_trace_regs(struct stack_trace *trace, struct pt_regs *regs)
+void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
 {
     dump_trace(current, regs, NULL, 0, &save_stack_ops, trace);
     if (trace->nr_entries < trace->max_entries)
...
@@ -185,7 +185,7 @@ void kmemcheck_error_save(enum kmemcheck_shadow state,
     e->trace.entries = e->trace_entries;
     e->trace.max_entries = ARRAY_SIZE(e->trace_entries);
     e->trace.skip = 0;
-    save_stack_trace_regs(&e->trace, regs);
+    save_stack_trace_regs(regs, &e->trace);
     /* Round address down to nearest 16 bytes */
     shadow_copy = kmemcheck_shadow_lookup(address
...
@@ -129,6 +129,10 @@ void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
 void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
                                        struct ring_buffer_event *event,
                                        unsigned long flags, int pc);
+void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer,
+                                            struct ring_buffer_event *event,
+                                            unsigned long flags, int pc,
+                                            struct pt_regs *regs);
 void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
                                          struct ring_buffer_event *event);
...
@@ -169,7 +169,7 @@ void ring_buffer_set_clock(struct ring_buffer *buffer,
 size_t ring_buffer_page_len(void *page);
-void *ring_buffer_alloc_read_page(struct ring_buffer *buffer);
+void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu);
 void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data);
 int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page,
                           size_t len, int cpu, int full);
...
@@ -14,8 +14,8 @@ struct stack_trace {
 };
 extern void save_stack_trace(struct stack_trace *trace);
-extern void save_stack_trace_regs(struct stack_trace *trace,
-                                  struct pt_regs *regs);
+extern void save_stack_trace_regs(struct pt_regs *regs,
+                                  struct stack_trace *trace);
 extern void save_stack_trace_tsk(struct task_struct *tsk,
                                  struct stack_trace *trace);
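For reference, a caller under the reordered prototype now passes the register set first. A minimal sketch, modeled on the kmemcheck call site above (not part of the commit; the function name and the static entries array are invented for illustration):

    /* illustrative only: capture a kernel stack trace for a given pt_regs */
    static unsigned long entries[16];

    static void sketch_capture(struct pt_regs *regs)
    {
            struct stack_trace trace = {
                    .entries     = entries,
                    .max_entries = ARRAY_SIZE(entries),
                    .skip        = 0,
            };

            save_stack_trace_regs(regs, &trace);    /* regs first, as of this merge */
    }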
...
@@ -49,12 +49,13 @@ asynchronous and synchronous parts of the kernel.
  */
 #include <linux/async.h>
+#include <linux/atomic.h>
+#include <linux/ktime.h>
 #include <linux/module.h>
 #include <linux/wait.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/workqueue.h>
-#include <asm/atomic.h>
 static async_cookie_t next_cookie = 1;
@@ -128,7 +129,8 @@ static void async_run_entry_fn(struct work_struct *work)
     /* 2) run (and print duration) */
     if (initcall_debug && system_state == SYSTEM_BOOTING) {
-        printk("calling %lli_%pF @ %i\n", (long long)entry->cookie,
+        printk(KERN_DEBUG "calling %lli_%pF @ %i\n",
+            (long long)entry->cookie,
             entry->func, task_pid_nr(current));
         calltime = ktime_get();
     }
@@ -136,7 +138,7 @@ static void async_run_entry_fn(struct work_struct *work)
     if (initcall_debug && system_state == SYSTEM_BOOTING) {
         rettime = ktime_get();
         delta = ktime_sub(rettime, calltime);
-        printk("initcall %lli_%pF returned 0 after %lld usecs\n",
+        printk(KERN_DEBUG "initcall %lli_%pF returned 0 after %lld usecs\n",
             (long long)entry->cookie,
             entry->func,
             (long long)ktime_to_ns(delta) >> 10);
@@ -270,7 +272,7 @@ void async_synchronize_cookie_domain(async_cookie_t cookie,
     ktime_t starttime, delta, endtime;
     if (initcall_debug && system_state == SYSTEM_BOOTING) {
-        printk("async_waiting @ %i\n", task_pid_nr(current));
+        printk(KERN_DEBUG "async_waiting @ %i\n", task_pid_nr(current));
         starttime = ktime_get();
     }
@@ -280,7 +282,7 @@ void async_synchronize_cookie_domain(async_cookie_t cookie,
         endtime = ktime_get();
         delta = ktime_sub(endtime, starttime);
-        printk("async_continuing @ %i after %lli usec\n",
+        printk(KERN_DEBUG "async_continuing @ %i after %lli usec\n",
             task_pid_nr(current),
             (long long)ktime_to_ns(delta) >> 10);
     }
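A side note on the change itself (illustration, not from the commit): KERN_DEBUG is an ordinary string macro, so prefixing it relies on C's adjacent-string-literal concatenation. At the time of this merge it expanded to "<7>", i.e.:

    /* these two calls compile to the same thing */
    printk(KERN_DEBUG "async_waiting @ %i\n", task_pid_nr(current));
    printk("<7>" "async_waiting @ %i\n", task_pid_nr(current));

Without a level prefix the message is logged at the default console level; the diff pins these boot-debug messages to debug severity.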
...
@@ -26,12 +26,18 @@ void print_stack_trace(struct stack_trace *trace, int spaces)
 EXPORT_SYMBOL_GPL(print_stack_trace);
 /*
- * Architectures that do not implement save_stack_trace_tsk get this
- * weak alias and a once-per-bootup warning (whenever this facility
- * is utilized - for example by procfs):
+ * Architectures that do not implement save_stack_trace_tsk or
+ * save_stack_trace_regs get this weak alias and a once-per-bootup warning
+ * (whenever this facility is utilized - for example by procfs):
  */
 __weak void
 save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 {
     WARN_ONCE(1, KERN_INFO "save_stack_trace_tsk() not implemented yet.\n");
 }
+
+__weak void
+save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
+{
+    WARN_ONCE(1, KERN_INFO "save_stack_trace_regs() not implemented yet.\n");
+}
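This hunk leans on the kernel's weak-symbol convention. A minimal sketch of the mechanism (file names invented for illustration):

    /* generic.c: __weak expands to __attribute__((weak)); this definition
     * is a placeholder that only survives if no one else defines f() */
    __weak int f(void) { return 0; }

    /* arch.c: a strong definition; the linker silently prefers it,
     * so the weak stub (and its WARN_ONCE) never runs */
    int f(void) { return 1; }

Here the x86 save_stack_trace_regs() earlier in the diff overrides this stub; architectures without one get the once-per-boot warning instead of a link error.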
@@ -32,7 +32,6 @@
 #include <trace/events/sched.h>
-#include <asm/ftrace.h>
 #include <asm/setup.h>
 #include "trace_output.h"
@@ -82,8 +81,7 @@ static int ftrace_disabled __read_mostly;
 static DEFINE_MUTEX(ftrace_lock);
-static struct ftrace_ops ftrace_list_end __read_mostly =
-{
+static struct ftrace_ops ftrace_list_end __read_mostly = {
     .func = ftrace_stub,
 };
@@ -785,8 +783,7 @@ static void unregister_ftrace_profiler(void)
     unregister_ftrace_graph();
 }
 #else
-static struct ftrace_ops ftrace_profile_ops __read_mostly =
-{
+static struct ftrace_ops ftrace_profile_ops __read_mostly = {
     .func = function_profile_call,
 };
@@ -806,19 +803,10 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf,
                     size_t cnt, loff_t *ppos)
 {
     unsigned long val;
-    char buf[64];        /* big enough to hold a number */
     int ret;
-    if (cnt >= sizeof(buf))
-        return -EINVAL;
-    if (copy_from_user(&buf, ubuf, cnt))
-        return -EFAULT;
-    buf[cnt] = 0;
-    ret = strict_strtoul(buf, 10, &val);
-    if (ret < 0)
+    ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+    if (ret)
         return ret;
     val = !!val;
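The same open-coded parse is collapsed in several files below. For orientation, a rough sketch of what the new helper does (the real kstrtoul_from_user() lives in lib/kstrtox.c; this is an illustration, not the kernel's implementation):

    /* illustrative approximation of kstrtoul_from_user() */
    static int sketch_kstrtoul_from_user(const char __user *s, size_t count,
                                         unsigned int base, unsigned long *res)
    {
            char buf[64];

            count = min(count, sizeof(buf) - 1);
            if (copy_from_user(buf, s, count))
                    return -EFAULT;
            buf[count] = '\0';
            return kstrtoul(buf, base, res);    /* 0 on success, -errno otherwise */
    }

The helper bounds the copy itself, so each call site no longer needs its own buffer, range check, and NUL termination.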
...
@@ -997,15 +997,21 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
                              unsigned nr_pages)
 {
     struct buffer_page *bpage, *tmp;
-    unsigned long addr;
     LIST_HEAD(pages);
     unsigned i;
     WARN_ON(!nr_pages);
     for (i = 0; i < nr_pages; i++) {
+        struct page *page;
+        /*
+         * __GFP_NORETRY flag makes sure that the allocation fails
+         * gracefully without invoking oom-killer and the system is
+         * not destabilized.
+         */
         bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
-                    GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
+                    GFP_KERNEL | __GFP_NORETRY,
+                    cpu_to_node(cpu_buffer->cpu));
         if (!bpage)
             goto free_pages;
@@ -1013,10 +1019,11 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
         list_add(&bpage->list, &pages);
-        addr = __get_free_page(GFP_KERNEL);
-        if (!addr)
+        page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu),
+                    GFP_KERNEL | __GFP_NORETRY, 0);
+        if (!page)
             goto free_pages;
-        bpage->page = (void *)addr;
+        bpage->page = page_address(page);
         rb_init_page(bpage->page);
     }
@@ -1045,7 +1052,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 {
     struct ring_buffer_per_cpu *cpu_buffer;
     struct buffer_page *bpage;
-    unsigned long addr;
+    struct page *page;
     int ret;
     cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
@@ -1067,10 +1074,10 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
     rb_check_bpage(cpu_buffer, bpage);
     cpu_buffer->reader_page = bpage;
-    addr = __get_free_page(GFP_KERNEL);
-    if (!addr)
+    page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
+    if (!page)
         goto fail_free_reader;
-    bpage->page = (void *)addr;
+    bpage->page = page_address(page);
     rb_init_page(bpage->page);
     INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
@@ -1314,7 +1321,6 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
     unsigned nr_pages, rm_pages, new_pages;
     struct buffer_page *bpage, *tmp;
     unsigned long buffer_size;
-    unsigned long addr;
     LIST_HEAD(pages);
     int i, cpu;
@@ -1375,16 +1381,24 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
     for_each_buffer_cpu(buffer, cpu) {
         for (i = 0; i < new_pages; i++) {
+            struct page *page;
+            /*
+             * __GFP_NORETRY flag makes sure that the allocation
+             * fails gracefully without invoking oom-killer and
+             * the system is not destabilized.
+             */
             bpage = kzalloc_node(ALIGN(sizeof(*bpage),
                         cache_line_size()),
-                        GFP_KERNEL, cpu_to_node(cpu));
+                        GFP_KERNEL | __GFP_NORETRY,
+                        cpu_to_node(cpu));
             if (!bpage)
                 goto free_pages;
             list_add(&bpage->list, &pages);
-            addr = __get_free_page(GFP_KERNEL);
-            if (!addr)
+            page = alloc_pages_node(cpu_to_node(cpu),
+                        GFP_KERNEL | __GFP_NORETRY, 0);
+            if (!page)
                 goto free_pages;
-            bpage->page = (void *)addr;
+            bpage->page = page_address(page);
             rb_init_page(bpage->page);
         }
     }
@@ -3730,16 +3744,17 @@ EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
  * Returns:
  *  The page allocated, or NULL on error.
  */
-void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
+void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
 {
     struct buffer_data_page *bpage;
-    unsigned long addr;
-    addr = __get_free_page(GFP_KERNEL);
-    if (!addr)
+    struct page *page;
+    page = alloc_pages_node(cpu_to_node(cpu),
+                GFP_KERNEL | __GFP_NORETRY, 0);
+    if (!page)
         return NULL;
-    bpage = (void *)addr;
+    bpage = page_address(page);
     rb_init_page(bpage);
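In short, every ring-buffer page now comes from the node that owns the buffer's CPU, and bulk allocations may fail instead of triggering the OOM killer. A condensed before/after sketch (illustrative only):

    /* before: node-blind; a GFP_KERNEL order-0 allocation essentially
     * never fails and can end up invoking the OOM killer */
    unsigned long addr = __get_free_page(GFP_KERNEL);
    void *data = (void *)addr;

    /* after: order-0 page from the buffer CPU's node; returns NULL
     * under memory pressure instead of destabilizing the system */
    struct page *page = alloc_pages_node(cpu_to_node(cpu),
                                         GFP_KERNEL | __GFP_NORETRY, 0);
    void *data2 = page ? page_address(page) : NULL;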
@@ -3978,20 +3993,11 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
                size_t cnt, loff_t *ppos)
 {
     unsigned long *p = filp->private_data;
-    char buf[64];
     unsigned long val;
     int ret;
-    if (cnt >= sizeof(buf))
-        return -EINVAL;
-    if (copy_from_user(&buf, ubuf, cnt))
-        return -EFAULT;
-    buf[cnt] = 0;
-    ret = strict_strtoul(buf, 10, &val);
-    if (ret < 0)
+    ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+    if (ret)
         return ret;
     if (val)
...
@@ -106,7 +106,7 @@ static enum event_status read_page(int cpu)
     int inc;
     int i;
-    bpage = ring_buffer_alloc_read_page(buffer);
+    bpage = ring_buffer_alloc_read_page(buffer, cpu);
     if (!bpage)
         return EVENT_DROPPED;
...
@@ -389,6 +389,9 @@ void update_max_tr_single(struct trace_array *tr,
 void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
                         int skip, int pc);
+void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
+                             int skip, int pc, struct pt_regs *regs);
 void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
                             int pc);
@@ -400,6 +403,12 @@ static inline void ftrace_trace_stack(struct ring_buffer *buffer,
 {
 }
+static inline void ftrace_trace_stack_regs(struct ring_buffer *buffer,
+                                           unsigned long flags, int skip,
+                                           int pc, struct pt_regs *regs)
+{
+}
 static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
                                           unsigned long flags, int pc)
 {
@@ -609,6 +618,7 @@ enum trace_iterator_flags {
     TRACE_ITER_GRAPH_TIME       = 0x80000,
     TRACE_ITER_RECORD_CMD       = 0x100000,
     TRACE_ITER_OVERWRITE        = 0x200000,
+    TRACE_ITER_STOP_ON_FREE     = 0x400000,
 };
 /*
...
@@ -486,20 +486,11 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
                   loff_t *ppos)
 {
     struct ftrace_event_call *call = filp->private_data;
-    char buf[64];
     unsigned long val;
     int ret;
-    if (cnt >= sizeof(buf))
-        return -EINVAL;
-    if (copy_from_user(&buf, ubuf, cnt))
-        return -EFAULT;
-    buf[cnt] = 0;
-    ret = strict_strtoul(buf, 10, &val);
-    if (ret < 0)
+    ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+    if (ret)
         return ret;
     ret = tracing_update_buffers();
@@ -571,19 +562,10 @@ system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
 {
     const char *system = filp->private_data;
     unsigned long val;
-    char buf[64];
     ssize_t ret;
-    if (cnt >= sizeof(buf))
-        return -EINVAL;
-    if (copy_from_user(&buf, ubuf, cnt))
-        return -EFAULT;
-    buf[cnt] = 0;
-    ret = strict_strtoul(buf, 10, &val);
-    if (ret < 0)
+    ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+    if (ret)
         return ret;
     ret = tracing_update_buffers();
...
@@ -74,6 +74,20 @@ static struct tracer_flags tracer_flags = {
 static struct trace_array *graph_array;
+/*
+ * DURATION column is being also used to display IRQ signs,
+ * following values are used by print_graph_irq and others
+ * to fill in space into DURATION column.
+ */
+enum {
+    DURATION_FILL_FULL  = -1,
+    DURATION_FILL_START = -2,
+    DURATION_FILL_END   = -3,
+};
+
+static enum print_line_t
+print_graph_duration(unsigned long long duration, struct trace_seq *s,
+                     u32 flags);
 /* Add a function return address to the trace stack on thread info.*/
 int
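One subtlety worth noting (illustration, not from the commit): print_graph_duration() takes an unsigned long long, so the negative sentinels arrive wrapped; DURATION_FILL_FULL becomes ULLONG_MAX. The switch in the new function body still matches because C converts case labels to the type of the controlling expression:

    unsigned long long d = DURATION_FILL_FULL;    /* wraps to ULLONG_MAX */

    switch (d) {
    case DURATION_FILL_FULL:    /* label converted the same way: matches */
            /* fill the whole DURATION column with spaces */
            break;
    }

A genuine duration of ULLONG_MAX nanoseconds would collide with the sentinel, but that is centuries of runtime, so the overload is safe in practice.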
@@ -577,32 +591,6 @@ get_return_for_leaf(struct trace_iterator *iter,
     return next;
 }
-/* Signal a overhead of time execution to the output */
-static int
-print_graph_overhead(unsigned long long duration, struct trace_seq *s,
-                     u32 flags)
-{
-    /* If duration disappear, we don't need anything */
-    if (!(flags & TRACE_GRAPH_PRINT_DURATION))
-        return 1;
-
-    /* Non nested entry or return */
-    if (duration == -1)
-        return trace_seq_printf(s, " ");
-
-    if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
-        /* Duration exceeded 100 msecs */
-        if (duration > 100000ULL)
-            return trace_seq_printf(s, "! ");
-
-        /* Duration exceeded 10 msecs */
-        if (duration > 10000ULL)
-            return trace_seq_printf(s, "+ ");
-    }
-
-    return trace_seq_printf(s, " ");
-}
-
 static int print_graph_abs_time(u64 t, struct trace_seq *s)
 {
     unsigned long usecs_rem;
@@ -625,6 +613,7 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
         addr >= (unsigned long)__irqentry_text_end)
         return TRACE_TYPE_UNHANDLED;
+    if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
     /* Absolute time */
     if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
         ret = print_graph_abs_time(iter->ts, s);
@@ -648,11 +637,12 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
         if (!ret)
             return TRACE_TYPE_PARTIAL_LINE;
     }
+    }
     /* No overhead */
-    ret = print_graph_overhead(-1, s, flags);
-    if (!ret)
-        return TRACE_TYPE_PARTIAL_LINE;
+    ret = print_graph_duration(DURATION_FILL_START, s, flags);
+    if (ret != TRACE_TYPE_HANDLED)
+        return ret;
     if (type == TRACE_GRAPH_ENT)
         ret = trace_seq_printf(s, "==========>");
@@ -662,9 +652,10 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr,
     if (!ret)
         return TRACE_TYPE_PARTIAL_LINE;
-    /* Don't close the duration column if haven't one */
-    if (flags & TRACE_GRAPH_PRINT_DURATION)
-        trace_seq_printf(s, " |");
+    ret = print_graph_duration(DURATION_FILL_END, s, flags);
+    if (ret != TRACE_TYPE_HANDLED)
+        return ret;
     ret = trace_seq_printf(s, "\n");
     if (!ret)
@@ -716,9 +707,49 @@ trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
 }
 static enum print_line_t
-print_graph_duration(unsigned long long duration, struct trace_seq *s)
+print_graph_duration(unsigned long long duration, struct trace_seq *s,
+                     u32 flags)
 {
-    int ret;
+    int ret = -1;
+
+    if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
+        !(trace_flags & TRACE_ITER_CONTEXT_INFO))
+            return TRACE_TYPE_HANDLED;
+
+    /* No real adata, just filling the column with spaces */
+    switch (duration) {
+    case DURATION_FILL_FULL:
+        ret = trace_seq_printf(s, " | ");
+        return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
+    case DURATION_FILL_START:
+        ret = trace_seq_printf(s, " ");
+        return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
+    case DURATION_FILL_END:
+        ret = trace_seq_printf(s, " |");
+        return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
+    }
+
+    /* Signal a overhead of time execution to the output */
+    if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
+        /* Duration exceeded 100 msecs */
+        if (duration > 100000ULL)
+            ret = trace_seq_printf(s, "! ");
+        /* Duration exceeded 10 msecs */
+        else if (duration > 10000ULL)
+            ret = trace_seq_printf(s, "+ ");
+    }
+
+    /*
+     * The -1 means we either did not exceed the duration tresholds
+     * or we dont want to print out the overhead. Either way we need
+     * to fill out the space.
+     */
+    if (ret == -1)
+        ret = trace_seq_printf(s, " ");
+
+    /* Catching here any failure happenned above */
+    if (!ret)
+        return TRACE_TYPE_PARTIAL_LINE;
     ret = trace_print_graph_duration(duration, s);
     if (ret != TRACE_TYPE_HANDLED)
@@ -767,17 +798,10 @@ print_graph_entry_leaf(struct trace_iterator *iter,
             cpu_data->enter_funcs[call->depth] = 0;
     }
-    /* Overhead */
-    ret = print_graph_overhead(duration, s, flags);
-    if (!ret)
-        return TRACE_TYPE_PARTIAL_LINE;
-
-    /* Duration */
-    if (flags & TRACE_GRAPH_PRINT_DURATION) {
-        ret = print_graph_duration(duration, s);
-        if (ret == TRACE_TYPE_PARTIAL_LINE)
-            return TRACE_TYPE_PARTIAL_LINE;
-    }
+    /* Overhead and duration */
+    ret = print_graph_duration(duration, s, flags);
+    if (ret == TRACE_TYPE_PARTIAL_LINE)
+        return TRACE_TYPE_PARTIAL_LINE;
     /* Function */
     for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
@@ -815,17 +839,10 @@ print_graph_entry_nested(struct trace_iterator *iter,
         cpu_data->enter_funcs[call->depth] = call->func;
     }
-    /* No overhead */
-    ret = print_graph_overhead(-1, s, flags);
-    if (!ret)
-        return TRACE_TYPE_PARTIAL_LINE;
-
     /* No time */
-    if (flags & TRACE_GRAPH_PRINT_DURATION) {
-        ret = trace_seq_printf(s, " | ");
-        if (!ret)
-            return TRACE_TYPE_PARTIAL_LINE;
-    }
+    ret = print_graph_duration(DURATION_FILL_FULL, s, flags);
+    if (ret != TRACE_TYPE_HANDLED)
+        return ret;
     /* Function */
     for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
@@ -865,6 +882,9 @@ print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
             return TRACE_TYPE_PARTIAL_LINE;
     }
+    if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
+        return 0;
+
     /* Absolute time */
     if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
         ret = print_graph_abs_time(iter->ts, s);
@@ -1078,17 +1098,10 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
     if (print_graph_prologue(iter, s, 0, 0, flags))
         return TRACE_TYPE_PARTIAL_LINE;
-    /* Overhead */
-    ret = print_graph_overhead(duration, s, flags);
-    if (!ret)
-        return TRACE_TYPE_PARTIAL_LINE;
-
-    /* Duration */
-    if (flags & TRACE_GRAPH_PRINT_DURATION) {
-        ret = print_graph_duration(duration, s);
-        if (ret == TRACE_TYPE_PARTIAL_LINE)
-            return TRACE_TYPE_PARTIAL_LINE;
-    }
+    /* Overhead and duration */
+    ret = print_graph_duration(duration, s, flags);
+    if (ret == TRACE_TYPE_PARTIAL_LINE)
+        return TRACE_TYPE_PARTIAL_LINE;
     /* Closing brace */
     for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
@@ -1146,17 +1159,10 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
     if (print_graph_prologue(iter, s, 0, 0, flags))
         return TRACE_TYPE_PARTIAL_LINE;
-    /* No overhead */
-    ret = print_graph_overhead(-1, s, flags);
-    if (!ret)
-        return TRACE_TYPE_PARTIAL_LINE;
-
     /* No time */
-    if (flags & TRACE_GRAPH_PRINT_DURATION) {
-        ret = trace_seq_printf(s, " | ");
-        if (!ret)
-            return TRACE_TYPE_PARTIAL_LINE;
-    }
+    ret = print_graph_duration(DURATION_FILL_FULL, s, flags);
+    if (ret != TRACE_TYPE_HANDLED)
+        return ret;
     /* Indentation */
     if (depth > 0)
@@ -1207,7 +1213,7 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
 enum print_line_t
-__print_graph_function_flags(struct trace_iterator *iter, u32 flags)
+print_graph_function_flags(struct trace_iterator *iter, u32 flags)
 {
     struct ftrace_graph_ent_entry *field;
     struct fgraph_data *data = iter->private;
@@ -1270,18 +1276,7 @@ __print_graph_function_flags(struct trace_iterator *iter, u32 flags)
 static enum print_line_t
 print_graph_function(struct trace_iterator *iter)
 {
-    return __print_graph_function_flags(iter, tracer_flags.val);
-}
-
-enum print_line_t print_graph_function_flags(struct trace_iterator *iter,
-                                             u32 flags)
-{
-    if (trace_flags & TRACE_ITER_LATENCY_FMT)
-        flags |= TRACE_GRAPH_PRINT_DURATION;
-    else
-        flags |= TRACE_GRAPH_PRINT_ABS_TIME;
-
-    return __print_graph_function_flags(iter, flags);
+    return print_graph_function_flags(iter, tracer_flags.val);
 }
 static enum print_line_t
@@ -1309,8 +1304,7 @@ static void print_lat_header(struct seq_file *s, u32 flags)
     seq_printf(s, "#%.*s / _----=> need-resched \n", size, spaces);
     seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
     seq_printf(s, "#%.*s|| / _--=> preempt-depth \n", size, spaces);
-    seq_printf(s, "#%.*s||| / _-=> lock-depth \n", size, spaces);
-    seq_printf(s, "#%.*s|||| / \n", size, spaces);
+    seq_printf(s, "#%.*s||| / \n", size, spaces);
 }
@@ -1329,7 +1323,7 @@ static void __print_graph_headers_flags(struct seq_file *s, u32 flags)
     if (flags & TRACE_GRAPH_PRINT_PROC)
         seq_printf(s, " TASK/PID ");
     if (lat)
-        seq_printf(s, "|||||");
+        seq_printf(s, "||||");
     if (flags & TRACE_GRAPH_PRINT_DURATION)
         seq_printf(s, " DURATION ");
     seq_printf(s, " FUNCTION CALLS\n");
@@ -1343,7 +1337,7 @@ static void __print_graph_headers_flags(struct seq_file *s, u32 flags)
     if (flags & TRACE_GRAPH_PRINT_PROC)
         seq_printf(s, " | | ");
     if (lat)
-        seq_printf(s, "|||||");
+        seq_printf(s, "||||");
     if (flags & TRACE_GRAPH_PRINT_DURATION)
         seq_printf(s, " | | ");
     seq_printf(s, " | | | |\n");
@@ -1358,15 +1352,16 @@ void print_graph_headers_flags(struct seq_file *s, u32 flags)
 {
     struct trace_iterator *iter = s->private;
+    if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
+        return;
+
     if (trace_flags & TRACE_ITER_LATENCY_FMT) {
         /* print nothing if the buffers are empty */
         if (trace_empty(iter))
             return;
         print_trace_header(s, iter);
-        flags |= TRACE_GRAPH_PRINT_DURATION;
-    } else
-        flags |= TRACE_GRAPH_PRINT_ABS_TIME;
+    }
     __print_graph_headers_flags(s, flags);
 }
...
@@ -226,7 +226,9 @@ static void irqsoff_trace_close(struct trace_iterator *iter)
 }
 #define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
-                            TRACE_GRAPH_PRINT_PROC)
+                            TRACE_GRAPH_PRINT_PROC | \
+                            TRACE_GRAPH_PRINT_ABS_TIME | \
+                            TRACE_GRAPH_PRINT_DURATION)
 static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
 {
...
@@ -1397,7 +1397,8 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
     store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
     if (!filter_current_check_discard(buffer, call, entry, event))
-        trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
+        trace_nowake_buffer_unlock_commit_regs(buffer, event,
+                                               irq_flags, pc, regs);
 }
 /* Kretprobe handler */
@@ -1429,7 +1430,8 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
     store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
     if (!filter_current_check_discard(buffer, call, entry, event))
-        trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
+        trace_nowake_buffer_unlock_commit_regs(buffer, event,
+                                               irq_flags, pc, regs);
 }
 /* Event entry printers */
...
@@ -227,7 +227,9 @@ static void wakeup_trace_close(struct trace_iterator *iter)
         graph_trace_close(iter);
 }
-#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC)
+#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \
+                            TRACE_GRAPH_PRINT_ABS_TIME | \
+                            TRACE_GRAPH_PRINT_DURATION)
 static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
 {
...
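The two GRAPH_TRACER_FLAGS updates above (irqsoff and wakeup) compensate for the deletion of the print_graph_function_flags() wrapper earlier in this merge: that wrapper used to OR in TRACE_GRAPH_PRINT_DURATION or TRACE_GRAPH_PRINT_ABS_TIME depending on the latency format, so each latency tracer must now request the time columns explicitly in its own flag mask.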
@@ -156,20 +156,11 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
 {
     long *ptr = filp->private_data;
     unsigned long val, flags;
-    char buf[64];
     int ret;
     int cpu;
-    if (count >= sizeof(buf))
-        return -EINVAL;
-    if (copy_from_user(&buf, ubuf, count))
-        return -EFAULT;
-    buf[count] = 0;
-    ret = strict_strtoul(buf, 10, &val);
-    if (ret < 0)
+    ret = kstrtoul_from_user(ubuf, count, 10, &val);
+    if (ret)
         return ret;
     local_irq_save(flags);
...