Commit 8b54e45b authored by Ingo Molnar

Merge branches 'tracing/docs', 'tracing/filters', 'tracing/ftrace', 'tracing/kprobes', 'tracing/blktrace-v2' and 'tracing/textedit' into tracing/core-v2
@@ -410,7 +410,6 @@ int ftrace_disable_ftrace_graph_caller(void)
 void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 {
 	unsigned long old;
-	unsigned long long calltime;
 	int faulted;
 	struct ftrace_graph_ent trace;
 	unsigned long return_hooker = (unsigned long)
@@ -453,10 +452,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 		return;
 	}

-	calltime = trace_clock_local();
-
-	if (ftrace_push_return_trace(old, calltime,
-				     self_addr, &trace.depth) == -EBUSY) {
+	if (ftrace_push_return_trace(old, self_addr, &trace.depth) == -EBUSY) {
 		*parent = old;
 		return;
 	}
...
@@ -638,13 +638,13 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
 #else
 			"	pushf\n"
 			/*
-			 * Skip cs, ip, orig_ax.
+			 * Skip cs, ip, orig_ax and gs.
 			 * trampoline_handler() will plug in these values
 			 */
-			"	subl $12, %esp\n"
+			"	subl $16, %esp\n"
 			"	pushl %fs\n"
-			"	pushl %ds\n"
 			"	pushl %es\n"
+			"	pushl %ds\n"
 			"	pushl %eax\n"
 			"	pushl %ebp\n"
 			"	pushl %edi\n"
@@ -655,10 +655,10 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
 			"	movl %esp, %eax\n"
 			"	call trampoline_handler\n"
 			/* Move flags to cs */
-			"	movl 52(%esp), %edx\n"
-			"	movl %edx, 48(%esp)\n"
+			"	movl 56(%esp), %edx\n"
+			"	movl %edx, 52(%esp)\n"
 			/* Replace saved flags with true return address. */
-			"	movl %eax, 52(%esp)\n"
+			"	movl %eax, 56(%esp)\n"
 			"	popl %ebx\n"
 			"	popl %ecx\n"
 			"	popl %edx\n"
@@ -666,8 +666,8 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
 			"	popl %edi\n"
 			"	popl %ebp\n"
 			"	popl %eax\n"
-			/* Skip ip, orig_ax, es, ds, fs */
-			"	addl $20, %esp\n"
+			/* Skip ds, es, fs, gs, orig_ax and ip */
+			"	addl $24, %esp\n"
 			"	popf\n"
 #endif
 			"	ret\n");
@@ -691,6 +691,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 	regs->cs = __KERNEL_CS;
 #else
 	regs->cs = __KERNEL_CS | get_kernel_rpl();
+	regs->gs = 0;
 #endif
 	regs->ip = trampoline_address;
 	regs->orig_ax = ~0UL;
...
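
The new constants all fall out of the i386 pt_regs layout once a %gs slot is saved: cs/ip/orig_ax/gs are the 16 bytes skipped at entry, flags and cs sit 56 and 52 bytes above the saved %ebx at the stack top, and ds..ip are the 24 bytes skipped on the way out (the %es/%ds push order also now matches the struct's slot order). An editor's cross-check in ordinary userspace C, using a hypothetical mirror of pt_regs (not kernel code):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* hypothetical mirror of x86-32 pt_regs, 4 bytes per slot */
struct fake_pt_regs {
	uint32_t bx, cx, dx, si, di, bp, ax;
	uint32_t ds, es, fs, gs;	/* gs is the newly skipped slot */
	uint32_t orig_ax, ip, cs, flags;
};

int main(void)
{
	/* gs..cs = 16 bytes: the new "subl $16, %esp" */
	printf("entry skip: %zu bytes\n",
	       offsetof(struct fake_pt_regs, flags) -
	       offsetof(struct fake_pt_regs, gs));
	/* 56 and 52: the new movl offsets for flags and cs */
	printf("flags: %zu, cs: %zu\n",
	       offsetof(struct fake_pt_regs, flags),
	       offsetof(struct fake_pt_regs, cs));
	/* ds..ip = 24 bytes: the new "addl $24, %esp" */
	printf("exit skip: %zu bytes\n",
	       offsetof(struct fake_pt_regs, cs) -
	       offsetof(struct fake_pt_regs, ds));
	return 0;
}
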
@@ -30,6 +30,7 @@
 static struct vfsmount *debugfs_mount;
 static int debugfs_mount_count;
+static bool debugfs_registered;

 static struct inode *debugfs_get_inode(struct super_block *sb, int mode, dev_t dev)
 {
@@ -496,6 +497,16 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
 }
 EXPORT_SYMBOL_GPL(debugfs_rename);

+/**
+ * debugfs_initialized - Tells whether debugfs has been registered
+ */
+bool debugfs_initialized(void)
+{
+	return debugfs_registered;
+}
+EXPORT_SYMBOL_GPL(debugfs_initialized);
+
 static struct kobject *debug_kobj;

 static int __init debugfs_init(void)
@@ -509,11 +520,16 @@ static int __init debugfs_init(void)
 	retval = register_filesystem(&debug_fs_type);
 	if (retval)
 		kobject_put(debug_kobj);
+	else
+		debugfs_registered = true;

 	return retval;
 }

 static void __exit debugfs_exit(void)
 {
+	debugfs_registered = false;
 	simple_release_fs(&debugfs_mount, &debugfs_mount_count);
 	unregister_filesystem(&debug_fs_type);
 	kobject_put(debug_kobj);
...
@@ -71,6 +71,9 @@ struct dentry *debugfs_create_bool(const char *name, mode_t mode,
 struct dentry *debugfs_create_blob(const char *name, mode_t mode,
 				   struct dentry *parent,
 				   struct debugfs_blob_wrapper *blob);
+
+bool debugfs_initialized(void);
+
 #else

 #include <linux/err.h>
@@ -183,6 +186,11 @@ static inline struct dentry *debugfs_create_blob(const char *name, mode_t mode,
 	return ERR_PTR(-ENODEV);
 }

+static inline bool debugfs_initialized(void)
+{
+	return false;
+}
+
 #endif

 #endif
...
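
Together with the fs/debugfs hunk above, this gives every configuration a debugfs_initialized() predicate: the real flag when CONFIG_DEBUG_FS is set, a constant false otherwise. A hedged usage sketch (the caller and its names are hypothetical; the tracing_init_dentry() hunk later in this commit is the first real user):

#include <linux/debugfs.h>

static struct dentry *my_dir;	/* hypothetical client state */

static int my_feature_init_debugfs(void)
{
	/* code that may run before debugfs_init() can now bail out
	 * cleanly instead of creating dentries on an unregistered fs */
	if (!debugfs_initialized())
		return -ENODEV;

	my_dir = debugfs_create_dir("my_feature", NULL);
	return my_dir ? 0 : -ENOMEM;
}
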
@@ -145,8 +145,14 @@ enum {
 };

 struct dyn_ftrace {
+	union {
 		unsigned long		ip; /* address of mcount call-site */
+		struct dyn_ftrace	*freelist;
+	};
+	union {
 		unsigned long		flags;
+		struct dyn_ftrace	*newlist;
+	};
 	struct dyn_arch_ftrace		arch;
 };
@@ -369,8 +375,7 @@ struct ftrace_ret_stack {
 extern void return_to_handler(void);

 extern int
-ftrace_push_return_trace(unsigned long ret, unsigned long long time,
-			 unsigned long func, int *depth);
+ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth);
 extern void
 ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret);
...
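
The two unions make explicit an overlay that kernel/trace/ftrace.c previously expressed with casts: while a record sits on the free list its ip is dead storage and holds the next free record, and while it waits to be converted its flags holds the next new record (see the ftrace_free_rec()/ftrace_record_ip() hunks below). A standalone sketch of the same idiom, with illustrative names:

#include <stdio.h>

struct rec {
	union {
		unsigned long ip;	/* valid while the record is live */
		struct rec *freelist;	/* valid while on the free list */
	};
};

static struct rec *free_head;

static void rec_free(struct rec *r)
{
	r->freelist = free_head;	/* was: r->ip = (unsigned long)free_head; */
	free_head = r;
}

static struct rec *rec_alloc(void)
{
	struct rec *r = free_head;

	if (r)
		free_head = r->freelist;	/* was: (void *)r->ip */
	return r;
}

int main(void)
{
	struct rec a = { .ip = 0x1234 };

	rec_free(&a);
	printf("allocated back: %p\n", (void *)rec_alloc());
	return 0;
}
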
@@ -18,10 +18,13 @@ struct ring_buffer_event {
 /**
  * enum ring_buffer_type - internal ring buffer types
  *
- * @RINGBUF_TYPE_PADDING:	Left over page padding
- *				 array is ignored
- *				 size is variable depending on how much
- *				 padding is needed
+ * @RINGBUF_TYPE_PADDING:	Left over page padding or discarded event
+ *				 If time_delta is 0:
+ *				  array is ignored
+ *				  size is variable depending on how much
+ *				  padding is needed
+ *				 If time_delta is non zero:
+ *				  everything else same as RINGBUF_TYPE_DATA
 *
 * @RINGBUF_TYPE_TIME_EXTEND:	Extend the time delta
 *				 array[0] = time delta (28 .. 59)
@@ -65,6 +68,8 @@ ring_buffer_event_time_delta(struct ring_buffer_event *event)
 	return event->time_delta;
 }

+void ring_buffer_event_discard(struct ring_buffer_event *event);
+
 /*
  * size is in bytes for each per CPU buffer.
  */
...
@@ -1409,6 +1409,8 @@ struct task_struct {
 	int curr_ret_stack;
 	/* Stack of return addresses for return function tracing */
 	struct ftrace_ret_stack	*ret_stack;
+	/* time stamp for last schedule */
+	unsigned long long ftrace_timestamp;
 	/*
 	 * Number of functions that haven't been traced
 	 * because of depth overrun.
...
@@ -772,6 +772,7 @@ static void __init do_basic_setup(void)
 {
 	rcu_init_sched(); /* needed by module_init stage. */
 	init_workqueues();
+	cpuset_init_smp();
 	usermodehelper_init();
 	driver_init();
 	init_irq_proc();
@@ -865,8 +866,6 @@ static int __init kernel_init(void * unused)
 	smp_init();
 	sched_init_smp();

-	cpuset_init_smp();
-
 	do_basic_setup();

 	/*
...
@@ -16,6 +16,7 @@
  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 #include <linux/ftrace.h>
+#include <linux/memory.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/init.h>
...
@@ -63,7 +63,11 @@ config TRACING
 #
 config TRACING_SUPPORT
 	bool
-	depends on TRACE_IRQFLAGS_SUPPORT
+	# PPC32 has no irqflags tracing support, but it can use most of the
+	# tracers anyway; they were tested to build and work. Note that new
+	# exceptions to this list aren't welcome: better to implement
+	# irqflags tracing for your architecture.
+	depends on TRACE_IRQFLAGS_SUPPORT || PPC32
 	depends on STACKTRACE_SUPPORT
 	default y
...
@@ -45,5 +45,6 @@ obj-$(CONFIG_EVENT_TRACER) += events.o
 obj-$(CONFIG_EVENT_TRACER) += trace_export.o
 obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o
 obj-$(CONFIG_EVENT_PROFILE) += trace_event_profile.o
+obj-$(CONFIG_EVENT_TRACER) += trace_events_filter.o

 libftrace-y := ftrace.o
...
@@ -29,6 +29,8 @@
 #include <linux/list.h>
 #include <linux/hash.h>

+#include <trace/sched.h>
+
 #include <asm/ftrace.h>

 #include "trace.h"
@@ -339,7 +341,7 @@ static inline int record_frozen(struct dyn_ftrace *rec)
 static void ftrace_free_rec(struct dyn_ftrace *rec)
 {
-	rec->ip = (unsigned long)ftrace_free_records;
+	rec->freelist = ftrace_free_records;
 	ftrace_free_records = rec;
 	rec->flags |= FTRACE_FL_FREE;
 }
@@ -356,9 +358,14 @@ void ftrace_release(void *start, unsigned long size)

 	mutex_lock(&ftrace_lock);
 	do_for_each_ftrace_rec(pg, rec) {
-		if ((rec->ip >= s) && (rec->ip < e) &&
-		    !(rec->flags & FTRACE_FL_FREE))
+		if ((rec->ip >= s) && (rec->ip < e)) {
+			/*
+			 * rec->ip is changed in ftrace_free_rec();
+			 * it should not be between s and e if the
+			 * record was freed.
+			 */
+			FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
 			ftrace_free_rec(rec);
+		}
 	} while_for_each_ftrace_rec();
 	mutex_unlock(&ftrace_lock);
 }
@@ -377,7 +384,7 @@ static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
 			return NULL;
 	}

-	ftrace_free_records = (void *)rec->ip;
+	ftrace_free_records = rec->freelist;
 	memset(rec, 0, sizeof(*rec));
 	return rec;
 }
@@ -409,7 +416,7 @@ ftrace_record_ip(unsigned long ip)
 		return NULL;

 	rec->ip = ip;
-	rec->flags = (unsigned long)ftrace_new_addrs;
+	rec->newlist = ftrace_new_addrs;
 	ftrace_new_addrs = rec;

 	return rec;
@@ -729,7 +736,7 @@ static int ftrace_update_code(struct module *mod)
 		return -1;

 	p = ftrace_new_addrs;
-	ftrace_new_addrs = (struct dyn_ftrace *)p->flags;
+	ftrace_new_addrs = p->newlist;
 	p->flags = 0L;

 	/* convert record (i.e, patch mcount-call with NOP) */
@@ -2262,7 +2269,7 @@ ftrace_pid_read(struct file *file, char __user *ubuf,
 	if (ftrace_pid_trace == ftrace_swapper_pid)
 		r = sprintf(buf, "swapper tasks\n");
 	else if (ftrace_pid_trace)
-		r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
+		r = sprintf(buf, "%u\n", pid_vnr(ftrace_pid_trace));
 	else
 		r = sprintf(buf, "no pid\n");
@@ -2590,6 +2597,38 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
 	return ret;
 }

+static void
+ftrace_graph_probe_sched_switch(struct rq *__rq, struct task_struct *prev,
+				struct task_struct *next)
+{
+	unsigned long long timestamp;
+	int index;
+
+	/*
+	 * Does the user want to count the time a function was asleep?
+	 * If so, do not update the time stamps.
+	 */
+	if (trace_flags & TRACE_ITER_SLEEP_TIME)
+		return;
+
+	timestamp = trace_clock_local();
+
+	prev->ftrace_timestamp = timestamp;
+
+	/* only process tasks that we timestamped */
+	if (!next->ftrace_timestamp)
+		return;
+
+	/*
+	 * Update all the counters in next to make up for the
+	 * time next was sleeping.
+	 */
+	timestamp -= next->ftrace_timestamp;
+
+	for (index = next->curr_ret_stack; index >= 0; index--)
+		next->ret_stack[index].calltime += timestamp;
+}
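
This probe is the heart of the new sleep-time accounting: when the sleep-time option is cleared, every entry still on next's return stack has its calltime pushed forward by the interval the task spent off the CPU, so rettime - calltime reports on-CPU time only. A standalone arithmetic sketch (editor's example, illustrative values):

#include <stdio.h>

int main(void)
{
	unsigned long long calltime = 1000;	/* function entered */
	unsigned long long sched_out = 1300;	/* task switched away */
	unsigned long long sched_in = 2300;	/* task switched back */
	unsigned long long rettime = 2500;	/* function returned */

	/* what the probe does for each entry still on the return stack */
	calltime += sched_in - sched_out;

	/* 2500 - 2000 = 500: the 1000 units of sleep are not charged */
	printf("reported duration: %llu\n", rettime - calltime);
	return 0;
}
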

 /* Allocate a return stack for each task */
 static int start_graph_tracing(void)
 {
@@ -2611,6 +2650,13 @@ static int start_graph_tracing(void)
 		ret = alloc_retstack_tasklist(ret_stack_list);
 	} while (ret == -EAGAIN);

+	if (!ret) {
+		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch);
+		if (ret)
+			pr_info("ftrace_graph: Couldn't activate tracepoint"
+				" probe to kernel_sched_switch\n");
+	}
+
 	kfree(ret_stack_list);
 	return ret;
 }
@@ -2643,6 +2689,12 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,

 	mutex_lock(&ftrace_lock);

+	/* we currently allow only one tracer registered at a time */
+	if (atomic_read(&ftrace_graph_active)) {
+		ret = -EBUSY;
+		goto out;
+	}
+
 	ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
 	register_pm_notifier(&ftrace_suspend_notifier);
@@ -2668,6 +2720,7 @@ void unregister_ftrace_graph(void)
 	mutex_lock(&ftrace_lock);

 	atomic_dec(&ftrace_graph_active);
+	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch);
 	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
 	ftrace_graph_entry = ftrace_graph_entry_stub;
 	ftrace_shutdown(FTRACE_STOP_FUNC_RET);
@@ -2688,6 +2741,7 @@ void ftrace_graph_init_task(struct task_struct *t)
 		t->curr_ret_stack = -1;
 		atomic_set(&t->tracing_graph_pause, 0);
 		atomic_set(&t->trace_overrun, 0);
+		t->ftrace_timestamp = 0;
 	} else
 		t->ret_stack = NULL;
 }
...
@@ -189,16 +189,65 @@ enum {
 	RB_LEN_TIME_STAMP = 16,
 };

-/* inline for ring buffer fast paths */
+static inline int rb_null_event(struct ring_buffer_event *event)
+{
+	return event->type == RINGBUF_TYPE_PADDING && event->time_delta == 0;
+}
+
+static inline int rb_discarded_event(struct ring_buffer_event *event)
+{
+	return event->type == RINGBUF_TYPE_PADDING && event->time_delta;
+}
+
+static void rb_event_set_padding(struct ring_buffer_event *event)
+{
+	event->type = RINGBUF_TYPE_PADDING;
+	event->time_delta = 0;
+}
+
+/**
+ * ring_buffer_event_discard - discard an event in the ring buffer
+ * @event: the event to discard
+ *
+ * Sometimes an event that is in the ring buffer needs to be ignored.
+ * This function lets the user discard an event in the ring buffer
+ * and then that event will not be read later.
+ *
+ * Note, it is up to the user to be careful with this, and protect
+ * against races. If the user discards an event that has been consumed
+ * it is possible that it could corrupt the ring buffer.
+ */
+void ring_buffer_event_discard(struct ring_buffer_event *event)
+{
+	event->type = RINGBUF_TYPE_PADDING;
+	/* time delta must be non zero */
+	if (!event->time_delta)
+		event->time_delta = 1;
+}
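
Discarding works on an event that has already been reserved: the slot cannot be handed back, so it is rewritten as padding whose non-zero time_delta tells readers to skip it while the time stream stays intact (rb_discarded_event() above keys on exactly that). A hedged usage sketch modeled on the ftrace_raw_event_##call() change later in this commit (TRACE_MY_TYPE, struct my_entry, fill_entry() and my_event_matches() are hypothetical):

	struct ring_buffer_event *event;
	struct my_entry *entry;
	unsigned long irq_flags;
	int pc = preempt_count();

	local_save_flags(irq_flags);
	event = trace_current_buffer_lock_reserve(TRACE_MY_TYPE,
						  sizeof(*entry),
						  irq_flags, pc);
	if (event) {
		entry = ring_buffer_event_data(event);
		fill_entry(entry);

		/* the slot cannot be returned, but it can be neutralized */
		if (!my_event_matches(entry))
			ring_buffer_event_discard(event);

		trace_nowake_buffer_unlock_commit(event, irq_flags, pc);
	}
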

 static unsigned
-rb_event_length(struct ring_buffer_event *event)
+rb_event_data_length(struct ring_buffer_event *event)
 {
 	unsigned length;

+	if (event->len)
+		length = event->len * RB_ALIGNMENT;
+	else
+		length = event->array[0];
+	return length + RB_EVNT_HDR_SIZE;
+}
+
+/* inline for ring buffer fast paths */
+static unsigned
+rb_event_length(struct ring_buffer_event *event)
+{
 	switch (event->type) {
 	case RINGBUF_TYPE_PADDING:
+		if (rb_null_event(event))
 			/* undefined */
 			return -1;
+		return rb_event_data_length(event);

 	case RINGBUF_TYPE_TIME_EXTEND:
 		return RB_LEN_TIME_EXTEND;
@@ -207,11 +256,7 @@ rb_event_length(struct ring_buffer_event *event)
 		return RB_LEN_TIME_STAMP;

 	case RINGBUF_TYPE_DATA:
-		if (event->len)
-			length = event->len * RB_ALIGNMENT;
-		else
-			length = event->array[0];
-		return length + RB_EVNT_HDR_SIZE;
+		return rb_event_data_length(event);
 	default:
 		BUG();
 	}
@@ -845,11 +890,6 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 }
 EXPORT_SYMBOL_GPL(ring_buffer_resize);

-static inline int rb_null_event(struct ring_buffer_event *event)
-{
-	return event->type == RINGBUF_TYPE_PADDING;
-}
-
 static inline void *
 __rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
 {
@@ -1219,7 +1259,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	if (tail < BUF_PAGE_SIZE) {
 		/* Mark the rest of the page with padding */
 		event = __rb_page_index(tail_page, tail);
-		event->type = RINGBUF_TYPE_PADDING;
+		rb_event_set_padding(event);
 	}

 	if (tail <= BUF_PAGE_SIZE)
@@ -1969,7 +2009,7 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
 	event = rb_reader_event(cpu_buffer);

-	if (event->type == RINGBUF_TYPE_DATA)
+	if (event->type == RINGBUF_TYPE_DATA || rb_discarded_event(event))
 		cpu_buffer->entries--;

 	rb_update_read_stamp(cpu_buffer, event);
@@ -2052,9 +2092,18 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 	switch (event->type) {
 	case RINGBUF_TYPE_PADDING:
+		if (rb_null_event(event))
 			RB_WARN_ON(cpu_buffer, 1);
+		/*
+		 * Because the writer could be discarding every
+		 * event it creates (which would probably be bad)
+		 * if we were to go back to "again" then we may never
+		 * catch up, and will trigger the warn on, or lock
+		 * the box. Return the padding, and we will release
+		 * the current locks, and try again.
+		 */
 		rb_advance_reader(cpu_buffer);
-		return NULL;
+		return event;

 	case RINGBUF_TYPE_TIME_EXTEND:
 		/* Internal data, OK to advance */
@@ -2115,8 +2164,12 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 	switch (event->type) {
 	case RINGBUF_TYPE_PADDING:
+		if (rb_null_event(event)) {
 			rb_inc_iter(iter);
 			goto again;
+		}
+		rb_advance_iter(iter);
+		return event;

 	case RINGBUF_TYPE_TIME_EXTEND:
 		/* Internal data, OK to advance */
@@ -2163,10 +2216,16 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return NULL;

+ again:
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 	event = rb_buffer_peek(buffer, cpu, ts);
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

+	if (event && event->type == RINGBUF_TYPE_PADDING) {
+		cpu_relax();
+		goto again;
+	}
+
 	return event;
 }
@@ -2185,10 +2244,16 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 	struct ring_buffer_event *event;
 	unsigned long flags;

+ again:
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 	event = rb_iter_peek(iter, ts);
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

+	if (event && event->type == RINGBUF_TYPE_PADDING) {
+		cpu_relax();
+		goto again;
+	}
+
 	return event;
 }
@@ -2207,6 +2272,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 	struct ring_buffer_event *event = NULL;
 	unsigned long flags;

+ again:
 	/* might be called in atomic */
 	preempt_disable();
@@ -2228,6 +2294,11 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
  out:
 	preempt_enable();

+	if (event && event->type == RINGBUF_TYPE_PADDING) {
+		cpu_relax();
+		goto again;
+	}
+
 	return event;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_consume);
@@ -2306,6 +2377,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
 	unsigned long flags;

+ again:
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 	event = rb_iter_peek(iter, ts);
 	if (!event)
@@ -2315,6 +2387,11 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
  out:
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

+	if (event && event->type == RINGBUF_TYPE_PADDING) {
+		cpu_relax();
+		goto again;
+	}
+
 	return event;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_read);
...
@@ -255,7 +255,7 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait);

 /* trace_flags holds trace_options default values */
 unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
-	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO;
+	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME;

 /**
  * trace_wake_up - wake up tasks waiting for trace input
@@ -316,6 +316,7 @@ static const char *trace_options[] = {
 	"context-info",
 	"latency-format",
 	"global-clock",
+	"sleep-time",
 	NULL
 };
@@ -382,7 +383,7 @@ ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
 	return cnt;
 }

-ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
+static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
 {
 	int len;
 	void *ret;
@@ -860,17 +861,27 @@ static void ftrace_trace_stack(struct trace_array *tr,
 static void ftrace_trace_userstack(struct trace_array *tr,
 				   unsigned long flags, int pc);

-void trace_buffer_unlock_commit(struct trace_array *tr,
-				struct ring_buffer_event *event,
-				unsigned long flags, int pc)
+static inline void __trace_buffer_unlock_commit(struct trace_array *tr,
+					struct ring_buffer_event *event,
+					unsigned long flags, int pc,
+					int wake)
 {
 	ring_buffer_unlock_commit(tr->buffer, event);

 	ftrace_trace_stack(tr, flags, 6, pc);
 	ftrace_trace_userstack(tr, flags, pc);
+	if (wake)
 		trace_wake_up();
 }

+void trace_buffer_unlock_commit(struct trace_array *tr,
+				struct ring_buffer_event *event,
+				unsigned long flags, int pc)
+{
+	__trace_buffer_unlock_commit(tr, event, flags, pc, 1);
+}
+
 struct ring_buffer_event *
 trace_current_buffer_lock_reserve(unsigned char type, unsigned long len,
 				  unsigned long flags, int pc)
@@ -882,7 +893,13 @@ trace_current_buffer_lock_reserve(unsigned char type, unsigned long len,
 void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
 					unsigned long flags, int pc)
 {
-	return trace_buffer_unlock_commit(&global_trace, event, flags, pc);
+	return __trace_buffer_unlock_commit(&global_trace, event, flags, pc, 1);
+}
+
+void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event,
+				       unsigned long flags, int pc)
+{
+	return __trace_buffer_unlock_commit(&global_trace, event, flags, pc, 0);
 }

 void
@@ -908,7 +925,7 @@ trace_function(struct trace_array *tr,
 }

 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static void __trace_graph_entry(struct trace_array *tr,
+static int __trace_graph_entry(struct trace_array *tr,
 				struct ftrace_graph_ent *trace,
 				unsigned long flags,
 				int pc)
@@ -917,15 +934,17 @@ static void __trace_graph_entry(struct trace_array *tr,
 	struct ftrace_graph_ent_entry *entry;

 	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
-		return;
+		return 0;

 	event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_ENT,
 					  sizeof(*entry), flags, pc);
 	if (!event)
-		return;
+		return 0;
 	entry	= ring_buffer_event_data(event);
 	entry->graph_ent		= *trace;
 	ring_buffer_unlock_commit(global_trace.buffer, event);
+
+	return 1;
 }

 static void __trace_graph_return(struct trace_array *tr,
@@ -1146,6 +1165,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 	struct trace_array_cpu *data;
 	unsigned long flags;
 	long disabled;
+	int ret;
 	int cpu;
 	int pc;
@@ -1161,15 +1181,18 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 	disabled = atomic_inc_return(&data->disabled);
 	if (likely(disabled == 1)) {
 		pc = preempt_count();
-		__trace_graph_entry(tr, trace, flags, pc);
+		ret = __trace_graph_entry(tr, trace, flags, pc);
+	} else {
+		ret = 0;
 	}

 	/* Only do the atomic if it is not already set */
 	if (!test_tsk_trace_graph(current))
 		set_tsk_trace_graph(current);

 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);

-	return 1;
+	return ret;
 }

 void trace_graph_return(struct ftrace_graph_ret *trace)
@@ -3513,6 +3536,9 @@ struct dentry *tracing_init_dentry(void)
 	if (d_tracer)
 		return d_tracer;

+	if (!debugfs_initialized())
+		return NULL;
+
 	d_tracer = debugfs_create_dir("tracing", NULL);

 	if (!d_tracer && !once) {
...
@@ -483,6 +483,8 @@ trace_current_buffer_lock_reserve(unsigned char type, unsigned long len,
 				  unsigned long flags, int pc);
 void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
 					unsigned long flags, int pc);
+void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event,
+				       unsigned long flags, int pc);

 struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
 					    struct trace_array_cpu *data);
@@ -683,6 +685,7 @@ enum trace_iterator_flags {
 	TRACE_ITER_CONTEXT_INFO		= 0x20000, /* Print pid/cpu/time */
 	TRACE_ITER_LATENCY_FMT		= 0x40000,
 	TRACE_ITER_GLOBAL_CLK		= 0x80000,
+	TRACE_ITER_SLEEP_TIME		= 0x100000,
 };

 /*
@@ -775,6 +778,14 @@ enum {
 	TRACE_EVENT_TYPE_RAW		= 2,
 };

+struct ftrace_event_field {
+	struct list_head	link;
+	char			*name;
+	char			*type;
+	int			offset;
+	int			size;
+};
+
 struct ftrace_event_call {
 	char		*name;
 	char		*system;
@@ -785,6 +796,9 @@ struct ftrace_event_call {
 	int		id;
 	int		(*raw_init)(void);
 	int		(*show_format)(struct trace_seq *s);
+	int		(*define_fields)(void);
+	struct list_head	fields;
+	struct filter_pred	**preds;

 #ifdef CONFIG_EVENT_PROFILE
 	atomic_t	profile_count;
@@ -793,6 +807,51 @@ struct ftrace_event_call {
 #endif
 };

+struct event_subsystem {
+	struct list_head	list;
+	const char		*name;
+	struct dentry		*entry;
+	struct filter_pred	**preds;
+};
+
+#define events_for_each(event)						\
+	for (event = __start_ftrace_events;				\
+	     (unsigned long)event < (unsigned long)__stop_ftrace_events; \
+	     event++)
+
+#define MAX_FILTER_PRED 8
+
+struct filter_pred;
+
+typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);
+
+struct filter_pred {
+	filter_pred_fn_t	fn;
+	u64			val;
+	char			*str_val;
+	int			str_len;
+	char			*field_name;
+	int			offset;
+	int			not;
+	int			or;
+	int			compound;
+	int			clear;
+};
+
+int trace_define_field(struct ftrace_event_call *call, char *type,
+		       char *name, int offset, int size);
+extern void filter_free_pred(struct filter_pred *pred);
+extern void filter_print_preds(struct filter_pred **preds,
+			       struct trace_seq *s);
+extern int filter_parse(char **pbuf, struct filter_pred *pred);
+extern int filter_add_pred(struct ftrace_event_call *call,
+			   struct filter_pred *pred);
+extern void filter_free_preds(struct ftrace_event_call *call);
+extern int filter_match_preds(struct ftrace_event_call *call, void *rec);
+extern void filter_free_subsystem_preds(struct event_subsystem *system);
+extern int filter_add_subsystem_pred(struct event_subsystem *system,
+				     struct filter_pred *pred);
+
 void event_trace_printk(unsigned long ip, const char *fmt, ...);

 extern struct ftrace_event_call __start_ftrace_events[];
 extern struct ftrace_event_call __stop_ftrace_events[];
...
@@ -18,6 +18,7 @@
 #include <linux/percpu.h>
 #include <linux/sched.h>
 #include <linux/ktime.h>
+#include <linux/trace_clock.h>

 /*
  * trace_clock_local(): the simplest and least coherent tracing clock.
...
@@ -19,6 +19,39 @@

 static DEFINE_MUTEX(event_mutex);

+int trace_define_field(struct ftrace_event_call *call, char *type,
+		       char *name, int offset, int size)
+{
+	struct ftrace_event_field *field;
+
+	field = kzalloc(sizeof(*field), GFP_KERNEL);
+	if (!field)
+		goto err;
+
+	field->name = kstrdup(name, GFP_KERNEL);
+	if (!field->name)
+		goto err;
+
+	field->type = kstrdup(type, GFP_KERNEL);
+	if (!field->type)
+		goto err;
+
+	field->offset = offset;
+	field->size = size;
+	list_add(&field->link, &call->fields);
+
+	return 0;
+
+err:
+	if (field) {
+		kfree(field->name);
+		kfree(field->type);
+	}
+	kfree(field);
+
+	return -ENOMEM;
+}
+
 static void ftrace_clear_events(void)
 {
 	struct ftrace_event_call *call = (void *)__start_ftrace_events;
@@ -343,7 +376,8 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,

 #undef FIELD
 #define FIELD(type, name)						\
-	#type, #name, offsetof(typeof(field), name), sizeof(field.name)
+	#type, "common_" #name, offsetof(typeof(field), name),		\
+		sizeof(field.name)

 static int trace_write_header(struct trace_seq *s)
 {
@@ -430,6 +464,139 @@ event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
 	return r;
 }
+static ssize_t
+event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
+		  loff_t *ppos)
+{
+	struct ftrace_event_call *call = filp->private_data;
+	struct trace_seq *s;
+	int r;
+
+	if (*ppos)
+		return 0;
+
+	s = kmalloc(sizeof(*s), GFP_KERNEL);
+	if (!s)
+		return -ENOMEM;
+
+	trace_seq_init(s);
+
+	filter_print_preds(call->preds, s);
+	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
+
+	kfree(s);
+
+	return r;
+}
+
+static ssize_t
+event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
+		   loff_t *ppos)
+{
+	struct ftrace_event_call *call = filp->private_data;
+	char buf[64], *pbuf = buf;
+	struct filter_pred *pred;
+	int err;
+
+	if (cnt >= sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(&buf, ubuf, cnt))
+		return -EFAULT;
+
+	pred = kzalloc(sizeof(*pred), GFP_KERNEL);
+	if (!pred)
+		return -ENOMEM;
+
+	err = filter_parse(&pbuf, pred);
+	if (err < 0) {
+		filter_free_pred(pred);
+		return err;
+	}
+
+	if (pred->clear) {
+		filter_free_preds(call);
+		filter_free_pred(pred);
+		return cnt;
+	}
+
+	if (filter_add_pred(call, pred)) {
+		filter_free_pred(pred);
+		return -EINVAL;
+	}
+
+	*ppos += cnt;
+
+	return cnt;
+}
+
+static ssize_t
+subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
+		      loff_t *ppos)
+{
+	struct event_subsystem *system = filp->private_data;
+	struct trace_seq *s;
+	int r;
+
+	if (*ppos)
+		return 0;
+
+	s = kmalloc(sizeof(*s), GFP_KERNEL);
+	if (!s)
+		return -ENOMEM;
+
+	trace_seq_init(s);
+
+	filter_print_preds(system->preds, s);
+	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
+
+	kfree(s);
+
+	return r;
+}
+
+static ssize_t
+subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
+		       loff_t *ppos)
+{
+	struct event_subsystem *system = filp->private_data;
+	char buf[64], *pbuf = buf;
+	struct filter_pred *pred;
+	int err;
+
+	if (cnt >= sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(&buf, ubuf, cnt))
+		return -EFAULT;
+
+	pred = kzalloc(sizeof(*pred), GFP_KERNEL);
+	if (!pred)
+		return -ENOMEM;
+
+	err = filter_parse(&pbuf, pred);
+	if (err < 0) {
+		filter_free_pred(pred);
+		return err;
+	}
+
+	if (pred->clear) {
+		filter_free_subsystem_preds(system);
+		filter_free_pred(pred);
+		return cnt;
+	}
+
+	if (filter_add_subsystem_pred(system, pred)) {
+		filter_free_subsystem_preds(system);
+		filter_free_pred(pred);
+		return -EINVAL;
+	}
+
+	*ppos += cnt;
+
+	return cnt;
+}
+
 static const struct seq_operations show_event_seq_ops = {
 	.start = t_start,
 	.next = t_next,
@@ -475,6 +642,18 @@ static const struct file_operations ftrace_event_id_fops = {
 	.read = event_id_read,
 };

+static const struct file_operations ftrace_event_filter_fops = {
+	.open = tracing_open_generic,
+	.read = event_filter_read,
+	.write = event_filter_write,
+};
+
+static const struct file_operations ftrace_subsystem_filter_fops = {
+	.open = tracing_open_generic,
+	.read = subsystem_filter_read,
+	.write = subsystem_filter_write,
+};
+
 static struct dentry *event_trace_events_dir(void)
 {
 	static struct dentry *d_tracer;
@@ -495,12 +674,6 @@ static struct dentry *event_trace_events_dir(void)

 	return d_events;
 }

-struct event_subsystem {
-	struct list_head list;
-	const char *name;
-	struct dentry *entry;
-};
-
 static LIST_HEAD(event_subsystems);

 static struct dentry *
@@ -533,6 +706,8 @@ event_subsystem_dir(const char *name, struct dentry *d_events)
 	system->name = name;
 	list_add(&system->list, &event_subsystems);

+	system->preds = NULL;
+
 	return system->entry;
 }
@@ -581,6 +756,20 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events)
 			   call->name);
 	}

+	if (call->define_fields) {
+		ret = call->define_fields();
+		if (ret < 0) {
+			pr_warning("Could not initialize trace point"
+				   " events/%s\n", call->name);
+			return ret;
+		}
+		entry = debugfs_create_file("filter", 0644, call->dir, call,
+					    &ftrace_event_filter_fops);
+		if (!entry)
+			pr_warning("Could not create debugfs "
+				   "'%s/filter' entry\n", call->name);
+	}
+
 	/* A trace may not want to export its format */
 	if (!call->show_format)
 		return 0;
...
/*
* trace_events_filter - generic event filtering
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
*/
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include "trace.h"
#include "trace_output.h"
static int filter_pred_64(struct filter_pred *pred, void *event)
{
u64 *addr = (u64 *)(event + pred->offset);
u64 val = (u64)pred->val;
int match;
match = (val == *addr) ^ pred->not;
return match;
}
static int filter_pred_32(struct filter_pred *pred, void *event)
{
u32 *addr = (u32 *)(event + pred->offset);
u32 val = (u32)pred->val;
int match;
match = (val == *addr) ^ pred->not;
return match;
}
static int filter_pred_16(struct filter_pred *pred, void *event)
{
u16 *addr = (u16 *)(event + pred->offset);
u16 val = (u16)pred->val;
int match;
match = (val == *addr) ^ pred->not;
return match;
}
static int filter_pred_8(struct filter_pred *pred, void *event)
{
u8 *addr = (u8 *)(event + pred->offset);
u8 val = (u8)pred->val;
int match;
match = (val == *addr) ^ pred->not;
return match;
}
static int filter_pred_string(struct filter_pred *pred, void *event)
{
char *addr = (char *)(event + pred->offset);
int cmp, match;
cmp = strncmp(addr, pred->str_val, pred->str_len);
match = (!cmp) ^ pred->not;
return match;
}
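
All five predicate helpers above share one idiom worth spelling out: the comparison result is combined with pred->not by XOR, so a single expression serves both == (not = 0, identity) and != (not = 1, inversion) without a branch. A standalone truth-table check (editor's example):

#include <stdio.h>

int main(void)
{
	int not, eq;

	/* eq ^ not: identity when not==0, inversion when not==1 */
	for (not = 0; not <= 1; not++)
		for (eq = 0; eq <= 1; eq++)
			printf("not=%d value_equal=%d -> match=%d\n",
			       not, eq, eq ^ not);
	return 0;
}
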
/* return 1 if event matches, 0 otherwise (discard) */
int filter_match_preds(struct ftrace_event_call *call, void *rec)
{
int i, matched, and_failed = 0;
struct filter_pred *pred;
for (i = 0; i < MAX_FILTER_PRED; i++) {
if (call->preds[i]) {
pred = call->preds[i];
if (and_failed && !pred->or)
continue;
matched = pred->fn(pred, rec);
if (!matched && !pred->or) {
and_failed = 1;
continue;
} else if (matched && pred->or)
return 1;
} else
break;
}
if (and_failed)
return 0;
return 1;
}
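
filter_match_preds() walks preds[] in order: a failing AND predicate latches and_failed and suppresses the remaining ANDs, while any matching OR predicate accepts immediately; the record is kept only if no AND chain ended in failure. A standalone simulation (editor's sketch with simplified predicates, equivalent to the filter "pid == 100 || pid == 200"):

#include <stdio.h>

struct p { int or; int want; };

static int match_chain(struct p *preds, int n, int pid)
{
	int i, and_failed = 0;

	for (i = 0; i < n; i++) {
		int matched = (pid == preds[i].want);

		if (and_failed && !preds[i].or)
			continue;		/* skip ANDs after a failure */
		if (!matched && !preds[i].or) {
			and_failed = 1;		/* failing AND latches */
			continue;
		}
		if (matched && preds[i].or)
			return 1;		/* matching OR accepts */
	}
	return and_failed ? 0 : 1;
}

int main(void)
{
	struct p preds[] = { { 0, 100 }, { 1, 200 } };
	int pids[] = { 100, 200, 300 };		/* expect 1, 1, 0 */
	int i;

	for (i = 0; i < 3; i++)
		printf("pid %d -> %d\n", pids[i],
		       match_chain(preds, 2, pids[i]));
	return 0;
}
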
void filter_print_preds(struct filter_pred **preds, struct trace_seq *s)
{
char *field_name;
struct filter_pred *pred;
int i;
if (!preds) {
trace_seq_printf(s, "none\n");
return;
}
for (i = 0; i < MAX_FILTER_PRED; i++) {
if (preds[i]) {
pred = preds[i];
field_name = pred->field_name;
if (i)
trace_seq_printf(s, pred->or ? "|| " : "&& ");
trace_seq_printf(s, "%s ", field_name);
trace_seq_printf(s, pred->not ? "!= " : "== ");
if (pred->str_val)
trace_seq_printf(s, "%s\n", pred->str_val);
else
trace_seq_printf(s, "%llu\n", pred->val);
} else
break;
}
}
static struct ftrace_event_field *
find_event_field(struct ftrace_event_call *call, char *name)
{
struct ftrace_event_field *field;
list_for_each_entry(field, &call->fields, link) {
if (!strcmp(field->name, name))
return field;
}
return NULL;
}
void filter_free_pred(struct filter_pred *pred)
{
if (!pred)
return;
kfree(pred->field_name);
kfree(pred->str_val);
kfree(pred);
}
void filter_free_preds(struct ftrace_event_call *call)
{
int i;
if (call->preds) {
for (i = 0; i < MAX_FILTER_PRED; i++)
filter_free_pred(call->preds[i]);
kfree(call->preds);
call->preds = NULL;
}
}
void filter_free_subsystem_preds(struct event_subsystem *system)
{
struct ftrace_event_call *call = __start_ftrace_events;
int i;
if (system->preds) {
for (i = 0; i < MAX_FILTER_PRED; i++)
filter_free_pred(system->preds[i]);
kfree(system->preds);
system->preds = NULL;
}
events_for_each(call) {
if (!call->name || !call->regfunc)
continue;
if (!strcmp(call->system, system->name))
filter_free_preds(call);
}
}
static int __filter_add_pred(struct ftrace_event_call *call,
struct filter_pred *pred)
{
int i;
if (call->preds && !pred->compound)
filter_free_preds(call);
if (!call->preds) {
call->preds = kzalloc(MAX_FILTER_PRED * sizeof(pred),
GFP_KERNEL);
if (!call->preds)
return -ENOMEM;
}
for (i = 0; i < MAX_FILTER_PRED; i++) {
if (!call->preds[i]) {
call->preds[i] = pred;
return 0;
}
}
return -ENOMEM;
}
static int is_string_field(const char *type)
{
if (strchr(type, '[') && strstr(type, "char"))
return 1;
return 0;
}
int filter_add_pred(struct ftrace_event_call *call, struct filter_pred *pred)
{
struct ftrace_event_field *field;
field = find_event_field(call, pred->field_name);
if (!field)
return -EINVAL;
pred->offset = field->offset;
if (is_string_field(field->type)) {
if (!pred->str_val)
return -EINVAL;
pred->fn = filter_pred_string;
pred->str_len = field->size;
return __filter_add_pred(call, pred);
} else {
if (pred->str_val)
return -EINVAL;
}
switch (field->size) {
case 8:
pred->fn = filter_pred_64;
break;
case 4:
pred->fn = filter_pred_32;
break;
case 2:
pred->fn = filter_pred_16;
break;
case 1:
pred->fn = filter_pred_8;
break;
default:
return -EINVAL;
}
return __filter_add_pred(call, pred);
}
static struct filter_pred *copy_pred(struct filter_pred *pred)
{
struct filter_pred *new_pred = kmalloc(sizeof(*pred), GFP_KERNEL);
if (!new_pred)
return NULL;
memcpy(new_pred, pred, sizeof(*pred));
if (pred->field_name) {
new_pred->field_name = kstrdup(pred->field_name, GFP_KERNEL);
if (!new_pred->field_name) {
kfree(new_pred);
return NULL;
}
}
if (pred->str_val) {
new_pred->str_val = kstrdup(pred->str_val, GFP_KERNEL);
if (!new_pred->str_val) {
filter_free_pred(new_pred);
return NULL;
}
}
return new_pred;
}
int filter_add_subsystem_pred(struct event_subsystem *system,
struct filter_pred *pred)
{
struct ftrace_event_call *call = __start_ftrace_events;
struct filter_pred *event_pred;
int i;
if (system->preds && !pred->compound)
filter_free_subsystem_preds(system);
if (!system->preds) {
system->preds = kzalloc(MAX_FILTER_PRED * sizeof(pred),
GFP_KERNEL);
if (!system->preds)
return -ENOMEM;
}
for (i = 0; i < MAX_FILTER_PRED; i++) {
if (!system->preds[i]) {
system->preds[i] = pred;
break;
}
}
if (i == MAX_FILTER_PRED)
return -EINVAL;
events_for_each(call) {
int err;
if (!call->name || !call->regfunc)
continue;
if (strcmp(call->system, system->name))
continue;
if (!find_event_field(call, pred->field_name))
continue;
event_pred = copy_pred(pred);
if (!event_pred)
goto oom;
err = filter_add_pred(call, event_pred);
if (err)
filter_free_pred(event_pred);
if (err == -ENOMEM)
goto oom;
}
return 0;
oom:
system->preds[i] = NULL;
return -ENOMEM;
}
int filter_parse(char **pbuf, struct filter_pred *pred)
{
char *tmp, *tok, *val_str = NULL;
int tok_n = 0;
/* grammar: [ "&&" | "||" ] <field> ( "==" | "!=" ) <value>, or "0" to clear */
while ((tok = strsep(pbuf, " \n"))) {
if (tok_n == 0) {
if (!strcmp(tok, "0")) {
pred->clear = 1;
return 0;
} else if (!strcmp(tok, "&&")) {
pred->or = 0;
pred->compound = 1;
} else if (!strcmp(tok, "||")) {
pred->or = 1;
pred->compound = 1;
} else
pred->field_name = tok;
tok_n = 1;
continue;
}
if (tok_n == 1) {
if (!pred->field_name)
pred->field_name = tok;
else if (!strcmp(tok, "!="))
pred->not = 1;
else if (!strcmp(tok, "=="))
pred->not = 0;
else {
pred->field_name = NULL;
return -EINVAL;
}
tok_n = 2;
continue;
}
if (tok_n == 2) {
if (pred->compound) {
if (!strcmp(tok, "!="))
pred->not = 1;
else if (!strcmp(tok, "=="))
pred->not = 0;
else {
pred->field_name = NULL;
return -EINVAL;
}
} else {
val_str = tok;
break; /* done */
}
tok_n = 3;
continue;
}
if (tok_n == 3) {
val_str = tok;
break; /* done */
}
}
pred->field_name = kstrdup(pred->field_name, GFP_KERNEL);
if (!pred->field_name)
return -ENOMEM;
pred->val = simple_strtoull(val_str, &tmp, 10);
if (tmp == val_str) {
pred->str_val = kstrdup(val_str, GFP_KERNEL);
if (!pred->str_val)
return -ENOMEM;
}
return 0;
}
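
Concretely, the grammar filter_parse() accepts is an optional "&&" or "||" prefix (compounding onto existing predicates), then a field name, "==" or "!=", and a value; a bare "0" clears the filter, and a value that does not parse as a number is kept as a string. A hedged sketch of one write to a filter file, mirroring event_filter_write() earlier in this commit ("pid != 0" and the local names are illustrative):

	char buf[] = "pid != 0", *pbuf = buf;
	struct filter_pred *pred = kzalloc(sizeof(*pred), GFP_KERNEL);
	int err;

	err = filter_parse(&pbuf, pred);
	/* now: pred->field_name = "pid", pred->not = 1, pred->val = 0,
	 * pred->str_val = NULL, pred->compound = 0, pred->clear = 0 */
	if (!err && !pred->clear)
		err = filter_add_pred(call, pred);	/* "call" is the event */
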
@@ -129,3 +129,48 @@ ftrace_format_##call(struct trace_seq *s) \
 }

 #include <trace/trace_event_types.h>

+#undef __field
+#define __field(type, item)						\
+	ret = trace_define_field(event_call, #type, #item,		\
+				 offsetof(typeof(field), item),		\
+				 sizeof(field.item));			\
+	if (ret)							\
+		return ret;
+
+#undef __array
+#define __array(type, item, len)					\
+	ret = trace_define_field(event_call, #type "[" #len "]", #item,	\
+				 offsetof(typeof(field), item),		\
+				 sizeof(field.item));			\
+	if (ret)							\
+		return ret;
+
+#define __common_field(type, item)					\
+	ret = trace_define_field(event_call, #type, "common_" #item,	\
+				 offsetof(typeof(field.ent), item),	\
+				 sizeof(field.ent.item));		\
+	if (ret)							\
+		return ret;
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, func, print)		\
+int									\
+ftrace_define_fields_##call(void)					\
+{									\
+	struct ftrace_raw_##call field;					\
+	struct ftrace_event_call *event_call = &event_##call;		\
+	int ret;							\
+									\
+	__common_field(unsigned char, type);				\
+	__common_field(unsigned char, flags);				\
+	__common_field(unsigned char, preempt_count);			\
+	__common_field(int, pid);					\
+	__common_field(int, tgid);					\
+									\
+	tstruct;							\
+									\
+	return ret;							\
+}
+
+#include <trace/trace_event_types.h>
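
Because trace_event_types.h is included a second time with TRACE_EVENT redefined, each event gains a generated ftrace_define_fields_<call>() that registers the five common header fields and then one field per __field()/__array() in its tstruct. A hand-written illustration of the expansion for a hypothetical event foo_bar with a single __field(int, baz) (names are invented; .ent is the common-header member the __common_field macro assumes):

int ftrace_define_fields_foo_bar(void)
{
	struct ftrace_raw_foo_bar field;
	struct ftrace_event_call *event_call = &event_foo_bar;
	int ret;

	/* first of the five common fields every event records */
	ret = trace_define_field(event_call, "unsigned char", "common_type",
				 offsetof(typeof(field.ent), type),
				 sizeof(field.ent.type));
	if (ret)
		return ret;
	/* ... common_flags, common_preempt_count, common_pid, common_tgid ... */

	/* one call per __field()/__array() entry */
	ret = trace_define_field(event_call, "int", "baz",
				 offsetof(typeof(field), baz),
				 sizeof(field.baz));
	if (ret)
		return ret;

	return ret;
}
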
@@ -204,6 +204,7 @@ static struct ftrace_event_call event_##call; \
 \
 static void ftrace_raw_event_##call(proto)				\
 {									\
+	struct ftrace_event_call *call = &event_##call;			\
 	struct ring_buffer_event *event;				\
 	struct ftrace_raw_##call *entry;				\
 	unsigned long irq_flags;					\
@@ -221,7 +222,11 @@ static void ftrace_raw_event_##call(proto) \
 									\
 	assign;								\
 									\
-	trace_current_buffer_unlock_commit(event, irq_flags, pc);	\
+	if (call->preds && !filter_match_preds(call, entry))		\
+		ring_buffer_event_discard(event);			\
+									\
+	trace_nowake_buffer_unlock_commit(event, irq_flags, pc);	\
+									\
 }									\
 \
 static int ftrace_raw_reg_event_##call(void)				\
@@ -252,6 +257,7 @@ static int ftrace_raw_init_event_##call(void) \
 	if (!id)							\
 		return -ENODEV;						\
 	event_##call.id = id;						\
+	INIT_LIST_HEAD(&event_##call.fields);				\
 	return 0;							\
 }									\
 \
@@ -264,6 +270,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 	.regfunc		= ftrace_raw_reg_event_##call,		\
 	.unregfunc		= ftrace_raw_unreg_event_##call,	\
 	.show_format		= ftrace_format_##call,			\
+	.define_fields		= ftrace_define_fields_##call,		\
 	_TRACE_PROFILE_INIT(call)					\
 }
...
@@ -57,9 +57,9 @@ static struct tracer_flags tracer_flags = {

 /* Add a function return address to the trace stack on thread info.*/
 int
-ftrace_push_return_trace(unsigned long ret, unsigned long long time,
-			 unsigned long func, int *depth)
+ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth)
 {
+	unsigned long long calltime;
 	int index;

 	if (!current->ret_stack)
@@ -71,11 +71,13 @@ ftrace_push_return_trace(unsigned long ret, unsigned long long time,
 		return -EBUSY;
 	}

+	calltime = trace_clock_local();
+
 	index = ++current->curr_ret_stack;
 	barrier();
 	current->ret_stack[index].ret = ret;
 	current->ret_stack[index].func = func;
-	current->ret_stack[index].calltime = time;
+	current->ret_stack[index].calltime = calltime;
 	*depth = index;

 	return 0;
...
@@ -91,6 +91,7 @@ struct tracer nop_trace __read_mostly =
 	.name		= "nop",
 	.init		= nop_trace_init,
 	.reset		= nop_trace_reset,
+	.wait_pipe	= poll_wait_pipe,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest	= trace_selftest_startup_nop,
 #endif
...
@@ -137,7 +137,7 @@ int trace_seq_putc(struct trace_seq *s, unsigned char c)
 	return 1;
 }

-int trace_seq_putmem(struct trace_seq *s, void *mem, size_t len)
+int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len)
 {
 	if (len > ((PAGE_SIZE - 1) - s->len))
 		return 0;
@@ -148,10 +148,10 @@ int trace_seq_putmem(struct trace_seq *s, void *mem, size_t len)
 	return len;
 }

-int trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
+int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, size_t len)
 {
 	unsigned char hex[HEX_CHARS];
-	unsigned char *data = mem;
+	const unsigned char *data = mem;
 	int i, j;

 #ifdef __BIG_ENDIAN
@@ -167,6 +167,19 @@ int trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
 	return trace_seq_putmem(s, hex, j);
 }

+void *trace_seq_reserve(struct trace_seq *s, size_t len)
+{
+	void *ret;
+
+	if (len > ((PAGE_SIZE - 1) - s->len))
+		return NULL;
+
+	ret = s->buffer + s->len;
+	s->len += len;
+
+	return ret;
+}
+
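
trace_seq_reserve() hands callers a window into the remaining page so binary payloads can be built in place instead of being formatted and copied. A hedged usage fragment (struct my_blob and blob are hypothetical; s is a struct trace_seq * as above):

	void *p = trace_seq_reserve(s, sizeof(struct my_blob));

	if (!p)
		return 0;	/* less than sizeof(struct my_blob) left */
	memcpy(p, &blob, sizeof(struct my_blob));
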
 int trace_seq_path(struct trace_seq *s, struct path *path)
 {
 	unsigned char *p;
...
@@ -29,24 +29,27 @@ seq_print_ip_sym(struct trace_seq *s, unsigned long ip,
 		 unsigned long sym_flags);
 extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
 				 size_t cnt);
-int trace_seq_puts(struct trace_seq *s, const char *str);
-int trace_seq_putc(struct trace_seq *s, unsigned char c);
-int trace_seq_putmem(struct trace_seq *s, void *mem, size_t len);
-int trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len);
-int trace_seq_path(struct trace_seq *s, struct path *path);
-int seq_print_userip_objs(const struct userstack_entry *entry,
-			  struct trace_seq *s, unsigned long sym_flags);
-int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
-		      unsigned long ip, unsigned long sym_flags);
-int trace_print_context(struct trace_iterator *iter);
-int trace_print_lat_context(struct trace_iterator *iter);
-struct trace_event *ftrace_find_event(int type);
-int register_ftrace_event(struct trace_event *event);
-int unregister_ftrace_event(struct trace_event *event);
-enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags);
+extern int trace_seq_puts(struct trace_seq *s, const char *str);
+extern int trace_seq_putc(struct trace_seq *s, unsigned char c);
+extern int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len);
+extern int trace_seq_putmem_hex(struct trace_seq *s, const void *mem,
+				size_t len);
+extern void *trace_seq_reserve(struct trace_seq *s, size_t len);
+extern int trace_seq_path(struct trace_seq *s, struct path *path);
+extern int seq_print_userip_objs(const struct userstack_entry *entry,
+				 struct trace_seq *s, unsigned long sym_flags);
+extern int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
+			     unsigned long ip, unsigned long sym_flags);
+extern int trace_print_context(struct trace_iterator *iter);
+extern int trace_print_lat_context(struct trace_iterator *iter);
+extern struct trace_event *ftrace_find_event(int type);
+extern int register_ftrace_event(struct trace_event *event);
+extern int unregister_ftrace_event(struct trace_event *event);
+extern enum print_line_t trace_nop_print(struct trace_iterator *iter,
+					 int flags);

 #define MAX_MEMHEX_BYTES	8
 #define HEX_CHARS		(MAX_MEMHEX_BYTES*2 + 1)
...
@@ -75,7 +75,7 @@ static int stat_seq_init(struct tracer_stat_session *session)
 {
 	struct trace_stat_list *iter_entry, *new_entry;
 	struct tracer_stat *ts = session->ts;
-	void *prev_stat;
+	void *stat;
 	int ret = 0;
 	int i;
 
@@ -85,6 +85,10 @@ static int stat_seq_init(struct tracer_stat_session *session)
 	if (!ts->stat_cmp)
 		ts->stat_cmp = dummy_cmp;
 
+	stat = ts->stat_start();
+	if (!stat)
+		goto exit;
+
 	/*
 	 * The first entry. Actually this is the second, but the first
 	 * one (the stat_list head) is pointless.
@@ -99,14 +103,19 @@ static int stat_seq_init(struct tracer_stat_session *session)
 
 	list_add(&new_entry->list, &session->stat_list);
 
-	new_entry->stat = ts->stat_start();
-	prev_stat = new_entry->stat;
+	new_entry->stat = stat;
 
 	/*
 	 * Iterate over the tracer stat entries and store them in a sorted
 	 * list.
 	 */
 	for (i = 1; ; i++) {
+		stat = ts->stat_next(stat, i);
+
+		/* End of insertion */
+		if (!stat)
+			break;
+
 		new_entry = kmalloc(sizeof(struct trace_stat_list), GFP_KERNEL);
 		if (!new_entry) {
 			ret = -ENOMEM;
@@ -114,31 +123,23 @@ static int stat_seq_init(struct tracer_stat_session *session)
 		}
 
 		INIT_LIST_HEAD(&new_entry->list);
-		new_entry->stat = ts->stat_next(prev_stat, i);
+		new_entry->stat = stat;
 
-		/* End of insertion */
-		if (!new_entry->stat)
-			break;
-
-		list_for_each_entry(iter_entry, &session->stat_list, list) {
+		list_for_each_entry_reverse(iter_entry, &session->stat_list,
+				list) {
 
 			/* Insertion with a descendent sorting */
-			if (ts->stat_cmp(new_entry->stat,
-					 iter_entry->stat) > 0) {
-
-				list_add_tail(&new_entry->list,
-					      &iter_entry->list);
-				break;
-
-			/* The current smaller value */
-			} else if (list_is_last(&iter_entry->list,
-						&session->stat_list)) {
+			if (ts->stat_cmp(iter_entry->stat,
+					new_entry->stat) >= 0) {
 				list_add(&new_entry->list, &iter_entry->list);
 				break;
 			}
 		}
 
-		prev_stat = new_entry->stat;
+		/* The current larger value */
+		if (list_empty(&new_entry->list))
+			list_add(&new_entry->list, &session->stat_list);
 	}
 exit:
 	mutex_unlock(&session->stat_mutex);
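The rework fetches each stat with ts->stat_next() at the top of the loop, then scans the already-sorted list backwards with list_for_each_entry_reverse(): the first element that compares >= the new entry marks the insertion point, so a value smaller than everything seen so far (the common case for roughly ordered input) inserts in O(1) at the tail; if nothing qualifies, the list_empty() fallback makes the new entry the head. A self-contained sketch of that reverse-scan insertion on a plain doubly linked list, illustrative only:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int val;
	struct node *prev, *next;
};

static struct node *head, *tail;

static void insert_sorted_desc(int val)
{
	struct node *n = malloc(sizeof(*n));
	struct node *it;

	if (!n)
		return;
	n->val = val;
	n->prev = n->next = NULL;

	/* Reverse scan: first element >= val is the insertion point. */
	for (it = tail; it; it = it->prev) {
		if (it->val >= val) {
			n->prev = it;
			n->next = it->next;
			if (it->next)
				it->next->prev = n;
			else
				tail = n;
			it->next = n;
			return;
		}
	}

	/* Larger than everything (or empty list): new head. */
	n->next = head;
	if (head)
		head->prev = n;
	head = n;
	if (!tail)
		tail = n;
}

int main(void)
{
	int vals[] = { 3, 9, 1, 7 };
	struct node *it;
	size_t i;

	for (i = 0; i < sizeof(vals) / sizeof(vals[0]); i++)
		insert_sorted_desc(vals[i]);
	for (it = head; it; it = it->next)
		printf("%d ", it->val);	/* prints: 9 7 3 1 */
	printf("\n");
	return 0;
}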
@@ -160,7 +161,7 @@ static void *stat_seq_start(struct seq_file *s, loff_t *pos)
 
 	/* If we are in the beginning of the file, print the headers */
 	if (!*pos && session->ts->stat_headers)
-		session->ts->stat_headers(s);
+		return SEQ_START_TOKEN;
 
 	return seq_list_start(&session->stat_list, *pos);
 }
@@ -169,6 +170,9 @@ static void *stat_seq_next(struct seq_file *s, void *p, loff_t *pos)
 {
 	struct tracer_stat_session *session = s->private;
 
+	if (p == SEQ_START_TOKEN)
+		return seq_list_start(&session->stat_list, *pos);
+
 	return seq_list_next(p, &session->stat_list, pos);
 }
 
@@ -183,6 +187,9 @@ static int stat_seq_show(struct seq_file *s, void *v)
 	struct tracer_stat_session *session = s->private;
 	struct trace_stat_list *l = list_entry(v, struct trace_stat_list, list);
 
+	if (v == SEQ_START_TOKEN)
+		return session->ts->stat_headers(s);
+
 	return session->ts->stat_show(s, l->stat);
 }
...
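Returning SEQ_START_TOKEN from ->start() instead of printing headers there is the standard seq_file idiom: output written in ->start() interacts badly with seq_file's retry logic when the buffer overflows, whereas routing the sentinel through ->show() lets the header be emitted (and retried) like any other record. The kernel defines SEQ_START_TOKEN as ((void *)1); a stand-alone sketch of the protocol, with hypothetical iterator names:

#include <stdio.h>

#define SEQ_START_TOKEN ((void *)1)

static const char *items[] = { "alpha", "beta", "gamma" };
#define NITEMS ((long)(sizeof(items) / sizeof(items[0])))

/* Position 0 is the header pseudo-record; positions 1..N are items. */
static void *iter_start(long *pos)
{
	if (*pos == 0)
		return SEQ_START_TOKEN;
	return *pos <= NITEMS ? (void *)&items[*pos - 1] : NULL;
}

static void *iter_next(void *p, long *pos)
{
	(void)p;
	(*pos)++;
	return *pos <= NITEMS ? (void *)&items[*pos - 1] : NULL;
}

/* show() handles the sentinel, so the header uses the normal path. */
static void iter_show(void *v)
{
	if (v == SEQ_START_TOKEN) {
		printf("# NAME\n");
		return;
	}
	printf("%s\n", *(const char **)v);
}

int main(void)
{
	long pos = 0;
	void *p;

	for (p = iter_start(&pos); p; p = iter_next(p, &pos))
		iter_show(p);
	return 0;
}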
@@ -196,6 +196,11 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
 	struct pid *pid;
 	struct task_struct *tsk;
 
+	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+	if (&cws->list == workqueue_cpu_stat(cpu)->list.next)
+		seq_printf(s, "\n");
+	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
+
 	pid = find_get_pid(cws->pid);
 	if (pid) {
 		tsk = get_pid_task(pid, PIDTYPE_PID);
@@ -208,18 +213,13 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
 		put_pid(pid);
 	}
 
-	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
-	if (&cws->list == workqueue_cpu_stat(cpu)->list.next)
-		seq_printf(s, "\n");
-	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
-
 	return 0;
 }
 
 static int workqueue_stat_headers(struct seq_file *s)
 {
 	seq_printf(s, "# CPU  INSERTED  EXECUTED   NAME\n");
-	seq_printf(s, "# |      |         |          |\n\n");
+	seq_printf(s, "# |      |         |          |\n");
 	return 0;
 }
...
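Moving the separator to the top of workqueue_stat_show() (and dropping the extra \n from the header line) makes the blank line print just before each CPU's first entry rather than after its last: the check &cws->list == workqueue_cpu_stat(cpu)->list.next is true exactly when cws is the head element of that CPU's list. A small sketch of a first-of-group test driving a separator, with hypothetical structures standing in for the per-CPU stat lists:

#include <stdio.h>

struct entry {
	const char *name;
};

struct cpu_group {
	struct entry entries[2];
	int nr;
};

/* Print a blank separator only when e is the group's first entry. */
static void show_entry(const struct cpu_group *g, const struct entry *e)
{
	if (e == &g->entries[0])
		printf("\n");
	printf("%s\n", e->name);
}

int main(void)
{
	struct cpu_group cpus[2] = {
		{ .entries = { { "wq-a" }, { "wq-b" } }, .nr = 2 },
		{ .entries = { { "wq-c" }, { "wq-d" } }, .nr = 2 },
	};
	int c, i;

	printf("# NAME\n");
	for (c = 0; c < 2; c++)
		for (i = 0; i < cpus[c].nr; i++)
			show_entry(&cpus[c], &cpus[c].entries[i]);
	return 0;
}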
@@ -48,8 +48,6 @@
 #include <linux/rmap.h>
 #include <linux/module.h>
 #include <linux/delayacct.h>
-#include <linux/kprobes.h>
-#include <linux/mutex.h>
 #include <linux/init.h>
 #include <linux/writeback.h>
 #include <linux/memcontrol.h>
...