Commit c72bb316 authored by Linus Torvalds

Merge tag 'trace-3.11' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing changes from Steven Rostedt:
 "The majority of the changes here are cleanups for the large changes
  that were added to 3.10, which includes several bug fixes that have
  been marked for stable.

  As for new features, there were a few, but nothing to write to LWN
  about.  These include:

   New function triggers called "dump" and "cpudump" that will cause
   ftrace to dump its buffer to the console when the function is called.
   The difference between "dump" and "cpudump" is that "dump" will dump
   the entire contents of the ftrace buffer, whereas "cpudump" will only
   dump the contents of the ftrace buffer for the CPU that called the
   function.

  Another small enhancement is a new sysctl switch called
  "traceoff_on_warning" which, when enabled, will disable tracing if any
  WARN_ON() is triggered.  This is useful if you want to debug what
  caused a warning and do not want to risk losing your trace data by the
  ring buffer overwriting the data before you can disable it.  There's
   also a kernel command line option of the same name that enables this
   behavior at boot"

* tag 'trace-3.11' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (34 commits)
  tracing: Make tracing_open_generic_{tr,tc}() static
  tracing: Remove ftrace() function
  tracing: Remove TRACE_EVENT_TYPE enum definition
  tracing: Make tracer_tracing_{off,on,is_on}() static
  tracing: Fix irqs-off tag display in syscall tracing
  uprobes: Fix return value in error handling path
  tracing: Fix race between deleting buffer and setting events
  tracing: Add trace_array_get/put() to event handling
  tracing: Get trace_array ref counts when accessing trace files
  tracing: Add trace_array_get/put() to handle instance refs better
  tracing: Protect ftrace_trace_arrays list in trace_events.c
  tracing: Make trace_marker use the correct per-instance buffer
  ftrace: Do not run selftest if command line parameter is set
  tracing/kprobes: Don't pass addr=ip to perf_trace_buf_submit()
  tracing: Use flag buffer_disabled for irqsoff tracer
  tracing/kprobes: Turn trace_probe->files into list_head
  tracing: Fix disabling of soft disable
  tracing: Add missing syscall_metadata comment
  tracing: Simplify code for showing of soft disabled flag
  tracing/kprobes: Kill probe_enable_lock
  ...
parents 6d128e1e dcc30223
@@ -3081,6 +3081,19 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			See also Documentation/trace/ftrace.txt "trace options"
 			section.
 
+	traceoff_on_warning
+			[FTRACE] enable this option to disable tracing when a
+			warning is hit. This turns off "tracing_on". Tracing can
+			be enabled again by echoing '1' into the "tracing_on"
+			file located in /sys/kernel/debug/tracing/
+
+			This option is useful, as it disables the trace before
+			the WARNING dump is called, which prevents the trace from
+			being filled with content caused by the warning output.
+
+			This option can also be set at run time via the sysctl
+			option: kernel/traceoff_on_warning
+
 	transparent_hugepage=
 			[KNL]
 			Format: [always|madvise|never]
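The new switch is driven entirely from userspace; a minimal usage sketch (paths assume the usual debugfs mount point under /sys/kernel/debug):

    # Enable stop-tracing-on-warning at run time via the sysctl:
    echo 1 > /proc/sys/kernel/traceoff_on_warning

    # Or enable it from boot by adding "traceoff_on_warning" to the
    # kernel command line.

    # After a WARN_ON() fires, tracing is off; inspect the preserved
    # buffer, then re-arm tracing:
    cat /sys/kernel/debug/tracing/trace
    echo 1 > /sys/kernel/debug/tracing/tracing_on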
@@ -183,13 +183,22 @@ The relational-operators depend on the type of the field being tested:
 
 The operators available for numeric fields are:
 
-==, !=, <, <=, >, >=
+==, !=, <, <=, >, >=, &
 
 And for string fields they are:
 
-==, !=
+==, !=, ~
 
-Currently, only exact string matches are supported.
+The glob (~) only accepts a wild card character (*) at the start and/or
+end of the string. For example:
+
+  prev_comm ~ "*sh"
+  prev_comm ~ "sh*"
+  prev_comm ~ "*sh*"
+
+It does not allow a wild card within the string:
+
+  prev_comm ~ "ba*sh"	<-- is invalid
 
 5.2 Setting filters
 -------------------
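A usage sketch for the two new operators (sched_switch is just a convenient example event; the bit value passed to & is illustrative):

    cd /sys/kernel/debug/tracing

    # String glob: match context switches into any task whose comm
    # ends in "sh" (bash, sh, zsh, ...):
    echo 'next_comm ~ "*sh"' > events/sched/sched_switch/filter

    # Bitwise AND: match when a chosen bit is set in a numeric field,
    # e.g. bit 0 of the outgoing task's state mask:
    echo 'prev_state & 1' > events/sched/sched_switch/filter

    # Clear the filter again:
    echo 0 > events/sched/sched_switch/filter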
@@ -2430,6 +2430,19 @@ The following commands are supported:
 
    echo '!schedule:disable_event:sched:sched_switch' > \
    	 set_ftrace_filter
 
+- dump
+  When the function is hit, it will dump the contents of the ftrace
+  ring buffer to the console. This is useful if you need to debug
+  something, and want to dump the trace when a certain function
+  is hit. Perhaps it's a function that is called before a triple
+  fault happens and does not allow you to get a regular dump.
+
+- cpudump
+  When the function is hit, it will dump the contents of the ftrace
+  ring buffer for the current CPU to the console. Unlike the "dump"
+  command, it only prints out the contents of the ring buffer for the
+  CPU that executed the function that triggered the dump.
+
 trace_pipe
 ----------
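Wiring the new triggers up follows the existing command syntax (the target function here is only an example; as the implementation further below shows, the trigger count is forced to 1, so each trigger fires once):

    cd /sys/kernel/debug/tracing

    # Dump the whole ring buffer to the console when the function is hit:
    echo 'native_flush_tlb:dump' > set_ftrace_filter

    # Dump only the buffer of the CPU that hit the function:
    echo 'native_flush_tlb:cpudump' > set_ftrace_filter

    # Remove a trigger with the usual '!' syntax:
    echo '!native_flush_tlb:dump' > set_ftrace_filter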
@@ -566,10 +566,6 @@ static inline ssize_t ftrace_filter_write(struct file *file, const char __user *
 			    size_t cnt, loff_t *ppos) { return -ENODEV; }
 static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
 			     size_t cnt, loff_t *ppos) { return -ENODEV; }
-static inline loff_t ftrace_regex_lseek(struct file *file, loff_t offset, int whence)
-{
-	return -ENODEV;
-}
 static inline int
 ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }
 #endif /* CONFIG_DYNAMIC_FTRACE */

@@ -828,10 +824,15 @@ enum ftrace_dump_mode;
 
 extern enum ftrace_dump_mode ftrace_dump_on_oops;
 
+extern void disable_trace_on_warning(void);
+extern int __disable_trace_on_warning;
+
 #ifdef CONFIG_PREEMPT
 #define INIT_TRACE_RECURSION		.trace_recursion = 0,
 #endif
+
+#else /* CONFIG_TRACING */
+static inline void disable_trace_on_warning(void) { }
 #endif /* CONFIG_TRACING */
 
 #ifndef INIT_TRACE_RECURSION
@@ -16,6 +16,7 @@
  * @nb_args: number of parameters it takes
  * @types: list of types as strings
  * @args: list of args as strings (args[i] matches types[i])
+ * @enter_fields: list of fields for syscall_enter trace event
  * @enter_event: associated syscall_enter trace event
  * @exit_event: associated syscall_exit trace event
  */
@@ -15,6 +15,7 @@
 #include <linux/notifier.h>
 #include <linux/module.h>
 #include <linux/random.h>
+#include <linux/ftrace.h>
 #include <linux/reboot.h>
 #include <linux/delay.h>
 #include <linux/kexec.h>

@@ -399,6 +400,8 @@ struct slowpath_args {
 static void warn_slowpath_common(const char *file, int line, void *caller,
 				 unsigned taint, struct slowpath_args *args)
 {
+	disable_trace_on_warning();
+
 	pr_warn("------------[ cut here ]------------\n");
 	pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS()\n",
 		raw_smp_processor_id(), current->pid, file, line, caller);
@@ -599,6 +599,13 @@ static struct ctl_table kern_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
+	{
+		.procname	= "traceoff_on_warning",
+		.data		= &__disable_trace_on_warning,
+		.maxlen		= sizeof(__disable_trace_on_warning),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
 #endif
 #ifdef CONFIG_MODULES
 	{
@@ -413,6 +413,17 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	return 0;
 }
 
+static void ftrace_sync(struct work_struct *work)
+{
+	/*
+	 * This function is just a stub to implement a hard force
+	 * of synchronize_sched(). This requires synchronizing
+	 * tasks even in userspace and idle.
+	 *
+	 * Yes, function tracing is rude.
+	 */
+}
+
 static int __unregister_ftrace_function(struct ftrace_ops *ops)
 {
 	int ret;

@@ -440,8 +451,12 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 			 * so there'll be no new users. We must ensure
 			 * all current users are done before we free
 			 * the control data.
+			 * Note synchronize_sched() is not enough, as we
+			 * use preempt_disable() to do RCU, but the function
+			 * tracer can be called where RCU is not active
+			 * (before user_exit()).
 			 */
-			synchronize_sched();
+			schedule_on_each_cpu(ftrace_sync);
 			control_ops_free(ops);
 		}
 	} else

@@ -456,9 +471,13 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 	/*
 	 * Dynamic ops may be freed, we must make sure that all
 	 * callers are done before leaving this function.
+	 *
+	 * Again, normal synchronize_sched() is not good enough.
+	 * We need to do a hard force of sched synchronization.
 	 */
 	if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
-		synchronize_sched();
+		schedule_on_each_cpu(ftrace_sync);
 
 	return 0;
 }

@@ -622,12 +641,18 @@ static int function_stat_show(struct seq_file *m, void *v)
 	if (rec->counter <= 1)
 		stddev = 0;
 	else {
-		stddev = rec->time_squared - rec->counter * avg * avg;
+		/*
+		 * Apply Welford's method:
+		 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
+		 */
+		stddev = rec->counter * rec->time_squared -
+			 rec->time * rec->time;
+
 		/*
 		 * Divide only 1000 for ns^2 -> us^2 conversion.
 		 * trace_print_graph_duration will divide 1000 again.
 		 */
-		do_div(stddev, (rec->counter - 1) * 1000);
+		do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
 	}
 
 	trace_seq_init(&s);

@@ -3512,8 +3537,12 @@ EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
 
+/* Used by function selftest to not test if filter is set */
+bool ftrace_filter_param __initdata;
+
 static int __init set_ftrace_notrace(char *str)
 {
+	ftrace_filter_param = true;
 	strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
 	return 1;
 }

@@ -3521,6 +3550,7 @@ __setup("ftrace_notrace=", set_ftrace_notrace);
 
 static int __init set_ftrace_filter(char *str)
 {
+	ftrace_filter_param = true;
 	strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
 	return 1;
 }
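The rewritten stddev computation in function_stat_show() keeps everything in integer sums and divides once at the end. The identity it relies on is the standard expansion of the sample variance, where n is rec->counter and the sums are rec->time and rec->time_squared; the old code instead subtracted n times the square of the already-truncated integer average, which lost precision:

    s^2 = \frac{1}{n-1} \sum_{i=1}^{n} (x_i - \bar{x})^2
        = \frac{n \sum_i x_i^2 - \left(\sum_i x_i\right)^2}{n \, (n-1)}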
@@ -224,6 +224,11 @@ enum {
 
 extern struct list_head ftrace_trace_arrays;
 
+extern struct mutex trace_types_lock;
+
+extern int trace_array_get(struct trace_array *tr);
+extern void trace_array_put(struct trace_array *tr);
+
 /*
  * The global tracer (top) should be the first trace array added,
  * but we check the flag anyway.

@@ -554,11 +559,6 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu);
 
 void poll_wait_pipe(struct trace_iterator *iter);
 
-void ftrace(struct trace_array *tr,
-	    struct trace_array_cpu *data,
-	    unsigned long ip,
-	    unsigned long parent_ip,
-	    unsigned long flags, int pc);
 void tracing_sched_switch_trace(struct trace_array *tr,
 				struct task_struct *prev,
 				struct task_struct *next,

@@ -774,6 +774,7 @@ print_graph_function_flags(struct trace_iterator *iter, u32 flags)
 extern struct list_head ftrace_pids;
 
 #ifdef CONFIG_FUNCTION_TRACER
+extern bool ftrace_filter_param __initdata;
 static inline int ftrace_trace_task(struct task_struct *task)
 {
 	if (list_empty(&ftrace_pids))

@@ -899,12 +900,6 @@ static inline void trace_branch_disable(void)
 /* set ring buffers to default size if not already done so */
 int tracing_update_buffers(void);
 
-/* trace event type bit fields, not numeric */
-enum {
-	TRACE_EVENT_TYPE_PRINTF		= 1,
-	TRACE_EVENT_TYPE_RAW		= 2,
-};
-
 struct ftrace_event_field {
 	struct list_head	link;
 	const char		*name;
@@ -44,6 +44,7 @@ enum filter_op_ids
 	OP_LE,
 	OP_GT,
 	OP_GE,
+	OP_BAND,
 	OP_NONE,
 	OP_OPEN_PAREN,
 };

@@ -54,6 +55,7 @@ struct filter_op {
 	int precedence;
 };
 
+/* Order must be the same as enum filter_op_ids above */
 static struct filter_op filter_ops[] = {
 	{ OP_OR,	"||",		1 },
 	{ OP_AND,	"&&",		2 },

@@ -64,6 +66,7 @@ static struct filter_op filter_ops[] = {
 	{ OP_LE,	"<=",		5 },
 	{ OP_GT,	">",		5 },
 	{ OP_GE,	">=",		5 },
+	{ OP_BAND,	"&",		6 },
 	{ OP_NONE,	"OP_NONE",	0 },
 	{ OP_OPEN_PAREN, "(",		0 },
 };

@@ -156,6 +159,9 @@ static int filter_pred_##type(struct filter_pred *pred, void *event)	\
 	case OP_GE:							\
 		match = (*addr >= val);					\
 		break;							\
+	case OP_BAND:							\
+		match = (*addr & val);					\
+		break;							\
 	default:							\
 		break;							\
 	}								\
@@ -290,6 +290,21 @@ ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, void **data)
 		trace_dump_stack(STACK_SKIP);
 }
 
+static void
+ftrace_dump_probe(unsigned long ip, unsigned long parent_ip, void **data)
+{
+	if (update_count(data))
+		ftrace_dump(DUMP_ALL);
+}
+
+/* Only dump the current CPU buffer. */
+static void
+ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip, void **data)
+{
+	if (update_count(data))
+		ftrace_dump(DUMP_ORIG);
+}
+
 static int
 ftrace_probe_print(const char *name, struct seq_file *m,
 		   unsigned long ip, void *data)

@@ -327,6 +342,20 @@ ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
 	return ftrace_probe_print("stacktrace", m, ip, data);
 }
 
+static int
+ftrace_dump_print(struct seq_file *m, unsigned long ip,
+		  struct ftrace_probe_ops *ops, void *data)
+{
+	return ftrace_probe_print("dump", m, ip, data);
+}
+
+static int
+ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
+		     struct ftrace_probe_ops *ops, void *data)
+{
+	return ftrace_probe_print("cpudump", m, ip, data);
+}
+
 static struct ftrace_probe_ops traceon_count_probe_ops = {
 	.func			= ftrace_traceon_count,
 	.print			= ftrace_traceon_print,

@@ -342,6 +371,16 @@ static struct ftrace_probe_ops stacktrace_count_probe_ops = {
 	.print			= ftrace_stacktrace_print,
 };
 
+static struct ftrace_probe_ops dump_probe_ops = {
+	.func			= ftrace_dump_probe,
+	.print			= ftrace_dump_print,
+};
+
+static struct ftrace_probe_ops cpudump_probe_ops = {
+	.func			= ftrace_cpudump_probe,
+	.print			= ftrace_cpudump_print,
+};
+
 static struct ftrace_probe_ops traceon_probe_ops = {
 	.func			= ftrace_traceon,
 	.print			= ftrace_traceon_print,

@@ -425,6 +464,32 @@ ftrace_stacktrace_callback(struct ftrace_hash *hash,
 					   param, enable);
 }
 
+static int
+ftrace_dump_callback(struct ftrace_hash *hash,
+			   char *glob, char *cmd, char *param, int enable)
+{
+	struct ftrace_probe_ops *ops;
+
+	ops = &dump_probe_ops;
+
+	/* Only dump once. */
+	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
+					   "1", enable);
+}
+
+static int
+ftrace_cpudump_callback(struct ftrace_hash *hash,
+			   char *glob, char *cmd, char *param, int enable)
+{
+	struct ftrace_probe_ops *ops;
+
+	ops = &cpudump_probe_ops;
+
+	/* Only dump once. */
+	return ftrace_trace_probe_callback(ops, hash, glob, cmd,
+					   "1", enable);
+}
+
 static struct ftrace_func_command ftrace_traceon_cmd = {
 	.name			= "traceon",
 	.func			= ftrace_trace_onoff_callback,

@@ -440,6 +505,16 @@ static struct ftrace_func_command ftrace_stacktrace_cmd = {
 	.func			= ftrace_stacktrace_callback,
 };
 
+static struct ftrace_func_command ftrace_dump_cmd = {
+	.name			= "dump",
+	.func			= ftrace_dump_callback,
+};
+
+static struct ftrace_func_command ftrace_cpudump_cmd = {
+	.name			= "cpudump",
+	.func			= ftrace_cpudump_callback,
+};
+
 static int __init init_func_cmd_traceon(void)
 {
 	int ret;

@@ -450,13 +525,31 @@ static int __init init_func_cmd_traceon(void)
 
 	ret = register_ftrace_command(&ftrace_traceon_cmd);
 	if (ret)
-		unregister_ftrace_command(&ftrace_traceoff_cmd);
+		goto out_free_traceoff;
 
 	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
-	if (ret) {
-		unregister_ftrace_command(&ftrace_traceoff_cmd);
-		unregister_ftrace_command(&ftrace_traceon_cmd);
-	}
+	if (ret)
+		goto out_free_traceon;
+
+	ret = register_ftrace_command(&ftrace_dump_cmd);
+	if (ret)
+		goto out_free_stacktrace;
+
+	ret = register_ftrace_command(&ftrace_cpudump_cmd);
+	if (ret)
+		goto out_free_dump;
+
+	return 0;
+
+ out_free_dump:
+	unregister_ftrace_command(&ftrace_dump_cmd);
+ out_free_stacktrace:
+	unregister_ftrace_command(&ftrace_stacktrace_cmd);
+ out_free_traceon:
+	unregister_ftrace_command(&ftrace_traceon_cmd);
+ out_free_traceoff:
+	unregister_ftrace_command(&ftrace_traceoff_cmd);
 
 	return ret;
 }
 #else
@@ -373,7 +373,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
 	struct trace_array_cpu *data;
 	unsigned long flags;
 
-	if (likely(!tracer_enabled))
+	if (!tracer_enabled || !tracing_is_enabled())
 		return;
 
 	cpu = raw_smp_processor_id();

@@ -416,7 +416,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
 	else
 		return;
 
-	if (!tracer_enabled)
+	if (!tracer_enabled || !tracing_is_enabled())
 		return;
 
 	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
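For reference, the tracer these checks guard is driven like this; with the fix, a tracer switched off through "tracing_on" (which tracing_is_enabled() now reflects via the buffer_disabled flag) also stops timing critical sections:

    cd /sys/kernel/debug/tracing

    echo irqsoff > current_tracer
    echo 1 > tracing_on
    sleep 1
    echo 0 > tracing_on

    # Worst-case irqs-off latency seen, and the trace that produced it:
    cat tracing_max_latency
    cat trace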
@@ -35,12 +35,17 @@ struct trace_probe {
 	const char		*symbol;	/* symbol name */
 	struct ftrace_event_class	class;
 	struct ftrace_event_call	call;
-	struct ftrace_event_file * __rcu *files;
+	struct list_head	files;
 	ssize_t			size;		/* trace entry size */
 	unsigned int		nr_args;
 	struct probe_arg	args[];
 };
 
+struct event_file_link {
+	struct ftrace_event_file	*file;
+	struct list_head		list;
+};
+
 #define SIZEOF_TRACE_PROBE(n)			\
 	(offsetof(struct trace_probe, args) +	\
 	(sizeof(struct probe_arg) * (n)))

@@ -150,6 +155,7 @@ static struct trace_probe *alloc_trace_probe(const char *group,
 		goto error;
 
 	INIT_LIST_HEAD(&tp->list);
+	INIT_LIST_HEAD(&tp->files);
 	return tp;
 error:
 	kfree(tp->call.name);

@@ -183,25 +189,6 @@ static struct trace_probe *find_trace_probe(const char *event,
 	return NULL;
 }
 
-static int trace_probe_nr_files(struct trace_probe *tp)
-{
-	struct ftrace_event_file **file;
-	int ret = 0;
-
-	/*
-	 * Since all tp->files updater is protected by probe_enable_lock,
-	 * we don't need to lock an rcu_read_lock.
-	 */
-	file = rcu_dereference_raw(tp->files);
-	if (file)
-		while (*(file++))
-			ret++;
-
-	return ret;
-}
-
-static DEFINE_MUTEX(probe_enable_lock);
-
 /*
  * Enable trace_probe
  * if the file is NULL, enable "perf" handler, or enable "trace" handler.

@@ -211,67 +198,42 @@ enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
 {
 	int ret = 0;
 
-	mutex_lock(&probe_enable_lock);
-
 	if (file) {
-		struct ftrace_event_file **new, **old;
-		int n = trace_probe_nr_files(tp);
-
-		old = rcu_dereference_raw(tp->files);
-		/* 1 is for new one and 1 is for stopper */
-		new = kzalloc((n + 2) * sizeof(struct ftrace_event_file *),
-			      GFP_KERNEL);
-		if (!new) {
+		struct event_file_link *link;
+
+		link = kmalloc(sizeof(*link), GFP_KERNEL);
+		if (!link) {
 			ret = -ENOMEM;
-			goto out_unlock;
+			goto out;
 		}
-		memcpy(new, old, n * sizeof(struct ftrace_event_file *));
-		new[n] = file;
-		/* The last one keeps a NULL */
 
-		rcu_assign_pointer(tp->files, new);
-		tp->flags |= TP_FLAG_TRACE;
+		link->file = file;
+		list_add_tail_rcu(&link->list, &tp->files);
 
-		if (old) {
-			/* Make sure the probe is done with old files */
-			synchronize_sched();
-			kfree(old);
-		}
+		tp->flags |= TP_FLAG_TRACE;
 	} else
 		tp->flags |= TP_FLAG_PROFILE;
 
-	if (trace_probe_is_enabled(tp) && trace_probe_is_registered(tp) &&
-	    !trace_probe_has_gone(tp)) {
+	if (trace_probe_is_registered(tp) && !trace_probe_has_gone(tp)) {
 		if (trace_probe_is_return(tp))
 			ret = enable_kretprobe(&tp->rp);
 		else
 			ret = enable_kprobe(&tp->rp.kp);
 	}
-
- out_unlock:
-	mutex_unlock(&probe_enable_lock);
-
+ out:
 	return ret;
 }
 
-static int
-trace_probe_file_index(struct trace_probe *tp, struct ftrace_event_file *file)
+static struct event_file_link *
+find_event_file_link(struct trace_probe *tp, struct ftrace_event_file *file)
 {
-	struct ftrace_event_file **files;
-	int i;
+	struct event_file_link *link;
 
-	/*
-	 * Since all tp->files updater is protected by probe_enable_lock,
-	 * we don't need to lock an rcu_read_lock.
-	 */
-	files = rcu_dereference_raw(tp->files);
-	if (files) {
-		for (i = 0; files[i]; i++)
-			if (files[i] == file)
-				return i;
-	}
+	list_for_each_entry(link, &tp->files, list)
+		if (link->file == file)
+			return link;
 
-	return -1;
+	return NULL;
 }
 
 /*

@@ -283,41 +245,24 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
 {
 	int ret = 0;
 
-	mutex_lock(&probe_enable_lock);
-
 	if (file) {
-		struct ftrace_event_file **new, **old;
-		int n = trace_probe_nr_files(tp);
-		int i, j;
+		struct event_file_link *link;
 
-		old = rcu_dereference_raw(tp->files);
-		if (n == 0 || trace_probe_file_index(tp, file) < 0) {
+		link = find_event_file_link(tp, file);
+		if (!link) {
 			ret = -EINVAL;
-			goto out_unlock;
+			goto out;
 		}
 
-		if (n == 1) {	/* Remove the last file */
-			tp->flags &= ~TP_FLAG_TRACE;
-			new = NULL;
-		} else {
-			new = kzalloc(n * sizeof(struct ftrace_event_file *),
-				      GFP_KERNEL);
-			if (!new) {
-				ret = -ENOMEM;
-				goto out_unlock;
-			}
-
-			/* This copy & check loop copies the NULL stopper too */
-			for (i = 0, j = 0; j < n && i < n + 1; i++)
-				if (old[i] != file)
-					new[j++] = old[i];
-		}
+		list_del_rcu(&link->list);
+		/* synchronize with kprobe_trace_func/kretprobe_trace_func */
+		synchronize_sched();
+		kfree(link);
 
-		rcu_assign_pointer(tp->files, new);
+		if (!list_empty(&tp->files))
+			goto out;
 
-		/* Make sure the probe is done with old files */
-		synchronize_sched();
-		kfree(old);
+		tp->flags &= ~TP_FLAG_TRACE;
 	} else
 		tp->flags &= ~TP_FLAG_PROFILE;

@@ -327,10 +272,7 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
 		else
 			disable_kprobe(&tp->rp.kp);
 	}
-
- out_unlock:
-	mutex_unlock(&probe_enable_lock);
-
+ out:
 	return ret;
 }

@@ -885,20 +827,10 @@ __kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs,
 static __kprobes void
 kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs)
 {
-	/*
-	 * Note: preempt is already disabled around the kprobe handler.
-	 * However, we still need an smp_read_barrier_depends() corresponding
-	 * to smp_wmb() in rcu_assign_pointer() to access the pointer.
-	 */
-	struct ftrace_event_file **file = rcu_dereference_raw(tp->files);
-
-	if (unlikely(!file))
-		return;
+	struct event_file_link *link;
 
-	while (*file) {
-		__kprobe_trace_func(tp, regs, *file);
-		file++;
-	}
+	list_for_each_entry_rcu(link, &tp->files, list)
+		__kprobe_trace_func(tp, regs, link->file);
 }
 
 /* Kretprobe handler */

@@ -945,20 +877,10 @@ static __kprobes void
 kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
 		     struct pt_regs *regs)
 {
-	/*
-	 * Note: preempt is already disabled around the kprobe handler.
-	 * However, we still need an smp_read_barrier_depends() corresponding
-	 * to smp_wmb() in rcu_assign_pointer() to access the pointer.
-	 */
-	struct ftrace_event_file **file = rcu_dereference_raw(tp->files);
-
-	if (unlikely(!file))
-		return;
+	struct event_file_link *link;
 
-	while (*file) {
-		__kretprobe_trace_func(tp, ri, regs, *file);
-		file++;
-	}
+	list_for_each_entry_rcu(link, &tp->files, list)
+		__kretprobe_trace_func(tp, ri, regs, link->file);
 }
 
 /* Event entry printers */

@@ -1157,6 +1079,10 @@ kprobe_perf_func(struct trace_probe *tp, struct pt_regs *regs)
 	int size, __size, dsize;
 	int rctx;
 
+	head = this_cpu_ptr(call->perf_events);
+	if (hlist_empty(head))
+		return;
+
 	dsize = __get_data_size(tp, regs);
 	__size = sizeof(*entry) + tp->size + dsize;
 	size = ALIGN(__size + sizeof(u32), sizeof(u64));

@@ -1172,10 +1098,7 @@ kprobe_perf_func(struct trace_probe *tp, struct pt_regs *regs)
 	entry->ip = (unsigned long)tp->rp.kp.addr;
 	memset(&entry[1], 0, dsize);
 	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
-
-	head = this_cpu_ptr(call->perf_events);
-	perf_trace_buf_submit(entry, size, rctx,
-					entry->ip, 1, regs, head, NULL);
+	perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
 }
 
 /* Kretprobe profile handler */

@@ -1189,6 +1112,10 @@ kretprobe_perf_func(struct trace_probe *tp, struct kretprobe_instance *ri,
 	int size, __size, dsize;
 	int rctx;
 
+	head = this_cpu_ptr(call->perf_events);
+	if (hlist_empty(head))
+		return;
+
 	dsize = __get_data_size(tp, regs);
 	__size = sizeof(*entry) + tp->size + dsize;
 	size = ALIGN(__size + sizeof(u32), sizeof(u64));

@@ -1204,13 +1131,16 @@ kretprobe_perf_func(struct trace_probe *tp, struct kretprobe_instance *ri,
 	entry->func = (unsigned long)tp->rp.kp.addr;
 	entry->ret_ip = (unsigned long)ri->ret_addr;
 	store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
-
-	head = this_cpu_ptr(call->perf_events);
-	perf_trace_buf_submit(entry, size, rctx,
-					entry->ret_ip, 1, regs, head, NULL);
+	perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
 }
 #endif	/* CONFIG_PERF_EVENTS */
 
+/*
+ * called by perf_trace_init() or __ftrace_set_clr_event() under event_mutex.
+ *
+ * kprobe_trace_self_tests_init() does enable_trace_probe/disable_trace_probe
+ * lockless, but we can't race with this __init function.
+ */
 static __kprobes
 int kprobe_register(struct ftrace_event_call *event,
 		    enum trace_reg type, void *data)

@@ -1376,6 +1306,10 @@ find_trace_probe_file(struct trace_probe *tp, struct trace_array *tr)
 	return NULL;
 }
 
+/*
+ * Nobody but us can call enable_trace_probe/disable_trace_probe at this
+ * stage, we can do this lockless.
+ */
 static __init int kprobe_trace_self_tests_init(void)
 {
 	int ret, warn = 0;
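The files list exists so that one kprobe event can feed any number of trace instances. A usage sketch (the probe definition follows the kprobetrace documentation's standard example; the instance name is arbitrary):

    cd /sys/kernel/debug/tracing

    # Define a kprobe event:
    echo 'p:myprobe do_sys_open' >> kprobe_events

    # Enable it in the top-level buffer and in a separate instance;
    # each enabled file becomes one event_file_link on tp->files:
    mkdir instances/foo
    echo 1 > events/kprobes/myprobe/enable
    echo 1 > instances/foo/events/kprobes/myprobe/enable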
@@ -640,13 +640,20 @@ trace_selftest_function_regs(void)
  * Enable ftrace, sleep 1/10 second, and then read the trace
  * buffer to see if all is in order.
  */
-int __init
+int
 trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 {
 	int save_ftrace_enabled = ftrace_enabled;
 	unsigned long count;
 	int ret;
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+	if (ftrace_filter_param) {
+		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
+		return 0;
+	}
+#endif
+
 	/* make sure msleep has been recorded */
 	msleep(1);

@@ -727,13 +734,20 @@ static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
  * Pretty much the same than for the function tracer from which the selftest
  * has been borrowed.
  */
-int __init
+int
 trace_selftest_startup_function_graph(struct tracer *trace,
 				      struct trace_array *tr)
 {
 	int ret;
 	unsigned long count;
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+	if (ftrace_filter_param) {
+		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
+		return 0;
+	}
+#endif
+
 	/*
 	 * Simulate the init() callback but we attach a watchdog callback
 	 * to detect and recover from possible hangs
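A sketch of the boot-time situation this guards against: with a function filter given on the kernel command line, the startup selftest would run with that filter applied and see no events, so it is now skipped (the filter pattern below is just an example):

    # Appended to the kernel boot command line, this pre-loads a
    # function filter before the tracer selftests run:
    #   ftrace_filter=do_IRQ

    # The selftest then logs:
    #   ... kernel command line filter set: force PASS ...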
@@ -306,6 +306,8 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
 	struct syscall_metadata *sys_data;
 	struct ring_buffer_event *event;
 	struct ring_buffer *buffer;
+	unsigned long irq_flags;
+	int pc;
 	int syscall_nr;
 	int size;

@@ -321,9 +323,12 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
 
 	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;
 
+	local_save_flags(irq_flags);
+	pc = preempt_count();
+
 	buffer = tr->trace_buffer.buffer;
 	event = trace_buffer_lock_reserve(buffer,
-			sys_data->enter_event->event.type, size, 0, 0);
+			sys_data->enter_event->event.type, size, irq_flags, pc);
 	if (!event)
 		return;

@@ -333,7 +338,8 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
 
 	if (!filter_current_check_discard(buffer, sys_data->enter_event,
 					  entry, event))
-		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
+		trace_current_buffer_unlock_commit(buffer, event,
+						   irq_flags, pc);
 }
 
 static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)

@@ -343,6 +349,8 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
 	struct syscall_metadata *sys_data;
 	struct ring_buffer_event *event;
 	struct ring_buffer *buffer;
+	unsigned long irq_flags;
+	int pc;
 	int syscall_nr;
 
 	syscall_nr = trace_get_syscall_nr(current, regs);

@@ -355,9 +363,13 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
 	if (!sys_data)
 		return;
 
+	local_save_flags(irq_flags);
+	pc = preempt_count();
+
 	buffer = tr->trace_buffer.buffer;
 	event = trace_buffer_lock_reserve(buffer,
-			sys_data->exit_event->event.type, sizeof(*entry), 0, 0);
+			sys_data->exit_event->event.type, sizeof(*entry),
+			irq_flags, pc);
 	if (!event)
 		return;

@@ -367,7 +379,8 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
 
 	if (!filter_current_check_discard(buffer, sys_data->exit_event,
 					  entry, event))
-		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
+		trace_current_buffer_unlock_commit(buffer, event,
+						   irq_flags, pc);
 }
 
 static int reg_event_syscall_enter(struct ftrace_event_file *file,
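This is the "Fix irqs-off tag display in syscall tracing" change from the pull list: the hard-coded 0, 0 meant syscall events always showed bogus irqs-off/preempt-count annotations. A sketch of observing the corrected fields (the event name is an example; the annotation columns are part of the default output format):

    cd /sys/kernel/debug/tracing

    echo 1 > events/syscalls/sys_enter_openat/enable
    head trace
    echo 0 > events/syscalls/sys_enter_openat/enable

    # The short flag column after the task/PID shows irqs-off,
    # need-resched, hardirq/softirq context, and preempt depth.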
@@ -283,8 +283,10 @@ static int create_trace_uprobe(int argc, char **argv)
 		return -EINVAL;
 	}
 	arg = strchr(argv[1], ':');
-	if (!arg)
+	if (!arg) {
+		ret = -EINVAL;
 		goto fail_address_parse;
+	}
 
 	*arg++ = '\0';
 	filename = argv[1];