Commit c624c866 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'trace-v4.8' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing updates from Steven Rostedt:
 "This is mostly cleanups and small fixes.  Some of the more visible
  changes are:

   - The function pid code uses the event pid filtering logic
   - [ku]probe events have access to current->comm
   - trace_printk now has sample code
   - PCI devices now trace physical addresses
   - stack tracing has less unnecessary functions traced"

* tag 'trace-v4.8' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  printk, tracing: Avoiding unneeded blank lines
  tracing: Use __get_str() when manipulating strings
  tracing, RAS: Cleanup on __get_str() usage
  tracing: Use outer () on __get_str() definition
  ftrace: Reduce size of function graph entries
  tracing: Have HIST_TRIGGERS select TRACING
  tracing: Using for_each_set_bit() to simplify trace_pid_write()
  ftrace: Move toplevel init out of ftrace_init_tracefs()
  tracing/function_graph: Fix filters for function_graph threshold
  tracing: Skip more functions when doing stack tracing of events
  tracing: Expose CPU physical addresses (resource values) for PCI devices
  tracing: Show the preempt count of when the event was called
  tracing: Add trace_printk sample code
  tracing: Choose static tp_printk buffer by explicit nesting count
  tracing: expose current->comm to [ku]probe events
  ftrace: Have set_ftrace_pid use the bitmap like events do
  tracing: Move pid_list write processing into its own function
  tracing: Move the pid_list seq_file functions to be global
  tracing: Move filtered_pid helper functions into trace.c
  tracing: Make the pid filtering helper functions global
parents e55884d2 78aebca2
...@@ -40,6 +40,7 @@ Synopsis of kprobe_events ...@@ -40,6 +40,7 @@ Synopsis of kprobe_events
$stackN : Fetch Nth entry of stack (N >= 0) $stackN : Fetch Nth entry of stack (N >= 0)
$stack : Fetch stack address. $stack : Fetch stack address.
$retval : Fetch return value.(*) $retval : Fetch return value.(*)
$comm : Fetch current task comm.
+|-offs(FETCHARG) : Fetch memory at FETCHARG +|- offs address.(**) +|-offs(FETCHARG) : Fetch memory at FETCHARG +|- offs address.(**)
NAME=FETCHARG : Set NAME as the argument name of FETCHARG. NAME=FETCHARG : Set NAME as the argument name of FETCHARG.
FETCHARG:TYPE : Set TYPE as the type of FETCHARG. Currently, basic types FETCHARG:TYPE : Set TYPE as the type of FETCHARG. Currently, basic types
...@@ -62,6 +63,8 @@ offset, and container-size (usually 32). The syntax is; ...@@ -62,6 +63,8 @@ offset, and container-size (usually 32). The syntax is;
b<bit-width>@<bit-offset>/<container-size> b<bit-width>@<bit-offset>/<container-size>
For $comm, the default type is "string"; any other type is invalid.
Per-Probe Event Filtering Per-Probe Event Filtering
------------------------- -------------------------
......
...@@ -36,6 +36,7 @@ Synopsis of uprobe_tracer ...@@ -36,6 +36,7 @@ Synopsis of uprobe_tracer
$stackN : Fetch Nth entry of stack (N >= 0) $stackN : Fetch Nth entry of stack (N >= 0)
$stack : Fetch stack address. $stack : Fetch stack address.
$retval : Fetch return value.(*) $retval : Fetch return value.(*)
$comm : Fetch current task comm.
+|-offs(FETCHARG) : Fetch memory at FETCHARG +|- offs address.(**) +|-offs(FETCHARG) : Fetch memory at FETCHARG +|- offs address.(**)
NAME=FETCHARG : Set NAME as the argument name of FETCHARG. NAME=FETCHARG : Set NAME as the argument name of FETCHARG.
FETCHARG:TYPE : Set TYPE as the type of FETCHARG. Currently, basic types FETCHARG:TYPE : Set TYPE as the type of FETCHARG. Currently, basic types
...@@ -57,6 +58,8 @@ offset, and container-size (usually 32). The syntax is; ...@@ -57,6 +58,8 @@ offset, and container-size (usually 32). The syntax is;
b<bit-width>@<bit-offset>/<container-size> b<bit-width>@<bit-offset>/<container-size>
For $comm, the default type is "string"; any other type is invalid.
Event Profiling Event Profiling
--------------- ---------------
......
...@@ -1235,8 +1235,8 @@ DECLARE_EVENT_CLASS(nfs4_idmap_event, ...@@ -1235,8 +1235,8 @@ DECLARE_EVENT_CLASS(nfs4_idmap_event,
len = 0; len = 0;
__entry->error = error < 0 ? error : 0; __entry->error = error < 0 ? error : 0;
__entry->id = id; __entry->id = id;
memcpy(__get_dynamic_array(name), name, len); memcpy(__get_str(name), name, len);
((char *)__get_dynamic_array(name))[len] = 0; __get_str(name)[len] = 0;
), ),
TP_printk( TP_printk(
......
...@@ -707,9 +707,9 @@ TRACE_EVENT(nfs_sillyrename_unlink, ...@@ -707,9 +707,9 @@ TRACE_EVENT(nfs_sillyrename_unlink,
__entry->dev = dir->i_sb->s_dev; __entry->dev = dir->i_sb->s_dev;
__entry->dir = NFS_FILEID(dir); __entry->dir = NFS_FILEID(dir);
__entry->error = error; __entry->error = error;
memcpy(__get_dynamic_array(name), memcpy(__get_str(name),
data->args.name.name, len); data->args.name.name, len);
((char *)__get_dynamic_array(name))[len] = 0; __get_str(name)[len] = 0;
), ),
TP_printk( TP_printk(
......
...@@ -754,23 +754,27 @@ static inline void ftrace_init(void) { } ...@@ -754,23 +754,27 @@ static inline void ftrace_init(void) { }
/* /*
* Structure that defines an entry function trace. * Structure that defines an entry function trace.
* It's already packed but the attribute "packed" is needed
* to remove extra padding at the end.
*/ */
struct ftrace_graph_ent { struct ftrace_graph_ent {
unsigned long func; /* Current function */ unsigned long func; /* Current function */
int depth; int depth;
}; } __packed;
/* /*
* Structure that defines a return function trace. * Structure that defines a return function trace.
* It's already packed but the attribute "packed" is needed
* to remove extra padding at the end.
*/ */
struct ftrace_graph_ret { struct ftrace_graph_ret {
unsigned long func; /* Current function */ unsigned long func; /* Current function */
unsigned long long calltime;
unsigned long long rettime;
/* Number of functions that overran the depth limit for current task */ /* Number of functions that overran the depth limit for current task */
unsigned long overrun; unsigned long overrun;
unsigned long long calltime;
unsigned long long rettime;
int depth; int depth;
}; } __packed;
/* Type of the callback handlers for tracing function graph*/ /* Type of the callback handlers for tracing function graph*/
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */ typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
......
...@@ -147,7 +147,7 @@ TRACE_EVENT(mc_event, ...@@ -147,7 +147,7 @@ TRACE_EVENT(mc_event,
__entry->error_count, __entry->error_count,
mc_event_error_type(__entry->error_type), mc_event_error_type(__entry->error_type),
__entry->error_count > 1 ? "s" : "", __entry->error_count > 1 ? "s" : "",
((char *)__get_str(msg))[0] ? " " : "", __get_str(msg)[0] ? " " : "",
__get_str(msg), __get_str(msg),
__get_str(label), __get_str(label),
__entry->mc_index, __entry->mc_index,
...@@ -157,7 +157,7 @@ TRACE_EVENT(mc_event, ...@@ -157,7 +157,7 @@ TRACE_EVENT(mc_event,
__entry->address, __entry->address,
1 << __entry->grain_bits, 1 << __entry->grain_bits,
__entry->syndrome, __entry->syndrome,
((char *)__get_str(driver_detail))[0] ? " " : "", __get_str(driver_detail)[0] ? " " : "",
__get_str(driver_detail)) __get_str(driver_detail))
); );
......
...@@ -16,8 +16,16 @@ TRACE_EVENT(console, ...@@ -16,8 +16,16 @@ TRACE_EVENT(console,
), ),
TP_fast_assign( TP_fast_assign(
memcpy(__get_dynamic_array(msg), text, len); /*
((char *)__get_dynamic_array(msg))[len] = 0; * Each trace entry is printed in a new line.
* If the msg finishes with '\n', cut it off
* to avoid blank lines in the trace.
*/
if ((len > 0) && (text[len-1] == '\n'))
len -= 1;
memcpy(__get_str(msg), text, len);
__get_str(msg)[len] = 0;
), ),
TP_printk("%s", __get_str(msg)) TP_printk("%s", __get_str(msg))
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
((__entry->__data_loc_##field >> 16) & 0xffff) ((__entry->__data_loc_##field >> 16) & 0xffff)
#undef __get_str #undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field) #define __get_str(field) ((char *)__get_dynamic_array(field))
#undef __get_bitmask #undef __get_bitmask
#define __get_bitmask(field) (char *)__get_dynamic_array(field) #define __get_bitmask(field) (char *)__get_dynamic_array(field)
......
...@@ -256,7 +256,7 @@ TRACE_MAKE_SYSTEM_STR(); ...@@ -256,7 +256,7 @@ TRACE_MAKE_SYSTEM_STR();
((__entry->__data_loc_##field >> 16) & 0xffff) ((__entry->__data_loc_##field >> 16) & 0xffff)
#undef __get_str #undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field) #define __get_str(field) ((char *)__get_dynamic_array(field))
#undef __get_bitmask #undef __get_bitmask
#define __get_bitmask(field) \ #define __get_bitmask(field) \
......
...@@ -542,6 +542,7 @@ config HIST_TRIGGERS ...@@ -542,6 +542,7 @@ config HIST_TRIGGERS
bool "Histogram triggers" bool "Histogram triggers"
depends on ARCH_HAVE_NMI_SAFE_CMPXCHG depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
select TRACING_MAP select TRACING_MAP
select TRACING
default n default n
help help
Hist triggers allow one or more arbitrary trace event fields Hist triggers allow one or more arbitrary trace event fields
......
This diff is collapsed.
This diff is collapsed.
...@@ -80,6 +80,12 @@ enum trace_type { ...@@ -80,6 +80,12 @@ enum trace_type {
FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \ FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
filter) filter)
#undef FTRACE_ENTRY_PACKED
#define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print, \
filter) \
FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
filter) __packed
#include "trace_entries.h" #include "trace_entries.h"
/* /*
...@@ -156,6 +162,9 @@ struct trace_array_cpu { ...@@ -156,6 +162,9 @@ struct trace_array_cpu {
char comm[TASK_COMM_LEN]; char comm[TASK_COMM_LEN];
bool ignore_pid; bool ignore_pid;
#ifdef CONFIG_FUNCTION_TRACER
bool ftrace_ignore_pid;
#endif
}; };
struct tracer; struct tracer;
...@@ -247,6 +256,7 @@ struct trace_array { ...@@ -247,6 +256,7 @@ struct trace_array {
int ref; int ref;
#ifdef CONFIG_FUNCTION_TRACER #ifdef CONFIG_FUNCTION_TRACER
struct ftrace_ops *ops; struct ftrace_ops *ops;
struct trace_pid_list __rcu *function_pids;
/* function tracing enabled */ /* function tracing enabled */
int function_enabled; int function_enabled;
#endif #endif
...@@ -628,6 +638,25 @@ extern unsigned long nsecs_to_usecs(unsigned long nsecs); ...@@ -628,6 +638,25 @@ extern unsigned long nsecs_to_usecs(unsigned long nsecs);
extern unsigned long tracing_thresh; extern unsigned long tracing_thresh;
/* PID filtering */
extern int pid_max;
bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids,
pid_t search_pid);
bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
struct task_struct *task);
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
struct task_struct *self,
struct task_struct *task);
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos);
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos);
int trace_pid_show(struct seq_file *m, void *v);
void trace_free_pid_list(struct trace_pid_list *pid_list);
int trace_pid_write(struct trace_pid_list *filtered_pids,
struct trace_pid_list **new_pid_list,
const char __user *ubuf, size_t cnt);
#ifdef CONFIG_TRACER_MAX_TRACE #ifdef CONFIG_TRACER_MAX_TRACE
void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu); void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr, void update_max_tr_single(struct trace_array *tr,
...@@ -821,12 +850,9 @@ extern struct list_head ftrace_pids; ...@@ -821,12 +850,9 @@ extern struct list_head ftrace_pids;
#ifdef CONFIG_FUNCTION_TRACER #ifdef CONFIG_FUNCTION_TRACER
extern bool ftrace_filter_param __initdata; extern bool ftrace_filter_param __initdata;
static inline int ftrace_trace_task(struct task_struct *task) static inline int ftrace_trace_task(struct trace_array *tr)
{ {
if (list_empty(&ftrace_pids)) return !this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid);
return 1;
return test_tsk_trace_trace(task);
} }
extern int ftrace_is_dead(void); extern int ftrace_is_dead(void);
int ftrace_create_function_files(struct trace_array *tr, int ftrace_create_function_files(struct trace_array *tr,
...@@ -836,8 +862,11 @@ void ftrace_init_global_array_ops(struct trace_array *tr); ...@@ -836,8 +862,11 @@ void ftrace_init_global_array_ops(struct trace_array *tr);
void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func); void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
void ftrace_reset_array_ops(struct trace_array *tr); void ftrace_reset_array_ops(struct trace_array *tr);
int using_ftrace_ops_list_func(void); int using_ftrace_ops_list_func(void);
void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
void ftrace_init_tracefs_toplevel(struct trace_array *tr,
struct dentry *d_tracer);
#else #else
static inline int ftrace_trace_task(struct task_struct *task) static inline int ftrace_trace_task(struct trace_array *tr)
{ {
return 1; return 1;
} }
...@@ -852,6 +881,8 @@ static inline void ftrace_destroy_function_files(struct trace_array *tr) { } ...@@ -852,6 +881,8 @@ static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
static inline __init void static inline __init void
ftrace_init_global_array_ops(struct trace_array *tr) { } ftrace_init_global_array_ops(struct trace_array *tr) { }
static inline void ftrace_reset_array_ops(struct trace_array *tr) { } static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
/* ftace_func_t type is not defined, use macro instead of static inline */ /* ftace_func_t type is not defined, use macro instead of static inline */
#define ftrace_init_array_ops(tr, func) do { } while (0) #define ftrace_init_array_ops(tr, func) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER */ #endif /* CONFIG_FUNCTION_TRACER */
...@@ -1600,6 +1631,11 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled); ...@@ -1600,6 +1631,11 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \ #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \
FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \ FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
filter) filter)
#undef FTRACE_ENTRY_PACKED
#define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print, filter) \
FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
filter)
#include "trace_entries.h" #include "trace_entries.h"
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER) #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
......
...@@ -72,7 +72,7 @@ FTRACE_ENTRY_REG(function, ftrace_entry, ...@@ -72,7 +72,7 @@ FTRACE_ENTRY_REG(function, ftrace_entry,
); );
/* Function call entry */ /* Function call entry */
FTRACE_ENTRY(funcgraph_entry, ftrace_graph_ent_entry, FTRACE_ENTRY_PACKED(funcgraph_entry, ftrace_graph_ent_entry,
TRACE_GRAPH_ENT, TRACE_GRAPH_ENT,
...@@ -88,7 +88,7 @@ FTRACE_ENTRY(funcgraph_entry, ftrace_graph_ent_entry, ...@@ -88,7 +88,7 @@ FTRACE_ENTRY(funcgraph_entry, ftrace_graph_ent_entry,
); );
/* Function return entry */ /* Function return entry */
FTRACE_ENTRY(funcgraph_exit, ftrace_graph_ret_entry, FTRACE_ENTRY_PACKED(funcgraph_exit, ftrace_graph_ret_entry,
TRACE_GRAPH_RET, TRACE_GRAPH_RET,
......
...@@ -15,7 +15,6 @@ ...@@ -15,7 +15,6 @@
#include <linux/kthread.h> #include <linux/kthread.h>
#include <linux/tracefs.h> #include <linux/tracefs.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/ctype.h> #include <linux/ctype.h>
#include <linux/sort.h> #include <linux/sort.h>
...@@ -262,6 +261,14 @@ void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer, ...@@ -262,6 +261,14 @@ void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
local_save_flags(fbuffer->flags); local_save_flags(fbuffer->flags);
fbuffer->pc = preempt_count(); fbuffer->pc = preempt_count();
/*
* If CONFIG_PREEMPT is enabled, then the tracepoint itself disables
* preemption (adding one to the preempt_count). Since we are
* interested in the preempt_count at the time the tracepoint was
* hit, we need to subtract one to offset the increment.
*/
if (IS_ENABLED(CONFIG_PREEMPT))
fbuffer->pc--;
fbuffer->trace_file = trace_file; fbuffer->trace_file = trace_file;
fbuffer->event = fbuffer->event =
...@@ -499,60 +506,6 @@ static void ftrace_clear_events(struct trace_array *tr) ...@@ -499,60 +506,6 @@ static void ftrace_clear_events(struct trace_array *tr)
mutex_unlock(&event_mutex); mutex_unlock(&event_mutex);
} }
/* Shouldn't this be in a header? */
extern int pid_max;
/* Returns true if found in filter */
static bool
find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
/*
* If pid_max changed after filtered_pids was created, we
* by default ignore all pids greater than the previous pid_max.
*/
if (search_pid >= filtered_pids->pid_max)
return false;
return test_bit(search_pid, filtered_pids->pids);
}
static bool
ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
{
/*
* Return false, because if filtered_pids does not exist,
* all pids are good to trace.
*/
if (!filtered_pids)
return false;
return !find_filtered_pid(filtered_pids, task->pid);
}
static void filter_add_remove_task(struct trace_pid_list *pid_list,
struct task_struct *self,
struct task_struct *task)
{
if (!pid_list)
return;
/* For forks, we only add if the forking task is listed */
if (self) {
if (!find_filtered_pid(pid_list, self->pid))
return;
}
/* Sorry, but we don't support pid_max changing after setting */
if (task->pid >= pid_list->pid_max)
return;
/* "self" is set for forks, and NULL for exits */
if (self)
set_bit(task->pid, pid_list->pids);
else
clear_bit(task->pid, pid_list->pids);
}
static void static void
event_filter_pid_sched_process_exit(void *data, struct task_struct *task) event_filter_pid_sched_process_exit(void *data, struct task_struct *task)
{ {
...@@ -560,7 +513,7 @@ event_filter_pid_sched_process_exit(void *data, struct task_struct *task) ...@@ -560,7 +513,7 @@ event_filter_pid_sched_process_exit(void *data, struct task_struct *task)
struct trace_array *tr = data; struct trace_array *tr = data;
pid_list = rcu_dereference_sched(tr->filtered_pids); pid_list = rcu_dereference_sched(tr->filtered_pids);
filter_add_remove_task(pid_list, NULL, task); trace_filter_add_remove_task(pid_list, NULL, task);
} }
static void static void
...@@ -572,7 +525,7 @@ event_filter_pid_sched_process_fork(void *data, ...@@ -572,7 +525,7 @@ event_filter_pid_sched_process_fork(void *data,
struct trace_array *tr = data; struct trace_array *tr = data;
pid_list = rcu_dereference_sched(tr->filtered_pids); pid_list = rcu_dereference_sched(tr->filtered_pids);
filter_add_remove_task(pid_list, self, task); trace_filter_add_remove_task(pid_list, self, task);
} }
void trace_event_follow_fork(struct trace_array *tr, bool enable) void trace_event_follow_fork(struct trace_array *tr, bool enable)
...@@ -600,8 +553,8 @@ event_filter_pid_sched_switch_probe_pre(void *data, bool preempt, ...@@ -600,8 +553,8 @@ event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
pid_list = rcu_dereference_sched(tr->filtered_pids); pid_list = rcu_dereference_sched(tr->filtered_pids);
this_cpu_write(tr->trace_buffer.data->ignore_pid, this_cpu_write(tr->trace_buffer.data->ignore_pid,
ignore_this_task(pid_list, prev) && trace_ignore_this_task(pid_list, prev) &&
ignore_this_task(pid_list, next)); trace_ignore_this_task(pid_list, next));
} }
static void static void
...@@ -614,7 +567,7 @@ event_filter_pid_sched_switch_probe_post(void *data, bool preempt, ...@@ -614,7 +567,7 @@ event_filter_pid_sched_switch_probe_post(void *data, bool preempt,
pid_list = rcu_dereference_sched(tr->filtered_pids); pid_list = rcu_dereference_sched(tr->filtered_pids);
this_cpu_write(tr->trace_buffer.data->ignore_pid, this_cpu_write(tr->trace_buffer.data->ignore_pid,
ignore_this_task(pid_list, next)); trace_ignore_this_task(pid_list, next));
} }
static void static void
...@@ -630,7 +583,7 @@ event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task) ...@@ -630,7 +583,7 @@ event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task)
pid_list = rcu_dereference_sched(tr->filtered_pids); pid_list = rcu_dereference_sched(tr->filtered_pids);
this_cpu_write(tr->trace_buffer.data->ignore_pid, this_cpu_write(tr->trace_buffer.data->ignore_pid,
ignore_this_task(pid_list, task)); trace_ignore_this_task(pid_list, task));
} }
static void static void
...@@ -647,7 +600,7 @@ event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task) ...@@ -647,7 +600,7 @@ event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task)
/* Set tracing if current is enabled */ /* Set tracing if current is enabled */
this_cpu_write(tr->trace_buffer.data->ignore_pid, this_cpu_write(tr->trace_buffer.data->ignore_pid,
ignore_this_task(pid_list, current)); trace_ignore_this_task(pid_list, current));
} }
static void __ftrace_clear_event_pids(struct trace_array *tr) static void __ftrace_clear_event_pids(struct trace_array *tr)
...@@ -685,8 +638,7 @@ static void __ftrace_clear_event_pids(struct trace_array *tr) ...@@ -685,8 +638,7 @@ static void __ftrace_clear_event_pids(struct trace_array *tr)
/* Wait till all users are no longer using pid filtering */ /* Wait till all users are no longer using pid filtering */
synchronize_sched(); synchronize_sched();
vfree(pid_list->pids); trace_free_pid_list(pid_list);
kfree(pid_list);
} }
static void ftrace_clear_event_pids(struct trace_array *tr) static void ftrace_clear_event_pids(struct trace_array *tr)
...@@ -1034,18 +986,8 @@ p_next(struct seq_file *m, void *v, loff_t *pos) ...@@ -1034,18 +986,8 @@ p_next(struct seq_file *m, void *v, loff_t *pos)
{ {
struct trace_array *tr = m->private; struct trace_array *tr = m->private;
struct trace_pid_list *pid_list = rcu_dereference_sched(tr->filtered_pids); struct trace_pid_list *pid_list = rcu_dereference_sched(tr->filtered_pids);
unsigned long pid = (unsigned long)v;
(*pos)++;
/* pid already is +1 of the actual prevous bit */
pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
/* Return pid + 1 to allow zero to be represented */ return trace_pid_next(pid_list, v, pos);
if (pid < pid_list->pid_max)
return (void *)(pid + 1);
return NULL;
} }
static void *p_start(struct seq_file *m, loff_t *pos) static void *p_start(struct seq_file *m, loff_t *pos)
...@@ -1053,8 +995,6 @@ static void *p_start(struct seq_file *m, loff_t *pos) ...@@ -1053,8 +995,6 @@ static void *p_start(struct seq_file *m, loff_t *pos)
{ {
struct trace_pid_list *pid_list; struct trace_pid_list *pid_list;
struct trace_array *tr = m->private; struct trace_array *tr = m->private;
unsigned long pid;
loff_t l = 0;
/* /*
* Grab the mutex, to keep calls to p_next() having the same * Grab the mutex, to keep calls to p_next() having the same
...@@ -1070,15 +1010,7 @@ static void *p_start(struct seq_file *m, loff_t *pos) ...@@ -1070,15 +1010,7 @@ static void *p_start(struct seq_file *m, loff_t *pos)
if (!pid_list) if (!pid_list)
return NULL; return NULL;
pid = find_first_bit(pid_list->pids, pid_list->pid_max); return trace_pid_start(pid_list, pos);
if (pid >= pid_list->pid_max)
return NULL;
/* Return pid + 1 so that zero can be the exit value */
for (pid++; pid && l < *pos;
pid = (unsigned long)p_next(m, (void *)pid, &l))
;
return (void *)pid;
} }
static void p_stop(struct seq_file *m, void *p) static void p_stop(struct seq_file *m, void *p)
...@@ -1088,14 +1020,6 @@ static void p_stop(struct seq_file *m, void *p) ...@@ -1088,14 +1020,6 @@ static void p_stop(struct seq_file *m, void *p)
mutex_unlock(&event_mutex); mutex_unlock(&event_mutex);
} }
static int p_show(struct seq_file *m, void *v)
{
unsigned long pid = (unsigned long)v - 1;
seq_printf(m, "%lu\n", pid);
return 0;
}
static ssize_t static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt, event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
loff_t *ppos) loff_t *ppos)
...@@ -1654,7 +1578,7 @@ static void ignore_task_cpu(void *data) ...@@ -1654,7 +1578,7 @@ static void ignore_task_cpu(void *data)
mutex_is_locked(&event_mutex)); mutex_is_locked(&event_mutex));
this_cpu_write(tr->trace_buffer.data->ignore_pid, this_cpu_write(tr->trace_buffer.data->ignore_pid,
ignore_this_task(pid_list, current)); trace_ignore_this_task(pid_list, current));
} }
static ssize_t static ssize_t
...@@ -1666,13 +1590,7 @@ ftrace_event_pid_write(struct file *filp, const char __user *ubuf, ...@@ -1666,13 +1590,7 @@ ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
struct trace_pid_list *filtered_pids = NULL; struct trace_pid_list *filtered_pids = NULL;
struct trace_pid_list *pid_list; struct trace_pid_list *pid_list;
struct trace_event_file *file; struct trace_event_file *file;
struct trace_parser parser; ssize_t ret;
unsigned long val;
loff_t this_pos;
ssize_t read = 0;
ssize_t ret = 0;
pid_t pid;
int nr_pids = 0;
if (!cnt) if (!cnt)
return 0; return 0;
...@@ -1681,93 +1599,15 @@ ftrace_event_pid_write(struct file *filp, const char __user *ubuf, ...@@ -1681,93 +1599,15 @@ ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
if (ret < 0) if (ret < 0)
return ret; return ret;
if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
return -ENOMEM;
mutex_lock(&event_mutex); mutex_lock(&event_mutex);
filtered_pids = rcu_dereference_protected(tr->filtered_pids, filtered_pids = rcu_dereference_protected(tr->filtered_pids,
lockdep_is_held(&event_mutex)); lockdep_is_held(&event_mutex));
/* ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
* Always recreate a new array. The write is an all or nothing if (ret < 0)
* operation. Always create a new array when adding new pids by
* the user. If the operation fails, then the current list is
* not modified.
*/
pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
if (!pid_list) {
read = -ENOMEM;
goto out;
}
pid_list->pid_max = READ_ONCE(pid_max);
/* Only truncating will shrink pid_max */
if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
pid_list->pid_max = filtered_pids->pid_max;
pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
if (!pid_list->pids) {
kfree(pid_list);
read = -ENOMEM;
goto out;
}
if (filtered_pids) {
/* copy the current bits to the new max */
pid = find_first_bit(filtered_pids->pids,
filtered_pids->pid_max);
while (pid < filtered_pids->pid_max) {
set_bit(pid, pid_list->pids);
pid = find_next_bit(filtered_pids->pids,
filtered_pids->pid_max,
pid + 1);
nr_pids++;
}
}
while (cnt > 0) {
this_pos = 0;
ret = trace_get_user(&parser, ubuf, cnt, &this_pos);
if (ret < 0 || !trace_parser_loaded(&parser))
break;
read += ret;
ubuf += ret;
cnt -= ret;
parser.buffer[parser.idx] = 0;
ret = -EINVAL;
if (kstrtoul(parser.buffer, 0, &val))
break;
if (val >= pid_list->pid_max)
break;
pid = (pid_t)val;
set_bit(pid, pid_list->pids);
nr_pids++;
trace_parser_clear(&parser);
ret = 0;
}
trace_parser_put(&parser);
if (ret < 0) {
vfree(pid_list->pids);
kfree(pid_list);
read = ret;
goto out; goto out;
}
if (!nr_pids) {
/* Cleared the list of pids */
vfree(pid_list->pids);
kfree(pid_list);
read = ret;
if (!filtered_pids)
goto out;
pid_list = NULL;
}
rcu_assign_pointer(tr->filtered_pids, pid_list); rcu_assign_pointer(tr->filtered_pids, pid_list);
list_for_each_entry(file, &tr->events, list) { list_for_each_entry(file, &tr->events, list) {
...@@ -1776,10 +1616,8 @@ ftrace_event_pid_write(struct file *filp, const char __user *ubuf, ...@@ -1776,10 +1616,8 @@ ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
if (filtered_pids) { if (filtered_pids) {
synchronize_sched(); synchronize_sched();
trace_free_pid_list(filtered_pids);
vfree(filtered_pids->pids); } else if (pid_list) {
kfree(filtered_pids);
} else {
/* /*
* Register a probe that is called before all other probes * Register a probe that is called before all other probes
* to set ignore_pid if next or prev do not match. * to set ignore_pid if next or prev do not match.
...@@ -1817,9 +1655,8 @@ ftrace_event_pid_write(struct file *filp, const char __user *ubuf, ...@@ -1817,9 +1655,8 @@ ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
out: out:
mutex_unlock(&event_mutex); mutex_unlock(&event_mutex);
ret = read; if (ret > 0)
if (read > 0) *ppos += ret;
*ppos += read;
return ret; return ret;
} }
...@@ -1846,7 +1683,7 @@ static const struct seq_operations show_set_event_seq_ops = { ...@@ -1846,7 +1683,7 @@ static const struct seq_operations show_set_event_seq_ops = {
static const struct seq_operations show_set_pid_seq_ops = { static const struct seq_operations show_set_pid_seq_ops = {
.start = p_start, .start = p_start,
.next = p_next, .next = p_next,
.show = p_show, .show = trace_pid_show,
.stop = p_stop, .stop = p_stop,
}; };
......
...@@ -43,7 +43,7 @@ static int allocate_ftrace_ops(struct trace_array *tr) ...@@ -43,7 +43,7 @@ static int allocate_ftrace_ops(struct trace_array *tr)
/* Currently only the non stack verision is supported */ /* Currently only the non stack verision is supported */
ops->func = function_trace_call; ops->func = function_trace_call;
ops->flags = FTRACE_OPS_FL_RECURSION_SAFE; ops->flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_PID;
tr->ops = ops; tr->ops = ops;
ops->private = tr; ops->private = tr;
......
...@@ -319,7 +319,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace) ...@@ -319,7 +319,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
int cpu; int cpu;
int pc; int pc;
if (!ftrace_trace_task(current)) if (!ftrace_trace_task(tr))
return 0; return 0;
/* trace it when it is-nested-in or is a function enabled. */ /* trace it when it is-nested-in or is a function enabled. */
...@@ -338,6 +338,13 @@ int trace_graph_entry(struct ftrace_graph_ent *trace) ...@@ -338,6 +338,13 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
if (ftrace_graph_notrace_addr(trace->func)) if (ftrace_graph_notrace_addr(trace->func))
return 1; return 1;
/*
* Stop here if tracing_threshold is set. We only write function return
* events to the ring buffer.
*/
if (tracing_thresh)
return 1;
local_irq_save(flags); local_irq_save(flags);
cpu = raw_smp_processor_id(); cpu = raw_smp_processor_id();
data = per_cpu_ptr(tr->trace_buffer.data, cpu); data = per_cpu_ptr(tr->trace_buffer.data, cpu);
...@@ -355,14 +362,6 @@ int trace_graph_entry(struct ftrace_graph_ent *trace) ...@@ -355,14 +362,6 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
return ret; return ret;
} }
static int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
{
if (tracing_thresh)
return 1;
else
return trace_graph_entry(trace);
}
static void static void
__trace_graph_function(struct trace_array *tr, __trace_graph_function(struct trace_array *tr,
unsigned long ip, unsigned long flags, int pc) unsigned long ip, unsigned long flags, int pc)
...@@ -457,7 +456,7 @@ static int graph_trace_init(struct trace_array *tr) ...@@ -457,7 +456,7 @@ static int graph_trace_init(struct trace_array *tr)
set_graph_array(tr); set_graph_array(tr);
if (tracing_thresh) if (tracing_thresh)
ret = register_ftrace_graph(&trace_graph_thresh_return, ret = register_ftrace_graph(&trace_graph_thresh_return,
&trace_graph_thresh_entry); &trace_graph_entry);
else else
ret = register_ftrace_graph(&trace_graph_return, ret = register_ftrace_graph(&trace_graph_return,
&trace_graph_entry); &trace_graph_entry);
......
...@@ -587,6 +587,7 @@ static int create_trace_kprobe(int argc, char **argv) ...@@ -587,6 +587,7 @@ static int create_trace_kprobe(int argc, char **argv)
* $retval : fetch return value * $retval : fetch return value
* $stack : fetch stack address * $stack : fetch stack address
* $stackN : fetch Nth of stack (N:0-) * $stackN : fetch Nth of stack (N:0-)
* $comm : fetch current task comm
* @ADDR : fetch memory at ADDR (ADDR should be in kernel) * @ADDR : fetch memory at ADDR (ADDR should be in kernel)
* @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol) * @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
* %REG : fetch register REG * %REG : fetch register REG
......
...@@ -68,19 +68,15 @@ static void mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev) ...@@ -68,19 +68,15 @@ static void mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev)
trace_seq_printf(s, "PCIDEV %02x%02x %04x%04x %x", trace_seq_printf(s, "PCIDEV %02x%02x %04x%04x %x",
dev->bus->number, dev->devfn, dev->bus->number, dev->devfn,
dev->vendor, dev->device, dev->irq); dev->vendor, dev->device, dev->irq);
/*
* XXX: is pci_resource_to_user() appropriate, since we are
* supposed to interpret the __ioremap() phys_addr argument based on
* these printed values?
*/
for (i = 0; i < 7; i++) { for (i = 0; i < 7; i++) {
pci_resource_to_user(dev, i, &dev->resource[i], &start, &end); start = dev->resource[i].start;
trace_seq_printf(s, " %llx", trace_seq_printf(s, " %llx",
(unsigned long long)(start | (unsigned long long)(start |
(dev->resource[i].flags & PCI_REGION_FLAG_MASK))); (dev->resource[i].flags & PCI_REGION_FLAG_MASK)));
} }
for (i = 0; i < 7; i++) { for (i = 0; i < 7; i++) {
pci_resource_to_user(dev, i, &dev->resource[i], &start, &end); start = dev->resource[i].start;
end = dev->resource[i].end;
trace_seq_printf(s, " %llx", trace_seq_printf(s, " %llx",
dev->resource[i].start < dev->resource[i].end ? dev->resource[i].start < dev->resource[i].end ?
(unsigned long long)(end - start) + 1 : 0); (unsigned long long)(end - start) + 1 : 0);
......
...@@ -218,6 +218,28 @@ free_bitfield_fetch_param(struct bitfield_fetch_param *data) ...@@ -218,6 +218,28 @@ free_bitfield_fetch_param(struct bitfield_fetch_param *data)
kfree(data); kfree(data);
} }
/*
 * Fetch current->comm as a string into the trace event's dynamic-data
 * area. @dest initially holds the data-relative location (offset+len)
 * reserved for the string; on return it is rewritten to describe the
 * actual string stored there.
 */
void FETCH_FUNC_NAME(comm, string)(struct pt_regs *regs,
					  void *data, void *dest)
{
	int maxlen = get_rloc_len(*(u32 *)dest);
	u8 *dst = get_rloc_data(dest);
	long ret;

	/* No room reserved for the string: nothing to record. */
	if (!maxlen)
		return;

	ret = strlcpy(dst, current->comm, maxlen);
	/*
	 * strlcpy() returns strlen(current->comm), which can exceed
	 * maxlen; clamp so the recorded data-rloc length never claims
	 * more bytes than were actually copied (maxlen - 1 plus NUL).
	 */
	if (ret >= maxlen)
		ret = maxlen - 1;
	*(u32 *)dest = make_data_rloc(ret, get_rloc_offs(*(u32 *)dest));
}
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(comm, string));
void FETCH_FUNC_NAME(comm, string_size)(struct pt_regs *regs,
void *data, void *dest)
{
*(u32 *)dest = strlen(current->comm) + 1;
}
NOKPROBE_SYMBOL(FETCH_FUNC_NAME(comm, string_size));
static const struct fetch_type *find_fetch_type(const char *type, static const struct fetch_type *find_fetch_type(const char *type,
const struct fetch_type *ftbl) const struct fetch_type *ftbl)
{ {
...@@ -348,6 +370,11 @@ static int parse_probe_vars(char *arg, const struct fetch_type *t, ...@@ -348,6 +370,11 @@ static int parse_probe_vars(char *arg, const struct fetch_type *t,
} }
} else } else
ret = -EINVAL; ret = -EINVAL;
} else if (strcmp(arg, "comm") == 0) {
if (strcmp(t->name, "string") != 0 &&
strcmp(t->name, "string_size") != 0)
return -EINVAL;
f->fn = t->fetch[FETCH_MTD_comm];
} else } else
ret = -EINVAL; ret = -EINVAL;
...@@ -522,6 +549,12 @@ int traceprobe_parse_probe_arg(char *arg, ssize_t *size, ...@@ -522,6 +549,12 @@ int traceprobe_parse_probe_arg(char *arg, ssize_t *size,
arg[t - parg->comm] = '\0'; arg[t - parg->comm] = '\0';
t++; t++;
} }
/*
* The default type of $comm should be "string", and it can't be
* dereferenced.
*/
if (!t && strcmp(arg, "$comm") == 0)
t = "string";
parg->type = find_fetch_type(t, ftbl); parg->type = find_fetch_type(t, ftbl);
if (!parg->type) { if (!parg->type) {
pr_info("Unsupported type: %s\n", t); pr_info("Unsupported type: %s\n", t);
......
...@@ -102,6 +102,7 @@ enum { ...@@ -102,6 +102,7 @@ enum {
FETCH_MTD_reg = 0, FETCH_MTD_reg = 0,
FETCH_MTD_stack, FETCH_MTD_stack,
FETCH_MTD_retval, FETCH_MTD_retval,
FETCH_MTD_comm,
FETCH_MTD_memory, FETCH_MTD_memory,
FETCH_MTD_symbol, FETCH_MTD_symbol,
FETCH_MTD_deref, FETCH_MTD_deref,
...@@ -183,6 +184,14 @@ DECLARE_BASIC_FETCH_FUNCS(bitfield); ...@@ -183,6 +184,14 @@ DECLARE_BASIC_FETCH_FUNCS(bitfield);
#define fetch_bitfield_string NULL #define fetch_bitfield_string NULL
#define fetch_bitfield_string_size NULL #define fetch_bitfield_string_size NULL
/* comm only makes sense as a string */
#define fetch_comm_u8 NULL
#define fetch_comm_u16 NULL
#define fetch_comm_u32 NULL
#define fetch_comm_u64 NULL
DECLARE_FETCH_FUNC(comm, string);
DECLARE_FETCH_FUNC(comm, string_size);
/* /*
* Define macro for basic types - we don't need to define s* types, because * Define macro for basic types - we don't need to define s* types, because
* we have to care only about bitwidth at recording time. * we have to care only about bitwidth at recording time.
...@@ -213,6 +222,7 @@ DEFINE_FETCH_##method(u64) ...@@ -213,6 +222,7 @@ DEFINE_FETCH_##method(u64)
ASSIGN_FETCH_FUNC(reg, ftype), \ ASSIGN_FETCH_FUNC(reg, ftype), \
ASSIGN_FETCH_FUNC(stack, ftype), \ ASSIGN_FETCH_FUNC(stack, ftype), \
ASSIGN_FETCH_FUNC(retval, ftype), \ ASSIGN_FETCH_FUNC(retval, ftype), \
ASSIGN_FETCH_FUNC(comm, ftype), \
ASSIGN_FETCH_FUNC(memory, ftype), \ ASSIGN_FETCH_FUNC(memory, ftype), \
ASSIGN_FETCH_FUNC(symbol, ftype), \ ASSIGN_FETCH_FUNC(symbol, ftype), \
ASSIGN_FETCH_FUNC(deref, ftype), \ ASSIGN_FETCH_FUNC(deref, ftype), \
......
...@@ -11,6 +11,13 @@ config SAMPLE_TRACE_EVENTS ...@@ -11,6 +11,13 @@ config SAMPLE_TRACE_EVENTS
help help
This build trace event example modules. This build trace event example modules.
config SAMPLE_TRACE_PRINTK
tristate "Build trace_printk module - tests various trace_printk formats"
depends on EVENT_TRACING && m
help
This builds a module that calls trace_printk() and can be used to
test various trace_printk() calls from a module.
config SAMPLE_KOBJECT config SAMPLE_KOBJECT
tristate "Build kobject examples -- loadable modules only" tristate "Build kobject examples -- loadable modules only"
depends on m depends on m
......
...@@ -2,4 +2,4 @@ ...@@ -2,4 +2,4 @@
obj-$(CONFIG_SAMPLES) += kobject/ kprobes/ trace_events/ livepatch/ \ obj-$(CONFIG_SAMPLES) += kobject/ kprobes/ trace_events/ livepatch/ \
hw_breakpoint/ kfifo/ kdb/ hidraw/ rpmsg/ seccomp/ \ hw_breakpoint/ kfifo/ kdb/ hidraw/ rpmsg/ seccomp/ \
configfs/ connector/ v4l/ configfs/ connector/ v4l/ trace_printk/
# Builds a module that calls various trace_printk() routines.
# To use it (as root): insmod <module_name.ko>
# This module can also be used to test the trace_printk code.
obj-$(CONFIG_SAMPLE_TRACE_PRINTK) += trace-printk.o
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/irq_work.h>
/* Must not be static to force gcc to consider these non constant */
/*
 * Non-static, non-const globals: if these were constants the compiler
 * (and trace_printk's literal-format optimization) could treat them as
 * compile-time strings, which would defeat the purpose of testing the
 * dynamic (trace_puts / plain trace_printk) code paths.
 */
char *trace_printk_test_global_str =
	"This is a dynamic string that will use trace_puts\n";
char *trace_printk_test_global_str_irq =
	"(irq) This is a dynamic string that will use trace_puts\n";
char *trace_printk_test_global_str_fmt =
	"%sThis is a %s that will use trace_printk\n";
/* Work item used to repeat the same tests from hard-irq context. */
static struct irq_work irqwork;
/*
 * Runs in hard-irq context via irq_work and exercises each
 * trace_printk() variant from there:
 *   - literal string, no args      -> "will use trace_bputs" path
 *   - non-constant global string   -> "will use trace_puts" path
 *     (NOTE(review): the non-literal "format" argument is deliberate,
 *     to defeat the constant-format optimization — not a format bug)
 *   - literal format with args     -> "will use trace_bprintk" path
 *   - non-constant format with args-> plain trace_printk path
 */
static void trace_printk_irq_work(struct irq_work *work)
{
	trace_printk("(irq) This is a static string that will use trace_bputs\n");
	trace_printk(trace_printk_test_global_str_irq);
	trace_printk("(irq) This is a %s that will use trace_bprintk()\n",
		     "static string");
	trace_printk(trace_printk_test_global_str_fmt,
		     "(irq) ", "dynamic string");
}
/*
 * Module init: emit one trace line through each trace_printk() variant
 * from normal (process) context, and queue an irq_work to repeat the
 * same set from hard-irq context.
 *
 * Returns 0 (module load always succeeds).
 */
static int __init trace_printk_init(void)
{
	init_irq_work(&irqwork, trace_printk_irq_work);

	/* Literal string, no args: exercises the trace_bputs path. */
	trace_printk("This is a static string that will use trace_bputs\n");
	/*
	 * Non-constant global as the "format": exercises the trace_puts
	 * path (deliberately non-literal; see the globals above).
	 */
	trace_printk(trace_printk_test_global_str);

	/* Kick off printing in irq context */
	irq_work_queue(&irqwork);

	/* Literal format with args: exercises the trace_bprintk path. */
	trace_printk("This is a %s that will use trace_bprintk()\n",
		     "static string");
	/* Non-constant format with args: plain trace_printk path. */
	trace_printk(trace_printk_test_global_str_fmt, "", "dynamic string");

	return 0;
}
/*
 * Module teardown: wait for the irq_work queued by trace_printk_init()
 * to finish before the module text and the static irqwork structure go
 * away. Without the sync, a still-pending callback could run after
 * unload (use-after-free of both code and data).
 */
static void __exit trace_printk_exit(void)
{
	irq_work_sync(&irqwork);
}
module_init(trace_printk_init);
module_exit(trace_printk_exit);
MODULE_AUTHOR("Steven Rostedt");
MODULE_DESCRIPTION("trace-printk");
MODULE_LICENSE("GPL");
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment