Commit 64edbc56 authored by Ingo Molnar's avatar Ingo Molnar

Merge branch 'tracing/ftrace' into tracing/core

Merge reason: this mini-topic had outstanding problems that delayed
              its merge, so it does not fast-forward.
Signed-off-by: default avatarIngo Molnar <mingo@elte.hu>
parents 43bd1236 0f6ce3de
...@@ -751,12 +751,25 @@ and is between 256 and 4096 characters. It is defined in the file ...@@ -751,12 +751,25 @@ and is between 256 and 4096 characters. It is defined in the file
ia64_pal_cache_flush instead of SAL_CACHE_FLUSH. ia64_pal_cache_flush instead of SAL_CACHE_FLUSH.
ftrace=[tracer] ftrace=[tracer]
[ftrace] will set and start the specified tracer [FTRACE] will set and start the specified tracer
as early as possible in order to facilitate early as early as possible in order to facilitate early
boot debugging. boot debugging.
ftrace_dump_on_oops ftrace_dump_on_oops
[ftrace] will dump the trace buffers on oops. [FTRACE] will dump the trace buffers on oops.
ftrace_filter=[function-list]
[FTRACE] Limit the functions traced by the function
tracer at boot up. function-list is a comma separated
list of functions. This list can be changed at run
time by the set_ftrace_filter file in the debugfs
tracing directory.
ftrace_notrace=[function-list]
[FTRACE] Do not trace the functions specified in
function-list. This list can be changed at run time
by the set_ftrace_notrace file in the debugfs
tracing directory.
gamecon.map[2|3]= gamecon.map[2|3]=
[HW,JOY] Multisystem joystick and NES/SNES/PSX pad [HW,JOY] Multisystem joystick and NES/SNES/PSX pad
......
...@@ -51,6 +51,7 @@ struct trace_iterator { ...@@ -51,6 +51,7 @@ struct trace_iterator {
int cpu_file; int cpu_file;
struct mutex mutex; struct mutex mutex;
struct ring_buffer_iter *buffer_iter[NR_CPUS]; struct ring_buffer_iter *buffer_iter[NR_CPUS];
unsigned long iter_flags;
/* The below is zeroed out in pipe_read */ /* The below is zeroed out in pipe_read */
struct trace_seq seq; struct trace_seq seq;
...@@ -58,7 +59,6 @@ struct trace_iterator { ...@@ -58,7 +59,6 @@ struct trace_iterator {
int cpu; int cpu;
u64 ts; u64 ts;
unsigned long iter_flags;
loff_t pos; loff_t pos;
long idx; long idx;
......
...@@ -7,18 +7,18 @@ ...@@ -7,18 +7,18 @@
#undef TRACE_SYSTEM #undef TRACE_SYSTEM
#define TRACE_SYSTEM irq #define TRACE_SYSTEM irq
#define softirq_name(sirq) { sirq, #sirq } #define softirq_name(sirq) { sirq##_SOFTIRQ, #sirq }
#define show_softirq_name(val) \ #define show_softirq_name(val) \
__print_symbolic(val, \ __print_symbolic(val, \
softirq_name(HI_SOFTIRQ), \ softirq_name(HI), \
softirq_name(TIMER_SOFTIRQ), \ softirq_name(TIMER), \
softirq_name(NET_TX_SOFTIRQ), \ softirq_name(NET_TX), \
softirq_name(NET_RX_SOFTIRQ), \ softirq_name(NET_RX), \
softirq_name(BLOCK_SOFTIRQ), \ softirq_name(BLOCK), \
softirq_name(TASKLET_SOFTIRQ), \ softirq_name(TASKLET), \
softirq_name(SCHED_SOFTIRQ), \ softirq_name(SCHED), \
softirq_name(HRTIMER_SOFTIRQ), \ softirq_name(HRTIMER), \
softirq_name(RCU_SOFTIRQ)) softirq_name(RCU))
/** /**
* irq_handler_entry - called immediately before the irq action handler * irq_handler_entry - called immediately before the irq action handler
......
...@@ -18,14 +18,17 @@ ...@@ -18,14 +18,17 @@
#include <linux/ftrace_event.h> #include <linux/ftrace_event.h>
#undef __field
#define __field(type, item) type item;
#undef __array #undef __array
#define __array(type, item, len) type item[len]; #define __array(type, item, len) type item[len];
#undef __field #undef __dynamic_array
#define __field(type, item) type item; #define __dynamic_array(type, item, len) unsigned short __data_loc_##item;
#undef __string #undef __string
#define __string(item, src) unsigned short __str_loc_##item; #define __string(item, src) __dynamic_array(char, item, -1)
#undef TP_STRUCT__entry #undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args #define TP_STRUCT__entry(args...) args
...@@ -35,7 +38,7 @@ ...@@ -35,7 +38,7 @@
struct ftrace_raw_##name { \ struct ftrace_raw_##name { \
struct trace_entry ent; \ struct trace_entry ent; \
tstruct \ tstruct \
char __str_data[0]; \ char __data[0]; \
}; \ }; \
static struct ftrace_event_call event_##name static struct ftrace_event_call event_##name
...@@ -47,30 +50,31 @@ ...@@ -47,30 +50,31 @@
* *
* Include the following: * Include the following:
* *
* struct ftrace_str_offsets_<call> { * struct ftrace_data_offsets_<call> {
* int <str1>; * int <item1>;
* int <str2>; * int <item2>;
* [...] * [...]
* }; * };
* *
* The __string() macro will create each int <str>, this is to * The __dynamic_array() macro will create each int <item>, this is
* keep the offset of each string from the beggining of the event * to keep the offset of each array from the beginning of the event.
* once we perform the strlen() of the src strings.
*
*/ */
#undef __field
#define __field(type, item);
#undef __array #undef __array
#define __array(type, item, len) #define __array(type, item, len)
#undef __field #undef __dynamic_array
#define __field(type, item); #define __dynamic_array(type, item, len) int item;
#undef __string #undef __string
#define __string(item, src) int item; #define __string(item, src) __dynamic_array(char, item, -1)
#undef TRACE_EVENT #undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ #define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
struct ftrace_str_offsets_##call { \ struct ftrace_data_offsets_##call { \
tstruct; \ tstruct; \
}; };
...@@ -119,8 +123,12 @@ ...@@ -119,8 +123,12 @@
#undef TP_printk #undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args #define TP_printk(fmt, args...) fmt "\n", args
#undef __get_dynamic_array
#define __get_dynamic_array(field) \
((void *)__entry + __entry->__data_loc_##field)
#undef __get_str #undef __get_str
#define __get_str(field) ((char *)__entry + __entry->__str_loc_##field) #define __get_str(field) (char *)__get_dynamic_array(field)
#undef __print_flags #undef __print_flags
#define __print_flags(flag, delim, flag_array...) \ #define __print_flags(flag, delim, flag_array...) \
...@@ -207,16 +215,19 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \ ...@@ -207,16 +215,19 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
if (!ret) \ if (!ret) \
return 0; return 0;
#undef __string #undef __dynamic_array
#define __string(item, src) \ #define __dynamic_array(type, item, len) \
ret = trace_seq_printf(s, "\tfield: __str_loc " #item ";\t" \ ret = trace_seq_printf(s, "\tfield:__data_loc " #item ";\t" \
"offset:%u;tsize:%u;\n", \ "offset:%u;\tsize:%u;\n", \
(unsigned int)offsetof(typeof(field), \ (unsigned int)offsetof(typeof(field), \
__str_loc_##item), \ __data_loc_##item), \
(unsigned int)sizeof(field.__str_loc_##item)); \ (unsigned int)sizeof(field.__data_loc_##item)); \
if (!ret) \ if (!ret) \
return 0; return 0;
#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)
#undef __entry #undef __entry
#define __entry REC #define __entry REC
...@@ -260,11 +271,14 @@ ftrace_format_##call(struct trace_seq *s) \ ...@@ -260,11 +271,14 @@ ftrace_format_##call(struct trace_seq *s) \
if (ret) \ if (ret) \
return ret; return ret;
#undef __dynamic_array
#define __dynamic_array(type, item, len) \
ret = trace_define_field(event_call, "__data_loc" "[" #type "]", #item,\
offsetof(typeof(field), __data_loc_##item), \
sizeof(field.__data_loc_##item), 0);
#undef __string #undef __string
#define __string(item, src) \ #define __string(item, src) __dynamic_array(char, item, -1)
ret = trace_define_field(event_call, "__str_loc", #item, \
offsetof(typeof(field), __str_loc_##item), \
sizeof(field.__str_loc_##item), 0);
#undef TRACE_EVENT #undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, func, print) \ #define TRACE_EVENT(call, proto, args, tstruct, func, print) \
...@@ -288,6 +302,43 @@ ftrace_define_fields_##call(void) \ ...@@ -288,6 +302,43 @@ ftrace_define_fields_##call(void) \
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/*
* remember the offset of each array from the beginning of the event.
*/
#undef __entry
#define __entry entry
#undef __field
#define __field(type, item)
#undef __array
#define __array(type, item, len)
#undef __dynamic_array
#define __dynamic_array(type, item, len) \
__data_offsets->item = __data_size + \
offsetof(typeof(*entry), __data); \
__data_size += (len) * sizeof(type);
#undef __string
#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1) \
#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
static inline int ftrace_get_offsets_##call( \
struct ftrace_data_offsets_##call *__data_offsets, proto) \
{ \
int __data_size = 0; \
struct ftrace_raw_##call __maybe_unused *entry; \
\
tstruct; \
\
return __data_size; \
}
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
/* /*
* Stage 4 of the trace events. * Stage 4 of the trace events.
* *
...@@ -432,15 +483,15 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\ ...@@ -432,15 +483,15 @@ static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
#undef __array #undef __array
#define __array(type, item, len) #define __array(type, item, len)
#undef __dynamic_array
#define __dynamic_array(type, item, len) \
__entry->__data_loc_##item = __data_offsets.item;
#undef __string #undef __string
#define __string(item, src) \ #define __string(item, src) __dynamic_array(char, item, -1) \
__str_offsets.item = __str_size + \
offsetof(typeof(*entry), __str_data); \
__str_size += strlen(src) + 1;
#undef __assign_str #undef __assign_str
#define __assign_str(dst, src) \ #define __assign_str(dst, src) \
__entry->__str_loc_##dst = __str_offsets.dst; \
strcpy(__get_str(dst), src); strcpy(__get_str(dst), src);
#undef TRACE_EVENT #undef TRACE_EVENT
...@@ -451,27 +502,30 @@ static struct ftrace_event_call event_##call; \ ...@@ -451,27 +502,30 @@ static struct ftrace_event_call event_##call; \
\ \
static void ftrace_raw_event_##call(proto) \ static void ftrace_raw_event_##call(proto) \
{ \ { \
struct ftrace_str_offsets_##call __maybe_unused __str_offsets; \ struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
struct ftrace_event_call *event_call = &event_##call; \ struct ftrace_event_call *event_call = &event_##call; \
struct ring_buffer_event *event; \ struct ring_buffer_event *event; \
struct ftrace_raw_##call *entry; \ struct ftrace_raw_##call *entry; \
unsigned long irq_flags; \ unsigned long irq_flags; \
int __str_size = 0; \ int __data_size; \
int pc; \ int pc; \
\ \
local_save_flags(irq_flags); \ local_save_flags(irq_flags); \
pc = preempt_count(); \ pc = preempt_count(); \
\ \
tstruct; \ __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
\ \
event = trace_current_buffer_lock_reserve(event_##call.id, \ event = trace_current_buffer_lock_reserve(event_##call.id, \
sizeof(struct ftrace_raw_##call) + __str_size,\ sizeof(*entry) + __data_size, \
irq_flags, pc); \ irq_flags, pc); \
if (!event) \ if (!event) \
return; \ return; \
entry = ring_buffer_event_data(event); \ entry = ring_buffer_event_data(event); \
\ \
assign; \ \
tstruct \
\
{ assign; } \
\ \
if (!filter_current_check_discard(event_call, entry, event)) \ if (!filter_current_check_discard(event_call, entry, event)) \
trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \ trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \
......
...@@ -56,6 +56,13 @@ config CONTEXT_SWITCH_TRACER ...@@ -56,6 +56,13 @@ config CONTEXT_SWITCH_TRACER
select MARKERS select MARKERS
bool bool
# All tracer options should select GENERIC_TRACER. For those options that are
# enabled by all tracers (context switch and event tracer) they select TRACING.
# This allows those options to appear when no other tracer is selected. But the
# options do not appear when something else selects it. We need the two options
# GENERIC_TRACER and TRACING to avoid circular dependencies to accomplish the
# hiding of the automatic options.
config TRACING config TRACING
bool bool
select DEBUG_FS select DEBUG_FS
...@@ -66,6 +73,10 @@ config TRACING ...@@ -66,6 +73,10 @@ config TRACING
select BINARY_PRINTF select BINARY_PRINTF
select EVENT_TRACING select EVENT_TRACING
config GENERIC_TRACER
bool
select TRACING
# #
# Minimum requirements an architecture has to meet for us to # Minimum requirements an architecture has to meet for us to
# be able to offer generic tracing facilities: # be able to offer generic tracing facilities:
...@@ -95,7 +106,7 @@ config FUNCTION_TRACER ...@@ -95,7 +106,7 @@ config FUNCTION_TRACER
depends on HAVE_FUNCTION_TRACER depends on HAVE_FUNCTION_TRACER
select FRAME_POINTER select FRAME_POINTER
select KALLSYMS select KALLSYMS
select TRACING select GENERIC_TRACER
select CONTEXT_SWITCH_TRACER select CONTEXT_SWITCH_TRACER
help help
Enable the kernel to trace every kernel function. This is done Enable the kernel to trace every kernel function. This is done
...@@ -126,7 +137,7 @@ config IRQSOFF_TRACER ...@@ -126,7 +137,7 @@ config IRQSOFF_TRACER
depends on TRACE_IRQFLAGS_SUPPORT depends on TRACE_IRQFLAGS_SUPPORT
depends on GENERIC_TIME depends on GENERIC_TIME
select TRACE_IRQFLAGS select TRACE_IRQFLAGS
select TRACING select GENERIC_TRACER
select TRACER_MAX_TRACE select TRACER_MAX_TRACE
help help
This option measures the time spent in irqs-off critical This option measures the time spent in irqs-off critical
...@@ -147,7 +158,7 @@ config PREEMPT_TRACER ...@@ -147,7 +158,7 @@ config PREEMPT_TRACER
default n default n
depends on GENERIC_TIME depends on GENERIC_TIME
depends on PREEMPT depends on PREEMPT
select TRACING select GENERIC_TRACER
select TRACER_MAX_TRACE select TRACER_MAX_TRACE
help help
This option measures the time spent in preemption off critical This option measures the time spent in preemption off critical
...@@ -166,7 +177,7 @@ config PREEMPT_TRACER ...@@ -166,7 +177,7 @@ config PREEMPT_TRACER
config SYSPROF_TRACER config SYSPROF_TRACER
bool "Sysprof Tracer" bool "Sysprof Tracer"
depends on X86 depends on X86
select TRACING select GENERIC_TRACER
select CONTEXT_SWITCH_TRACER select CONTEXT_SWITCH_TRACER
help help
This tracer provides the trace needed by the 'Sysprof' userspace This tracer provides the trace needed by the 'Sysprof' userspace
...@@ -174,44 +185,33 @@ config SYSPROF_TRACER ...@@ -174,44 +185,33 @@ config SYSPROF_TRACER
config SCHED_TRACER config SCHED_TRACER
bool "Scheduling Latency Tracer" bool "Scheduling Latency Tracer"
select TRACING select GENERIC_TRACER
select CONTEXT_SWITCH_TRACER select CONTEXT_SWITCH_TRACER
select TRACER_MAX_TRACE select TRACER_MAX_TRACE
help help
This tracer tracks the latency of the highest priority task This tracer tracks the latency of the highest priority task
to be scheduled in, starting from the point it has woken up. to be scheduled in, starting from the point it has woken up.
config ENABLE_CONTEXT_SWITCH_TRACER config ENABLE_DEFAULT_TRACERS
bool "Trace process context switches" bool "Trace process context switches and events"
select TRACING depends on !GENERIC_TRACER
select CONTEXT_SWITCH_TRACER
help
This tracer gets called from the context switch and records
all switching of tasks.
config ENABLE_EVENT_TRACING
bool "Trace various events in the kernel"
select TRACING select TRACING
help help
This tracer hooks to various trace points in the kernel This tracer hooks to various trace points in the kernel
allowing the user to pick and choose which trace point they allowing the user to pick and choose which trace point they
want to trace. want to trace. It also includes the sched_switch tracer plugin.
Note, all tracers enable event tracing. This option is
only a convenience to enable event tracing when no other
tracers are selected.
config FTRACE_SYSCALLS config FTRACE_SYSCALLS
bool "Trace syscalls" bool "Trace syscalls"
depends on HAVE_FTRACE_SYSCALLS depends on HAVE_FTRACE_SYSCALLS
select TRACING select GENERIC_TRACER
select KALLSYMS select KALLSYMS
help help
Basic tracer to catch the syscall entry and exit events. Basic tracer to catch the syscall entry and exit events.
config BOOT_TRACER config BOOT_TRACER
bool "Trace boot initcalls" bool "Trace boot initcalls"
select TRACING select GENERIC_TRACER
select CONTEXT_SWITCH_TRACER select CONTEXT_SWITCH_TRACER
help help
This tracer helps developers to optimize boot times: it records This tracer helps developers to optimize boot times: it records
...@@ -228,7 +228,7 @@ config BOOT_TRACER ...@@ -228,7 +228,7 @@ config BOOT_TRACER
config TRACE_BRANCH_PROFILING config TRACE_BRANCH_PROFILING
bool bool
select TRACING select GENERIC_TRACER
choice choice
prompt "Branch Profiling" prompt "Branch Profiling"
...@@ -308,7 +308,7 @@ config BRANCH_TRACER ...@@ -308,7 +308,7 @@ config BRANCH_TRACER
config POWER_TRACER config POWER_TRACER
bool "Trace power consumption behavior" bool "Trace power consumption behavior"
depends on X86 depends on X86
select TRACING select GENERIC_TRACER
help help
This tracer helps developers to analyze and optimize the kernels This tracer helps developers to analyze and optimize the kernels
power management decisions, specifically the C-state and P-state power management decisions, specifically the C-state and P-state
...@@ -342,14 +342,14 @@ config STACK_TRACER ...@@ -342,14 +342,14 @@ config STACK_TRACER
config HW_BRANCH_TRACER config HW_BRANCH_TRACER
depends on HAVE_HW_BRANCH_TRACER depends on HAVE_HW_BRANCH_TRACER
bool "Trace hw branches" bool "Trace hw branches"
select TRACING select GENERIC_TRACER
help help
This tracer records all branches on the system in a circular This tracer records all branches on the system in a circular
buffer giving access to the last N branches for each cpu. buffer giving access to the last N branches for each cpu.
config KMEMTRACE config KMEMTRACE
bool "Trace SLAB allocations" bool "Trace SLAB allocations"
select TRACING select GENERIC_TRACER
help help
kmemtrace provides tracing for slab allocator functions, such as kmemtrace provides tracing for slab allocator functions, such as
kmalloc, kfree, kmem_cache_alloc, kmem_cache_free etc.. Collected kmalloc, kfree, kmem_cache_alloc, kmem_cache_free etc.. Collected
...@@ -369,7 +369,7 @@ config KMEMTRACE ...@@ -369,7 +369,7 @@ config KMEMTRACE
config WORKQUEUE_TRACER config WORKQUEUE_TRACER
bool "Trace workqueues" bool "Trace workqueues"
select TRACING select GENERIC_TRACER
help help
The workqueue tracer provides some statistical informations The workqueue tracer provides some statistical informations
about each cpu workqueue thread such as the number of the about each cpu workqueue thread such as the number of the
...@@ -385,7 +385,7 @@ config BLK_DEV_IO_TRACE ...@@ -385,7 +385,7 @@ config BLK_DEV_IO_TRACE
select RELAY select RELAY
select DEBUG_FS select DEBUG_FS
select TRACEPOINTS select TRACEPOINTS
select TRACING select GENERIC_TRACER
select STACKTRACE select STACKTRACE
help help
Say Y here if you want to be able to trace the block layer actions Say Y here if you want to be able to trace the block layer actions
...@@ -446,7 +446,7 @@ config FTRACE_SELFTEST ...@@ -446,7 +446,7 @@ config FTRACE_SELFTEST
config FTRACE_STARTUP_TEST config FTRACE_STARTUP_TEST
bool "Perform a startup test on ftrace" bool "Perform a startup test on ftrace"
depends on TRACING depends on GENERIC_TRACER
select FTRACE_SELFTEST select FTRACE_SELFTEST
help help
This option performs a series of startup tests on ftrace. On bootup This option performs a series of startup tests on ftrace. On bootup
...@@ -457,7 +457,7 @@ config FTRACE_STARTUP_TEST ...@@ -457,7 +457,7 @@ config FTRACE_STARTUP_TEST
config MMIOTRACE config MMIOTRACE
bool "Memory mapped IO tracing" bool "Memory mapped IO tracing"
depends on HAVE_MMIOTRACE_SUPPORT && PCI depends on HAVE_MMIOTRACE_SUPPORT && PCI
select TRACING select GENERIC_TRACER
help help
Mmiotrace traces Memory Mapped I/O access and is meant for Mmiotrace traces Memory Mapped I/O access and is meant for
debugging and reverse engineering. It is called from the ioremap debugging and reverse engineering. It is called from the ioremap
......
...@@ -32,6 +32,7 @@ ...@@ -32,6 +32,7 @@
#include <trace/events/sched.h> #include <trace/events/sched.h>
#include <asm/ftrace.h> #include <asm/ftrace.h>
#include <asm/setup.h>
#include "trace_output.h" #include "trace_output.h"
#include "trace_stat.h" #include "trace_stat.h"
...@@ -598,7 +599,7 @@ function_profile_call(unsigned long ip, unsigned long parent_ip) ...@@ -598,7 +599,7 @@ function_profile_call(unsigned long ip, unsigned long parent_ip)
local_irq_save(flags); local_irq_save(flags);
stat = &__get_cpu_var(ftrace_profile_stats); stat = &__get_cpu_var(ftrace_profile_stats);
if (!stat->hash) if (!stat->hash || !ftrace_profile_enabled)
goto out; goto out;
rec = ftrace_find_profiled_func(stat, ip); rec = ftrace_find_profiled_func(stat, ip);
...@@ -629,7 +630,7 @@ static void profile_graph_return(struct ftrace_graph_ret *trace) ...@@ -629,7 +630,7 @@ static void profile_graph_return(struct ftrace_graph_ret *trace)
local_irq_save(flags); local_irq_save(flags);
stat = &__get_cpu_var(ftrace_profile_stats); stat = &__get_cpu_var(ftrace_profile_stats);
if (!stat->hash) if (!stat->hash || !ftrace_profile_enabled)
goto out; goto out;
calltime = trace->rettime - trace->calltime; calltime = trace->rettime - trace->calltime;
...@@ -723,6 +724,10 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf, ...@@ -723,6 +724,10 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf,
ftrace_profile_enabled = 1; ftrace_profile_enabled = 1;
} else { } else {
ftrace_profile_enabled = 0; ftrace_profile_enabled = 0;
/*
* unregister_ftrace_profiler calls stop_machine
* so this acts like a synchronize_sched.
*/
unregister_ftrace_profiler(); unregister_ftrace_profiler();
} }
} }
...@@ -2369,6 +2374,45 @@ void ftrace_set_notrace(unsigned char *buf, int len, int reset) ...@@ -2369,6 +2374,45 @@ void ftrace_set_notrace(unsigned char *buf, int len, int reset)
ftrace_set_regex(buf, len, reset, 0); ftrace_set_regex(buf, len, reset, 0);
} }
/*
* command line interface to allow users to set filters on boot up.
*/
#define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE
static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
/*
 * Record the boot-time "ftrace_notrace=" function list so it can be
 * applied once ftrace is initialized (see set_ftrace_early_filters()).
 * Returns 1 to tell the early-param parser the argument was consumed.
 */
static int __init set_ftrace_notrace(char *str)
{
	/*
	 * Copy at most SIZE-1 bytes: strncpy() does not NUL-terminate
	 * when the source is as long as the bound, but the static
	 * __initdata buffer is zero-initialized, so leaving the last
	 * byte untouched guarantees termination.
	 */
	strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE - 1);
	return 1;
}
__setup("ftrace_notrace=", set_ftrace_notrace);
/*
 * Record the boot-time "ftrace_filter=" function list so it can be
 * applied once ftrace is initialized (see set_ftrace_early_filters()).
 * Returns 1 to tell the early-param parser the argument was consumed.
 */
static int __init set_ftrace_filter(char *str)
{
	/*
	 * Copy at most SIZE-1 bytes: strncpy() does not NUL-terminate
	 * when the source is as long as the bound, but the static
	 * __initdata buffer is zero-initialized, so leaving the last
	 * byte untouched guarantees termination.
	 */
	strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE - 1);
	return 1;
}
__setup("ftrace_filter=", set_ftrace_filter);
/*
 * Apply one comma-separated boot-time function list: each token of
 * @buf is handed to ftrace_set_regex(), either as a filter pattern
 * (@enable != 0) or a notrace pattern (@enable == 0).
 * Note: strsep() consumes @buf in place.
 */
static void __init set_ftrace_early_filter(char *buf, int enable)
{
	char *func;

	for (func = strsep(&buf, ","); func; func = strsep(&buf, ","))
		ftrace_set_regex(func, strlen(func), 0, enable);
}
/*
 * Apply any function lists captured from the kernel command line
 * ("ftrace_filter=" / "ftrace_notrace=") during early boot.  A
 * non-empty first byte means the corresponding __setup handler ran.
 * Called from ftrace_init() once ftrace is ready to accept filters.
 */
static void __init set_ftrace_early_filters(void)
{
	if (ftrace_filter_buf[0])
		set_ftrace_early_filter(ftrace_filter_buf, 1);
	if (ftrace_notrace_buf[0])
		set_ftrace_early_filter(ftrace_notrace_buf, 0);
}
static int static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable) ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{ {
...@@ -2829,6 +2873,8 @@ void __init ftrace_init(void) ...@@ -2829,6 +2873,8 @@ void __init ftrace_init(void)
if (ret) if (ret)
pr_warning("Failed to register trace ftrace module notifier\n"); pr_warning("Failed to register trace ftrace module notifier\n");
set_ftrace_early_filters();
return; return;
failed: failed:
ftrace_disabled = 1; ftrace_disabled = 1;
......
...@@ -2826,6 +2826,9 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp) ...@@ -2826,6 +2826,9 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
/* trace pipe does not show start of buffer */ /* trace pipe does not show start of buffer */
cpumask_setall(iter->started); cpumask_setall(iter->started);
if (trace_flags & TRACE_ITER_LATENCY_FMT)
iter->iter_flags |= TRACE_FILE_LAT_FMT;
iter->cpu_file = cpu_file; iter->cpu_file = cpu_file;
iter->tr = &global_trace; iter->tr = &global_trace;
mutex_init(&iter->mutex); mutex_init(&iter->mutex);
......
...@@ -478,12 +478,12 @@ enum { ...@@ -478,12 +478,12 @@ enum {
static int is_string_field(const char *type) static int is_string_field(const char *type)
{ {
if (strstr(type, "__data_loc") && strstr(type, "char"))
return FILTER_DYN_STRING;
if (strchr(type, '[') && strstr(type, "char")) if (strchr(type, '[') && strstr(type, "char"))
return FILTER_STATIC_STRING; return FILTER_STATIC_STRING;
if (!strcmp(type, "__str_loc"))
return FILTER_DYN_STRING;
return 0; return 0;
} }
......
...@@ -17,6 +17,7 @@ ...@@ -17,6 +17,7 @@
static DECLARE_RWSEM(trace_event_mutex); static DECLARE_RWSEM(trace_event_mutex);
DEFINE_PER_CPU(struct trace_seq, ftrace_event_seq); DEFINE_PER_CPU(struct trace_seq, ftrace_event_seq);
EXPORT_PER_CPU_SYMBOL(ftrace_event_seq);
static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly; static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;
...@@ -250,6 +251,7 @@ ftrace_print_flags_seq(struct trace_seq *p, const char *delim, ...@@ -250,6 +251,7 @@ ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
return p->buffer; return p->buffer;
} }
EXPORT_SYMBOL(ftrace_print_flags_seq);
const char * const char *
ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val, ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
...@@ -275,6 +277,7 @@ ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val, ...@@ -275,6 +277,7 @@ ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
return p->buffer; return p->buffer;
} }
EXPORT_SYMBOL(ftrace_print_symbols_seq);
#ifdef CONFIG_KRETPROBES #ifdef CONFIG_KRETPROBES
static inline const char *kretprobed(const char *name) static inline const char *kretprobed(const char *name)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment