Commit 214b9313 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'trace-3.16' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing updates from Steven Rostedt:
 "Lots of tweaks, small fixes, optimizations, and some helper functions
  to help out the rest of the kernel to ease their use of trace events.

  The big change for this release is the allowing of other tracers, such
  as the latency tracers, to be used in the trace instances and allow
  for function or function graph tracing to be in the top level
  simultaneously"

* tag 'trace-3.16' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (44 commits)
  tracing: Fix memory leak on instance deletion
  tracing: Fix leak of ring buffer data when new instances creation fails
  tracing/kprobes: Avoid self tests if tracing is disabled on boot up
  tracing: Return error if ftrace_trace_arrays list is empty
  tracing: Only calculate stats of tracepoint benchmarks for 2^32 times
  tracing: Convert stddev into u64 in tracepoint benchmark
  tracing: Introduce saved_cmdlines_size file
  tracing: Add __get_dynamic_array_len() macro for trace events
  tracing: Remove unused variable in trace_benchmark
  tracing: Eliminate double free on failure of allocation on boot up
  ftrace/x86: Call text_ip_addr() instead of the duplicated code
  tracing: Print max callstack on stacktrace bug
  tracing: Move locking of trace_cmdline_lock into start/stop seq calls
  tracing: Try again for saved cmdline if failed due to locking
  tracing: Have saved_cmdlines use the seq_read infrastructure
  tracing: Add tracepoint benchmark tracepoint
  tracing: Print nasty banner when trace_printk() is in use
  tracing: Add funcgraph_tail option to print function name after closing braces
  tracing: Eliminate duplicate TRACE_GRAPH_PRINT_xx defines
  tracing: Add __bitmask() macro to trace events to cpumasks and other bitmasks
  ...
parents 14208b0e a9fcaaac
...@@ -2003,6 +2003,32 @@ want, depending on your needs.
360.774530 | 1) 0.594 us | __phys_addr();
The function name is always displayed after the closing bracket
for a function if the start of that function is not in the
trace buffer.
Display of the function name after the closing bracket may also be
enabled for functions whose start is in the trace buffer,
allowing easier searching with grep for function durations.
It is disabled by default.
hide: echo nofuncgraph-tail > trace_options
show: echo funcgraph-tail > trace_options
Example with nofuncgraph-tail (default):
0) | putname() {
0) | kmem_cache_free() {
0) 0.518 us | __phys_addr();
0) 1.757 us | }
0) 2.861 us | }
Example with funcgraph-tail:
0) | putname() {
0) | kmem_cache_free() {
0) 0.518 us | __phys_addr();
0) 1.757 us | } /* kmem_cache_free() */
0) 2.861 us | } /* putname() */
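With funcgraph-tail enabled, the closing brace line carries both the
duration and the function name, so a single grep pairs a function with
its duration. A hypothetical search (assuming the usual debugfs mount
point):

  grep 'kmem_cache_free' /sys/kernel/debug/tracing/trace

This matches the entry line as well as the annotated '}' line, where
the duration is printed.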
You can put some comments on specific functions by using
trace_printk(). For example, if you want to put a comment inside
the __might_sleep() function, you just have to include
......
...@@ -115,6 +115,30 @@ If the tracepoint has to be used in kernel modules, an
EXPORT_TRACEPOINT_SYMBOL_GPL() or EXPORT_TRACEPOINT_SYMBOL() can be
used to export the defined tracepoints.
If you need to do a bit of work for a tracepoint parameter, and
that work is only used for the tracepoint, that work can be encapsulated
within an if statement with the following:
if (trace_foo_bar_enabled()) {
int i;
int tot = 0;
for (i = 0; i < count; i++)
tot += calculate_nuggets();
trace_foo_bar(tot);
}
All trace_<tracepoint>() calls have a matching trace_<tracepoint>_enabled()
function defined that returns true if the tracepoint is enabled and
false otherwise. The trace_<tracepoint>() call should always be placed
within the if (trace_<tracepoint>_enabled()) block to prevent races
between the tracepoint being enabled and the check being seen.
The advantage of using trace_<tracepoint>_enabled() is that it uses
the static_key of the tracepoint to allow the if statement to be
implemented with jump labels, avoiding conditional branches.
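For reference, the helper generated by DECLARE_TRACE() looks roughly
like the following sketch (foo_bar stands in for the tracepoint name;
see the tracepoint.h hunks later in this series):

  static inline bool trace_foo_bar_enabled(void)
  {
          return static_key_false(&__tracepoint_foo_bar.key);
  }

When the tracepoint is compiled out, a stub returning false is
generated instead, so the whole if block falls away.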
Note: The convenience macro TRACE_EVENT provides an alternative way to
define tracepoints. Check http://lwn.net/Articles/379903,
http://lwn.net/Articles/381064 and http://lwn.net/Articles/383362
......
...@@ -9106,7 +9106,6 @@ F: drivers/char/tpm/
TRACING
M: Steven Rostedt <rostedt@goodmis.org>
M: Frederic Weisbecker <fweisbec@gmail.com>
M: Ingo Molnar <mingo@redhat.com>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
S: Maintained
......
...@@ -26,6 +26,7 @@ obj-$(CONFIG_IRQ_WORK) += irq_work.o
obj-y += probe_roms.o
obj-$(CONFIG_X86_32) += i386_ksyms_32.o
obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
obj-$(CONFIG_X86_64) += mcount_64.o
obj-y += syscall_$(BITS).o vsyscall_gtod.o
obj-$(CONFIG_X86_64) += vsyscall_64.o
obj-$(CONFIG_X86_64) += vsyscall_emu_64.o
......
...@@ -53,7 +53,6 @@
#include <asm/page_types.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/ftrace.h>
#include <asm/percpu.h>
#include <asm/asm.h>
#include <asm/context_tracking.h>
...@@ -70,209 +69,6 @@
.code64
.section .entry.text, "ax"
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CC_USING_FENTRY
# define function_hook __fentry__
#else
# define function_hook mcount
#endif
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(function_hook)
retq
END(function_hook)
/* skip is set if stack has been adjusted */
.macro ftrace_caller_setup skip=0
MCOUNT_SAVE_FRAME \skip
/* Load the ftrace_ops into the 3rd parameter */
movq function_trace_op(%rip), %rdx
/* Load ip into the first parameter */
movq RIP(%rsp), %rdi
subq $MCOUNT_INSN_SIZE, %rdi
/* Load the parent_ip into the second parameter */
#ifdef CC_USING_FENTRY
movq SS+16(%rsp), %rsi
#else
movq 8(%rbp), %rsi
#endif
.endm
ENTRY(ftrace_caller)
/* Check if tracing was disabled (quick check) */
cmpl $0, function_trace_stop
jne ftrace_stub
ftrace_caller_setup
/* regs go into 4th parameter (but make it NULL) */
movq $0, %rcx
GLOBAL(ftrace_call)
call ftrace_stub
MCOUNT_RESTORE_FRAME
ftrace_return:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
GLOBAL(ftrace_graph_call)
jmp ftrace_stub
#endif
GLOBAL(ftrace_stub)
retq
END(ftrace_caller)
ENTRY(ftrace_regs_caller)
/* Save the current flags before compare (in SS location)*/
pushfq
/* Check if tracing was disabled (quick check) */
cmpl $0, function_trace_stop
jne ftrace_restore_flags
/* skip=8 to skip flags saved in SS */
ftrace_caller_setup 8
/* Save the rest of pt_regs */
movq %r15, R15(%rsp)
movq %r14, R14(%rsp)
movq %r13, R13(%rsp)
movq %r12, R12(%rsp)
movq %r11, R11(%rsp)
movq %r10, R10(%rsp)
movq %rbp, RBP(%rsp)
movq %rbx, RBX(%rsp)
/* Copy saved flags */
movq SS(%rsp), %rcx
movq %rcx, EFLAGS(%rsp)
/* Kernel segments */
movq $__KERNEL_DS, %rcx
movq %rcx, SS(%rsp)
movq $__KERNEL_CS, %rcx
movq %rcx, CS(%rsp)
/* Stack - skipping return address */
leaq SS+16(%rsp), %rcx
movq %rcx, RSP(%rsp)
/* regs go into 4th parameter */
leaq (%rsp), %rcx
GLOBAL(ftrace_regs_call)
call ftrace_stub
/* Copy flags back to SS, to restore them */
movq EFLAGS(%rsp), %rax
movq %rax, SS(%rsp)
/* Handlers can change the RIP */
movq RIP(%rsp), %rax
movq %rax, SS+8(%rsp)
/* restore the rest of pt_regs */
movq R15(%rsp), %r15
movq R14(%rsp), %r14
movq R13(%rsp), %r13
movq R12(%rsp), %r12
movq R10(%rsp), %r10
movq RBP(%rsp), %rbp
movq RBX(%rsp), %rbx
/* skip=8 to skip flags saved in SS */
MCOUNT_RESTORE_FRAME 8
/* Restore flags */
popfq
jmp ftrace_return
ftrace_restore_flags:
popfq
jmp ftrace_stub
END(ftrace_regs_caller)
#else /* ! CONFIG_DYNAMIC_FTRACE */
ENTRY(function_hook)
cmpl $0, function_trace_stop
jne ftrace_stub
cmpq $ftrace_stub, ftrace_trace_function
jnz trace
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
cmpq $ftrace_stub, ftrace_graph_return
jnz ftrace_graph_caller
cmpq $ftrace_graph_entry_stub, ftrace_graph_entry
jnz ftrace_graph_caller
#endif
GLOBAL(ftrace_stub)
retq
trace:
MCOUNT_SAVE_FRAME
movq RIP(%rsp), %rdi
#ifdef CC_USING_FENTRY
movq SS+16(%rsp), %rsi
#else
movq 8(%rbp), %rsi
#endif
subq $MCOUNT_INSN_SIZE, %rdi
call *ftrace_trace_function
MCOUNT_RESTORE_FRAME
jmp ftrace_stub
END(function_hook)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
MCOUNT_SAVE_FRAME
#ifdef CC_USING_FENTRY
leaq SS+16(%rsp), %rdi
movq $0, %rdx /* No framepointers needed */
#else
leaq 8(%rbp), %rdi
movq (%rbp), %rdx
#endif
movq RIP(%rsp), %rsi
subq $MCOUNT_INSN_SIZE, %rsi
call prepare_ftrace_return
MCOUNT_RESTORE_FRAME
retq
END(ftrace_graph_caller)
GLOBAL(return_to_handler)
subq $24, %rsp
/* Save the return values */
movq %rax, (%rsp)
movq %rdx, 8(%rsp)
movq %rbp, %rdi
call ftrace_return_to_handler
movq %rax, %rdi
movq 8(%rsp), %rdx
movq (%rsp), %rax
addq $24, %rsp
jmp *%rdi
#endif
#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
......
...@@ -297,16 +297,7 @@ int ftrace_int3_handler(struct pt_regs *regs)
static int ftrace_write(unsigned long ip, const char *val, int size)
{
/*
* On x86_64, kernel text mappings are mapped read-only with
* CONFIG_DEBUG_RODATA. So we use the kernel identity mapping instead
* of the kernel text mapping to modify the kernel text.
*
* For 32bit kernels, these mappings are same and we can use
* kernel identity mapping to modify code.
*/
if (within(ip, (unsigned long)_text, (unsigned long)_etext))
ip = (unsigned long)__va(__pa_symbol(ip));
ip = text_ip_addr(ip);
if (probe_kernel_write((void *)ip, val, size))
return -EPERM;
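For context, text_ip_addr() consolidates the translation that the
removed comment described. A sketch of its expected shape, assuming it
simply absorbs the removed lines (not quoted verbatim from the commit):

static unsigned long text_ip_addr(unsigned long ip)
{
	/* modify kernel text through the identity mapping (see above) */
	if (within(ip, (unsigned long)_text, (unsigned long)_etext))
		ip = (unsigned long)__va(__pa_symbol(ip));

	return ip;
}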
...@@ -349,40 +340,14 @@ static int add_brk_on_nop(struct dyn_ftrace *rec)
return add_break(rec->ip, old);
}
/*
* If the record has the FTRACE_FL_REGS set, that means that it
* wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
* is not set, then it wants to convert to the normal callback.
*/
static unsigned long get_ftrace_addr(struct dyn_ftrace *rec)
{
if (rec->flags & FTRACE_FL_REGS)
return (unsigned long)FTRACE_REGS_ADDR;
else
return (unsigned long)FTRACE_ADDR;
}
/*
* The FTRACE_FL_REGS_EN is set when the record already points to
* a function that saves all the regs. Basically the '_EN' version
* represents the current state of the function.
*/
static unsigned long get_ftrace_old_addr(struct dyn_ftrace *rec)
{
if (rec->flags & FTRACE_FL_REGS_EN)
return (unsigned long)FTRACE_REGS_ADDR;
else
return (unsigned long)FTRACE_ADDR;
}
static int add_breakpoints(struct dyn_ftrace *rec, int enable)
{
unsigned long ftrace_addr;
int ret;
ret = ftrace_test_record(rec, enable);
ftrace_addr = get_ftrace_addr(rec);
ftrace_addr = ftrace_get_addr_curr(rec);
ret = ftrace_test_record(rec, enable);
switch (ret) {
case FTRACE_UPDATE_IGNORE:
...@@ -392,10 +357,7 @@ static int add_breakpoints(struct dyn_ftrace *rec, int enable)
/* converting nop to call */
return add_brk_on_nop(rec);
case FTRACE_UPDATE_MODIFY_CALL_REGS:
case FTRACE_UPDATE_MODIFY_CALL:
ftrace_addr = get_ftrace_old_addr(rec);
/* fall through */
case FTRACE_UPDATE_MAKE_NOP:
/* converting a call to a nop */
return add_brk_on_call(rec, ftrace_addr);
...@@ -440,14 +402,14 @@ static int remove_breakpoint(struct dyn_ftrace *rec)
* If not, don't touch the breakpoint, we may just create
* a disaster.
*/
ftrace_addr = get_ftrace_addr(rec);
ftrace_addr = ftrace_get_addr_new(rec);
nop = ftrace_call_replace(ip, ftrace_addr);
if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) == 0)
goto update;
/* Check both ftrace_addr and ftrace_old_addr */
ftrace_addr = get_ftrace_old_addr(rec);
ftrace_addr = ftrace_get_addr_curr(rec);
nop = ftrace_call_replace(ip, ftrace_addr);
if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0)
...@@ -491,13 +453,12 @@ static int add_update(struct dyn_ftrace *rec, int enable)
ret = ftrace_test_record(rec, enable);
ftrace_addr = get_ftrace_addr(rec);
ftrace_addr = ftrace_get_addr_new(rec);
switch (ret) {
case FTRACE_UPDATE_IGNORE:
return 0;
case FTRACE_UPDATE_MODIFY_CALL_REGS:
case FTRACE_UPDATE_MODIFY_CALL:
case FTRACE_UPDATE_MAKE_CALL:
/* converting nop to call */
...@@ -538,13 +499,12 @@ static int finish_update(struct dyn_ftrace *rec, int enable)
ret = ftrace_update_record(rec, enable);
ftrace_addr = get_ftrace_addr(rec);
ftrace_addr = ftrace_get_addr_new(rec);
switch (ret) {
case FTRACE_UPDATE_IGNORE:
return 0;
case FTRACE_UPDATE_MODIFY_CALL_REGS:
case FTRACE_UPDATE_MODIFY_CALL:
case FTRACE_UPDATE_MAKE_CALL:
/* converting nop to call */
...@@ -621,8 +581,8 @@ void ftrace_replace_code(int enable)
return;
remove_breakpoints:
pr_warn("Failed on %s (%d):\n", report, count);
ftrace_bug(ret, rec ? rec->ip : 0);
printk(KERN_WARNING "Failed on %s (%d):\n", report, count);
for_ftrace_rec_iter(iter) {
rec = ftrace_rec_iter_record(iter);
/*
......
/*
* linux/arch/x86_64/mcount_64.S
*
* Copyright (C) 2014 Steven Rostedt, Red Hat Inc
*/
#include <linux/linkage.h>
#include <asm/ptrace.h>
#include <asm/ftrace.h>
.code64
.section .entry.text, "ax"
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CC_USING_FENTRY
# define function_hook __fentry__
#else
# define function_hook mcount
#endif
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(function_hook)
retq
END(function_hook)
/* skip is set if stack has been adjusted */
.macro ftrace_caller_setup skip=0
MCOUNT_SAVE_FRAME \skip
/* Load the ftrace_ops into the 3rd parameter */
movq function_trace_op(%rip), %rdx
/* Load ip into the first parameter */
movq RIP(%rsp), %rdi
subq $MCOUNT_INSN_SIZE, %rdi
/* Load the parent_ip into the second parameter */
#ifdef CC_USING_FENTRY
movq SS+16(%rsp), %rsi
#else
movq 8(%rbp), %rsi
#endif
.endm
ENTRY(ftrace_caller)
/* Check if tracing was disabled (quick check) */
cmpl $0, function_trace_stop
jne ftrace_stub
ftrace_caller_setup
/* regs go into 4th parameter (but make it NULL) */
movq $0, %rcx
GLOBAL(ftrace_call)
call ftrace_stub
MCOUNT_RESTORE_FRAME
ftrace_return:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
GLOBAL(ftrace_graph_call)
jmp ftrace_stub
#endif
GLOBAL(ftrace_stub)
retq
END(ftrace_caller)
ENTRY(ftrace_regs_caller)
/* Save the current flags before compare (in SS location)*/
pushfq
/* Check if tracing was disabled (quick check) */
cmpl $0, function_trace_stop
jne ftrace_restore_flags
/* skip=8 to skip flags saved in SS */
ftrace_caller_setup 8
/* Save the rest of pt_regs */
movq %r15, R15(%rsp)
movq %r14, R14(%rsp)
movq %r13, R13(%rsp)
movq %r12, R12(%rsp)
movq %r11, R11(%rsp)
movq %r10, R10(%rsp)
movq %rbp, RBP(%rsp)
movq %rbx, RBX(%rsp)
/* Copy saved flags */
movq SS(%rsp), %rcx
movq %rcx, EFLAGS(%rsp)
/* Kernel segments */
movq $__KERNEL_DS, %rcx
movq %rcx, SS(%rsp)
movq $__KERNEL_CS, %rcx
movq %rcx, CS(%rsp)
/* Stack - skipping return address */
leaq SS+16(%rsp), %rcx
movq %rcx, RSP(%rsp)
/* regs go into 4th parameter */
leaq (%rsp), %rcx
GLOBAL(ftrace_regs_call)
call ftrace_stub
/* Copy flags back to SS, to restore them */
movq EFLAGS(%rsp), %rax
movq %rax, SS(%rsp)
/* Handlers can change the RIP */
movq RIP(%rsp), %rax
movq %rax, SS+8(%rsp)
/* restore the rest of pt_regs */
movq R15(%rsp), %r15
movq R14(%rsp), %r14
movq R13(%rsp), %r13
movq R12(%rsp), %r12
movq R10(%rsp), %r10
movq RBP(%rsp), %rbp
movq RBX(%rsp), %rbx
/* skip=8 to skip flags saved in SS */
MCOUNT_RESTORE_FRAME 8
/* Restore flags */
popfq
jmp ftrace_return
ftrace_restore_flags:
popfq
jmp ftrace_stub
END(ftrace_regs_caller)
#else /* ! CONFIG_DYNAMIC_FTRACE */
ENTRY(function_hook)
cmpl $0, function_trace_stop
jne ftrace_stub
cmpq $ftrace_stub, ftrace_trace_function
jnz trace
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
cmpq $ftrace_stub, ftrace_graph_return
jnz ftrace_graph_caller
cmpq $ftrace_graph_entry_stub, ftrace_graph_entry
jnz ftrace_graph_caller
#endif
GLOBAL(ftrace_stub)
retq
trace:
MCOUNT_SAVE_FRAME
movq RIP(%rsp), %rdi
#ifdef CC_USING_FENTRY
movq SS+16(%rsp), %rsi
#else
movq 8(%rbp), %rsi
#endif
subq $MCOUNT_INSN_SIZE, %rdi
call *ftrace_trace_function
MCOUNT_RESTORE_FRAME
jmp ftrace_stub
END(function_hook)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
MCOUNT_SAVE_FRAME
#ifdef CC_USING_FENTRY
leaq SS+16(%rsp), %rdi
movq $0, %rdx /* No framepointers needed */
#else
leaq 8(%rbp), %rdi
movq (%rbp), %rdx
#endif
movq RIP(%rsp), %rsi
subq $MCOUNT_INSN_SIZE, %rsi
call prepare_ftrace_return
MCOUNT_RESTORE_FRAME
retq
END(ftrace_graph_caller)
GLOBAL(return_to_handler)
subq $24, %rsp
/* Save the return values */
movq %rax, (%rsp)
movq %rdx, 8(%rsp)
movq %rbp, %rdi
call ftrace_return_to_handler
movq %rax, %rdi
movq 8(%rsp), %rdx
movq (%rsp), %rax
addq $24, %rsp
jmp *%rdi
#endif
...@@ -62,9 +62,6 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
* set in the flags member.
*
* ENABLED - set/unset when ftrace_ops is registered/unregistered
* GLOBAL - set manually by ftrace_ops user to denote the ftrace_ops
* is part of the global tracers sharing the same filter
* via set_ftrace_* debugfs files.
* DYNAMIC - set when ftrace_ops is registered to denote dynamically
* allocated ftrace_ops which need special care
* CONTROL - set manually by ftrace_ops user to denote the ftrace_ops
...@@ -96,15 +93,14 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
*/
enum {
FTRACE_OPS_FL_ENABLED = 1 << 0,
FTRACE_OPS_FL_GLOBAL = 1 << 1,
FTRACE_OPS_FL_DYNAMIC = 1 << 2,
FTRACE_OPS_FL_CONTROL = 1 << 3,
FTRACE_OPS_FL_SAVE_REGS = 1 << 4,
FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 5,
FTRACE_OPS_FL_RECURSION_SAFE = 1 << 6,
FTRACE_OPS_FL_STUB = 1 << 7,
FTRACE_OPS_FL_INITIALIZED = 1 << 8,
FTRACE_OPS_FL_DELETED = 1 << 9,
FTRACE_OPS_FL_DYNAMIC = 1 << 1,
FTRACE_OPS_FL_CONTROL = 1 << 2,
FTRACE_OPS_FL_SAVE_REGS = 1 << 3,
FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 4,
FTRACE_OPS_FL_RECURSION_SAFE = 1 << 5,
FTRACE_OPS_FL_STUB = 1 << 6,
FTRACE_OPS_FL_INITIALIZED = 1 << 7,
FTRACE_OPS_FL_DELETED = 1 << 8,
};
/*
...@@ -366,14 +362,12 @@ enum {
* IGNORE - The function is already what we want it to be
* MAKE_CALL - Start tracing the function
* MODIFY_CALL - Stop saving regs for the function
* MODIFY_CALL_REGS - Start saving regs for the function
* MAKE_NOP - Stop tracing the function
*/
enum {
FTRACE_UPDATE_IGNORE,
FTRACE_UPDATE_MAKE_CALL,
FTRACE_UPDATE_MODIFY_CALL,
FTRACE_UPDATE_MODIFY_CALL_REGS,
FTRACE_UPDATE_MAKE_NOP,
};
...@@ -404,6 +398,8 @@ int ftrace_update_record(struct dyn_ftrace *rec, int enable);
int ftrace_test_record(struct dyn_ftrace *rec, int enable);
void ftrace_run_stop_machine(int command);
unsigned long ftrace_location(unsigned long ip);
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);
extern ftrace_func_t ftrace_trace_function;
......
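The two declarations above replace the x86-local get_ftrace_addr() and
get_ftrace_old_addr() helpers removed earlier; the generic versions
live in kernel/trace/ftrace.c, which is not shown in this diff. A
sketch of the expected behavior, assuming they mirror the removed x86
code:

unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
{
	/* the address the record *should* call (save regs or not) */
	if (rec->flags & FTRACE_FL_REGS)
		return (unsigned long)FTRACE_REGS_ADDR;
	return (unsigned long)FTRACE_ADDR;
}

unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
{
	/* the address the record *currently* calls (_EN = enabled state) */
	if (rec->flags & FTRACE_FL_REGS_EN)
		return (unsigned long)FTRACE_REGS_ADDR;
	return (unsigned long)FTRACE_ADDR;
}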
...@@ -38,6 +38,9 @@ const char *ftrace_print_symbols_seq_u64(struct trace_seq *p,
*symbol_array);
#endif
const char *ftrace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
unsigned int bitmask_size);
const char *ftrace_print_hex_seq(struct trace_seq *p,
const unsigned char *buf, int len);
......
...@@ -355,7 +355,7 @@ static inline void reset_current_kprobe(void)
static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void)
{
return (&__get_cpu_var(kprobe_ctlblk));
return this_cpu_ptr(&kprobe_ctlblk);
}
int register_kprobe(struct kprobe *p);
......
...@@ -46,6 +46,9 @@ extern int trace_seq_putmem_hex(struct trace_seq *s, const void *mem,
extern void *trace_seq_reserve(struct trace_seq *s, size_t len);
extern int trace_seq_path(struct trace_seq *s, const struct path *path);
extern int trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp,
int nmaskbits);
#else /* CONFIG_TRACING */
static inline int trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
...@@ -57,6 +60,13 @@ trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
return 0;
}
static inline int
trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp,
int nmaskbits)
{
return 0;
}
static inline int trace_print_seq(struct seq_file *m, struct trace_seq *s)
{
return 0;
......
...@@ -185,6 +185,11 @@ extern void syscall_unregfunc(void);
static inline void \
check_trace_callback_type_##name(void (*cb)(data_proto)) \
{ \
} \
static inline bool \
trace_##name##_enabled(void) \
{ \
return static_key_false(&__tracepoint_##name.key); \
}
/*
...@@ -230,6 +235,11 @@ extern void syscall_unregfunc(void);
} \
static inline void check_trace_callback_type_##name(void (*cb)(data_proto)) \
{ \
} \
static inline bool \
trace_##name##_enabled(void) \
{ \
return false; \
}
#define DEFINE_TRACE_FN(name, reg, unreg)
......
...@@ -53,6 +53,9 @@
#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)
#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(char, item, -1)
#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args
...@@ -128,6 +131,9 @@
#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)
#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
struct ftrace_data_offsets_##call { \
...@@ -197,9 +203,22 @@
#define __get_dynamic_array(field) \
((void *)__entry + (__entry->__data_loc_##field & 0xffff))
#undef __get_dynamic_array_len
#define __get_dynamic_array_len(field) \
((__entry->__data_loc_##field >> 16) & 0xffff)
#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)
#undef __get_bitmask
#define __get_bitmask(field) \
({ \
void *__bitmask = __get_dynamic_array(field); \
unsigned int __bitmask_size; \
__bitmask_size = __get_dynamic_array_len(field); \
ftrace_print_bitmask_seq(p, __bitmask, __bitmask_size); \
})
#undef __print_flags
#define __print_flags(flag, delim, flag_array...) \
({ \
...@@ -322,6 +341,9 @@ static struct trace_event_functions ftrace_event_type_funcs_##call = { \
#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)
#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \
static int notrace __init \
...@@ -372,6 +394,29 @@ ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
#define __string(item, src) __dynamic_array(char, item, \
strlen((src) ? (const char *)(src) : "(null)") + 1)
/*
* __bitmask_size_in_bytes_raw is the number of bytes needed to hold
* num_possible_cpus().
*/
#define __bitmask_size_in_bytes_raw(nr_bits) \
(((nr_bits) + 7) / 8)
#define __bitmask_size_in_longs(nr_bits) \
((__bitmask_size_in_bytes_raw(nr_bits) + \
((BITS_PER_LONG / 8) - 1)) / (BITS_PER_LONG / 8))
/*
* __bitmask_size_in_bytes is the number of bytes needed to hold
* num_possible_cpus() padded out to the nearest long. This is what
* is saved in the buffer, just to be consistent.
*/
#define __bitmask_size_in_bytes(nr_bits) \
(__bitmask_size_in_longs(nr_bits) * (BITS_PER_LONG / 8))
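/*
 * Worked example (assumption: BITS_PER_LONG == 64) for nr_bits = 36:
 *   __bitmask_size_in_bytes_raw(36) = (36 + 7) / 8 = 5 bytes
 *   __bitmask_size_in_longs(36)     = (5 + 7) / 8  = 1 long
 *   __bitmask_size_in_bytes(36)     = 1 * 8        = 8 bytes
 */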
#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, \
__bitmask_size_in_longs(nr_bits))
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static inline notrace int ftrace_get_offsets_##call( \
...@@ -513,12 +558,22 @@ static inline notrace int ftrace_get_offsets_##call( \
__entry->__data_loc_##item = __data_offsets.item;
#undef __string
#define __string(item, src) __dynamic_array(char, item, -1) \
#define __string(item, src) __dynamic_array(char, item, -1)
#undef __assign_str
#define __assign_str(dst, src) \
strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");
#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
#undef __get_bitmask
#define __get_bitmask(field) (char *)__get_dynamic_array(field)
#undef __assign_bitmask
#define __assign_bitmask(dst, src, nr_bits) \
memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))
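Putting the pieces together, a hypothetical event that records a
cpumask with the new helpers could look like the sketch below (foo_mask
and its argument are invented for illustration; the pattern simply
follows the macros above):

TRACE_EVENT(foo_mask,
	TP_PROTO(const struct cpumask *mask),
	TP_ARGS(mask),
	TP_STRUCT__entry(
		__bitmask(cpus, num_possible_cpus())
	),
	TP_fast_assign(
		__assign_bitmask(cpus, cpumask_bits(mask),
				 num_possible_cpus());
	),
	TP_printk("cpus=%s", __get_bitmask(cpus))
);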
#undef TP_fast_assign
#define TP_fast_assign(args...) args
...@@ -585,7 +640,9 @@ static inline void ftrace_test_probe_##call(void) \
#undef __print_symbolic
#undef __print_hex
#undef __get_dynamic_array
#undef __get_dynamic_array_len
#undef __get_str
#undef __get_bitmask
#undef TP_printk
#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)
...@@ -648,9 +705,16 @@ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
#define __get_dynamic_array(field) \
((void *)__entry + (__entry->__data_loc_##field & 0xffff))
#undef __get_dynamic_array_len
#define __get_dynamic_array_len(field) \
((__entry->__data_loc_##field >> 16) & 0xffff)
#undef __get_str
#define __get_str(field) (char *)__get_dynamic_array(field)
#undef __get_bitmask
#define __get_bitmask(field) (char *)__get_dynamic_array(field)
#undef __perf_addr
#define __perf_addr(a) (__addr = (a))
......
...@@ -535,6 +535,36 @@ config MMIOTRACE_TEST
Say N, unless you absolutely know what you are doing.
config TRACEPOINT_BENCHMARK
bool "Add tracepoint that benchmarks tracepoints"
help
This option creates the tracepoint "benchmark:benchmark_event".
When the tracepoint is enabled, it kicks off a kernel thread that
goes into an infinite loop (calling cond_resched() to let other tasks
run), and calls the tracepoint. Each iteration records the time
it took to write to the tracepoint, and on the next iteration that
data is passed to the tracepoint itself. That is, the tracepoint
will report the time it took to do the previous tracepoint write.
The string written to the tracepoint is a static string of 128 bytes
to keep the time the same. The initial string is simply a write of
"START". The second string records the cold cache time of the first
write which is not added to the rest of the calculations.
As it is a tight loop, it benchmarks as hot cache. That's fine because
we care most about hot paths that are probably in cache already.
An example of the output:
START
first=3672 [COLD CACHED]
last=632 first=3672 max=632 min=632 avg=316 std=446 std^2=199712
last=278 first=3672 max=632 min=278 avg=303 std=316 std^2=100337
last=277 first=3672 max=632 min=277 avg=296 std=258 std^2=67064
last=273 first=3672 max=632 min=273 avg=292 std=224 std^2=50411
last=273 first=3672 max=632 min=273 avg=288 std=200 std^2=40389
last=281 first=3672 max=632 min=273 avg=287 std=183 std^2=33666
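With the option built in, the benchmark is driven from the normal
event enable file; for example (assuming the usual debugfs mount
point):

  echo 1 > /sys/kernel/debug/tracing/events/benchmark/benchmark_event/enable
  cat /sys/kernel/debug/tracing/trace_pipe
  echo 0 > /sys/kernel/debug/tracing/events/benchmark/benchmark_event/enable

Enabling the event starts the benchmark thread and disabling it stops
the thread and resets the counters (see trace_benchmark_reg() and
trace_benchmark_unreg() further down).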
config RING_BUFFER_BENCHMARK
tristate "Ring buffer benchmark stress tester"
depends on RING_BUFFER
......
...@@ -17,6 +17,7 @@ ifdef CONFIG_TRACING_BRANCHES
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
endif
CFLAGS_trace_benchmark.o := -I$(src)
CFLAGS_trace_events_filter.o := -I$(src)
obj-$(CONFIG_TRACE_CLOCK) += trace_clock.o
...@@ -62,4 +63,6 @@ endif
obj-$(CONFIG_PROBE_EVENTS) += trace_probe.o
obj-$(CONFIG_UPROBE_EVENT) += trace_uprobe.o
obj-$(CONFIG_TRACEPOINT_BENCHMARK) += trace_benchmark.o
libftrace-y := ftrace.o
...@@ -190,7 +190,22 @@ struct trace_array {
*/
struct trace_buffer max_buffer;
bool allocated_snapshot;
unsigned long max_latency;
#endif
/*
* max_lock is used to protect the swapping of buffers
* when taking a max snapshot. The buffers themselves are
* protected by per_cpu spinlocks. But the action of the swap
* needs its own lock.
*
* This is defined as an arch_spinlock_t in order to help
* with performance when lockdep debugging is enabled.
*
* It is also used in other places outside of update_max_tr,
* so it needs to be defined outside of the
* CONFIG_TRACER_MAX_TRACE.
*/
arch_spinlock_t max_lock;
int buffer_disabled;
#ifdef CONFIG_FTRACE_SYSCALLS
int sys_refcount_enter;
...@@ -237,6 +252,9 @@ static inline struct trace_array *top_trace_array(void)
{
struct trace_array *tr;
if (list_empty(ftrace_trace_arrays.prev))
return NULL;
tr = list_entry(ftrace_trace_arrays.prev,
typeof(*tr), list);
WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
...@@ -323,7 +341,6 @@ struct tracer_flags {
* @stop: called when tracing is paused (echo 0 > tracing_enabled)
* @open: called when the trace file is opened
* @pipe_open: called when the trace_pipe file is opened
* @wait_pipe: override how the user waits for traces on trace_pipe
* @close: called when the trace file is released
* @pipe_close: called when the trace_pipe file is released
* @read: override the default read callback on trace_pipe
...@@ -342,7 +359,6 @@ struct tracer {
void (*stop)(struct trace_array *tr);
void (*open)(struct trace_iterator *iter);
void (*pipe_open)(struct trace_iterator *iter);
void (*wait_pipe)(struct trace_iterator *iter);
void (*close)(struct trace_iterator *iter);
void (*pipe_close)(struct trace_iterator *iter);
ssize_t (*read)(struct trace_iterator *iter,
...@@ -416,13 +432,7 @@ enum {
TRACE_FTRACE_IRQ_BIT,
TRACE_FTRACE_SIRQ_BIT,
/* GLOBAL_BITs must be greater than FTRACE_BITs */
/* INTERNAL_BITs must be greater than FTRACE_BITs */
TRACE_GLOBAL_BIT,
TRACE_GLOBAL_NMI_BIT,
TRACE_GLOBAL_IRQ_BIT,
TRACE_GLOBAL_SIRQ_BIT,
/* INTERNAL_BITs must be greater than GLOBAL_BITs */
TRACE_INTERNAL_BIT,
TRACE_INTERNAL_NMI_BIT,
TRACE_INTERNAL_IRQ_BIT,
...@@ -449,9 +459,6 @@ enum {
#define TRACE_FTRACE_START TRACE_FTRACE_BIT
#define TRACE_FTRACE_MAX ((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)
#define TRACE_GLOBAL_START TRACE_GLOBAL_BIT
#define TRACE_GLOBAL_MAX ((1 << (TRACE_GLOBAL_START + TRACE_CONTEXT_BITS)) - 1)
#define TRACE_LIST_START TRACE_INTERNAL_BIT
#define TRACE_LIST_MAX ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
...@@ -560,8 +567,6 @@ void trace_init_global_iter(struct trace_iterator *iter);
void tracing_iter_reset(struct trace_iterator *iter, int cpu);
void poll_wait_pipe(struct trace_iterator *iter);
void tracing_sched_switch_trace(struct trace_array *tr,
struct task_struct *prev,
struct task_struct *next,
...@@ -608,8 +613,6 @@ extern unsigned long nsecs_to_usecs(unsigned long nsecs);
extern unsigned long tracing_thresh;
#ifdef CONFIG_TRACER_MAX_TRACE
extern unsigned long tracing_max_latency;
void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
struct task_struct *tsk, int cpu);
...@@ -724,6 +727,8 @@ extern unsigned long trace_flags;
#define TRACE_GRAPH_PRINT_PROC 0x8
#define TRACE_GRAPH_PRINT_DURATION 0x10
#define TRACE_GRAPH_PRINT_ABS_TIME 0x20
#define TRACE_GRAPH_PRINT_IRQS 0x40
#define TRACE_GRAPH_PRINT_TAIL 0x80
#define TRACE_GRAPH_PRINT_FILL_SHIFT 28
#define TRACE_GRAPH_PRINT_FILL_MASK (0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)
...@@ -823,6 +828,10 @@ extern int ftrace_is_dead(void);
int ftrace_create_function_files(struct trace_array *tr,
struct dentry *parent);
void ftrace_destroy_function_files(struct trace_array *tr);
void ftrace_init_global_array_ops(struct trace_array *tr);
void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
void ftrace_reset_array_ops(struct trace_array *tr);
int using_ftrace_ops_list_func(void);
#else
static inline int ftrace_trace_task(struct task_struct *task)
{
...@@ -836,6 +845,11 @@ ftrace_create_function_files(struct trace_array *tr,
return 0;
}
static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
static inline __init void
ftrace_init_global_array_ops(struct trace_array *tr) { }
static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
/* ftrace_func_t type is not defined, use macro instead of static inline */
#define ftrace_init_array_ops(tr, func) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER */
#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
......
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/trace_clock.h>
#define CREATE_TRACE_POINTS
#include "trace_benchmark.h"
static struct task_struct *bm_event_thread;
static char bm_str[BENCHMARK_EVENT_STRLEN] = "START";
static u64 bm_total;
static u64 bm_totalsq;
static u64 bm_last;
static u64 bm_max;
static u64 bm_min;
static u64 bm_first;
static u64 bm_cnt;
static u64 bm_stddev;
static unsigned int bm_avg;
static unsigned int bm_std;
/*
* This gets called in a loop recording the time it took to write
* the tracepoint. What it writes is the time statistics of the last
* tracepoint write. As there is nothing to write the first time
* it simply writes "START". As the first write is cold cache and
* the rest is hot, we save off that time in bm_first and it is
* reported as "first", which is shown in the second write to the
* tracepoint. The "first" field is written within the statistics from
* then on but never changes.
*/
static void trace_do_benchmark(void)
{
u64 start;
u64 stop;
u64 delta;
u64 stddev;
u64 seed;
u64 last_seed;
unsigned int avg;
unsigned int std = 0;
/* Only run if the tracepoint is actually active */
if (!trace_benchmark_event_enabled())
return;
local_irq_disable();
start = trace_clock_local();
trace_benchmark_event(bm_str);
stop = trace_clock_local();
local_irq_enable();
bm_cnt++;
delta = stop - start;
/*
* The first read is cold cached, keep it separate from the
* other calculations.
*/
if (bm_cnt == 1) {
bm_first = delta;
scnprintf(bm_str, BENCHMARK_EVENT_STRLEN,
"first=%llu [COLD CACHED]", bm_first);
return;
}
bm_last = delta;
if (delta > bm_max)
bm_max = delta;
if (!bm_min || delta < bm_min)
bm_min = delta;
/*
* When bm_cnt is greater than UINT_MAX, it breaks the statistics
* accounting. Freeze the statistics when that happens.
* We should have enough data for the avg and stddev anyway.
*/
if (bm_cnt > UINT_MAX) {
scnprintf(bm_str, BENCHMARK_EVENT_STRLEN,
"last=%llu first=%llu max=%llu min=%llu ** avg=%u std=%d std^2=%lld",
bm_last, bm_first, bm_max, bm_min, bm_avg, bm_std, bm_stddev);
return;
}
bm_total += delta;
bm_totalsq += delta * delta;
if (bm_cnt > 1) {
/*
* Apply Welford's method to calculate standard deviation:
* s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
*/
stddev = (u64)bm_cnt * bm_totalsq - bm_total * bm_total;
do_div(stddev, (u32)bm_cnt);
do_div(stddev, (u32)bm_cnt - 1);
} else
stddev = 0;
delta = bm_total;
do_div(delta, bm_cnt);
avg = delta;
if (stddev > 0) {
int i = 0;
/*
* stddev is the square of standard deviation but
* we want the actual number. Use the average
* as our seed to find the std.
*
* The next try is:
* x = (x + N/x) / 2
*
* Where N is the squared number to find the square
* root of.
*/
seed = avg;
do {
last_seed = seed;
seed = stddev;
if (!last_seed)
break;
do_div(seed, last_seed);
seed += last_seed;
do_div(seed, 2);
} while (i++ < 10 && last_seed != seed);
std = seed;
}
scnprintf(bm_str, BENCHMARK_EVENT_STRLEN,
"last=%llu first=%llu max=%llu min=%llu avg=%u std=%d std^2=%lld",
bm_last, bm_first, bm_max, bm_min, avg, std, stddev);
bm_std = std;
bm_avg = avg;
bm_stddev = stddev;
}
static int benchmark_event_kthread(void *arg)
{
/* sleep a bit to make sure the tracepoint gets activated */
msleep(100);
while (!kthread_should_stop()) {
trace_do_benchmark();
/*
* We don't go to sleep, but let others
* run as well.
*/
cond_resched();
}
return 0;
}
/*
* When the benchmark tracepoint is enabled, it calls this
* function and the thread that calls the tracepoint is created.
*/
void trace_benchmark_reg(void)
{
bm_event_thread = kthread_run(benchmark_event_kthread,
NULL, "event_benchmark");
WARN_ON(!bm_event_thread);
}
/*
* When the benchmark tracepoint is disabled, it calls this
* function and the thread that calls the tracepoint is deleted
* and all the numbers are reset.
*/
void trace_benchmark_unreg(void)
{
if (!bm_event_thread)
return;
kthread_stop(bm_event_thread);
strcpy(bm_str, "START");
bm_total = 0;
bm_totalsq = 0;
bm_last = 0;
bm_max = 0;
bm_min = 0;
bm_cnt = 0;
/* These don't need to be reset but reset them anyway */
bm_first = 0;
bm_std = 0;
bm_avg = 0;
bm_stddev = 0;
}
#undef TRACE_SYSTEM
#define TRACE_SYSTEM benchmark
#if !defined(_TRACE_BENCHMARK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BENCHMARK_H
#include <linux/tracepoint.h>
extern void trace_benchmark_reg(void);
extern void trace_benchmark_unreg(void);
#define BENCHMARK_EVENT_STRLEN 128
TRACE_EVENT_FN(benchmark_event,
TP_PROTO(const char *str),
TP_ARGS(str),
TP_STRUCT__entry(
__array( char, str, BENCHMARK_EVENT_STRLEN )
),
TP_fast_assign(
memcpy(__entry->str, str, BENCHMARK_EVENT_STRLEN);
),
TP_printk("%s", __entry->str),
trace_benchmark_reg, trace_benchmark_unreg
);
#endif /* _TRACE_BENCHMARK_H */
#undef TRACE_INCLUDE_FILE
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_benchmark
/* This part must be outside protection */
#include <trace/define_trace.h>
...@@ -574,6 +574,9 @@ int trace_set_clr_event(const char *system, const char *event, int set)
{
struct trace_array *tr = top_trace_array();
if (!tr)
return -ENODEV;
return __ftrace_set_clr_event(tr, NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);
...@@ -2065,6 +2068,9 @@ event_enable_func(struct ftrace_hash *hash,
bool enable;
int ret;
if (!tr)
return -ENODEV;
/* hash funcs only work with set_ftrace_filter */
if (!enabled || !param)
return -EINVAL;
...@@ -2396,6 +2402,9 @@ static __init int event_trace_enable(void)
char *token;
int ret;
if (!tr)
return -ENODEV;
for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
call = *iter;
...@@ -2442,6 +2451,8 @@ static __init int event_trace_init(void)
int ret;
tr = top_trace_array();
if (!tr)
return -ENODEV;
d_tracer = tracing_init_dentry();
if (!d_tracer)
...@@ -2535,6 +2546,8 @@ static __init void event_trace_self_tests(void)
int ret;
tr = top_trace_array();
if (!tr)
return;
pr_info("Running tests on trace events:\n");
......
...@@ -26,8 +26,6 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *pt_regs);
static struct ftrace_ops trace_ops;
static struct ftrace_ops trace_stack_ops;
static struct tracer_flags func_flags;
/* Our option */
...@@ -83,28 +81,24 @@ void ftrace_destroy_function_files(struct trace_array *tr)
static int function_trace_init(struct trace_array *tr)
{
struct ftrace_ops *ops;
if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
/* There's only one global tr */
if (!trace_ops.private) {
trace_ops.private = tr;
trace_stack_ops.private = tr;
}
if (func_flags.val & TRACE_FUNC_OPT_STACK)
ops = &trace_stack_ops;
else
ops = &trace_ops;
tr->ops = ops;
} else if (!tr->ops) {
/*
* Instance trace_arrays get their ops allocated
* at instance creation. Unless it failed
* the allocation.
*/
return -ENOMEM;
}
ftrace_func_t func;
/*
* Instance trace_arrays get their ops allocated
* at instance creation. Unless it failed
* the allocation.
*/
if (!tr->ops)
return -ENOMEM;
/* Currently only the global instance can do stack tracing */
if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
func_flags.val & TRACE_FUNC_OPT_STACK)
func = function_stack_trace_call;
else
func = function_trace_call;
ftrace_init_array_ops(tr, func);
tr->trace_buffer.cpu = get_cpu();
put_cpu();
...@@ -118,6 +112,7 @@ static void function_trace_reset(struct trace_array *tr)
{
tracing_stop_function_trace(tr);
tracing_stop_cmdline_record();
ftrace_reset_array_ops(tr);
}
static void function_trace_start(struct trace_array *tr)
...@@ -199,18 +194,6 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
local_irq_restore(flags);
}
static struct ftrace_ops trace_ops __read_mostly =
{
.func = function_trace_call,
.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};
static struct ftrace_ops trace_stack_ops __read_mostly =
{
.func = function_stack_trace_call,
.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};
static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
...@@ -248,10 +231,10 @@ func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
unregister_ftrace_function(tr->ops);
if (set) {
tr->ops = &trace_stack_ops;
tr->ops->func = function_stack_trace_call;
register_ftrace_function(tr->ops);
} else {
tr->ops = &trace_ops;
tr->ops->func = function_trace_call;
register_ftrace_function(tr->ops);
}
...@@ -269,7 +252,6 @@ static struct tracer function_trace __tracer_data =
.init = function_trace_init,
.reset = function_trace_reset,
.start = function_trace_start,
.wait_pipe = poll_wait_pipe,
.flags = &func_flags,
.set_flag = func_set_flag,
.allow_instances = true,
......
...@@ -38,15 +38,6 @@ struct fgraph_data {
#define TRACE_GRAPH_INDENT 2
/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN 0x1
#define TRACE_GRAPH_PRINT_CPU 0x2
#define TRACE_GRAPH_PRINT_OVERHEAD 0x4
#define TRACE_GRAPH_PRINT_PROC 0x8
#define TRACE_GRAPH_PRINT_DURATION 0x10
#define TRACE_GRAPH_PRINT_ABS_TIME 0x20
#define TRACE_GRAPH_PRINT_IRQS 0x40
static unsigned int max_depth;
static struct tracer_opt trace_opts[] = {
...@@ -64,11 +55,13 @@ static struct tracer_opt trace_opts[] = {
{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
/* Display interrupts */
{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
/* Display function name after trailing } */
{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
{ } /* Empty entry */
};
static struct tracer_flags tracer_flags = {
/* Don't display overruns and proc by default */
/* Don't display overruns, proc, or tail by default */
.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS,
.opts = trace_opts
...@@ -1176,9 +1169,10 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
* If the return function does not have a matching entry,
* then the entry was lost. Instead of just printing
* the '}' and letting the user guess what function this
* belongs to, write out the function name.
* belongs to, write out the function name. Always do
* that if the funcgraph-tail option is enabled.
*/
if (func_match) {
if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL)) {
ret = trace_seq_puts(s, "}\n");
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
...@@ -1505,7 +1499,6 @@ static struct tracer graph_trace __tracer_data = {
.pipe_open = graph_trace_open,
.close = graph_trace_close,
.pipe_close = graph_trace_close,
.wait_pipe = poll_wait_pipe,
.init = graph_trace_init,
.reset = graph_trace_reset,
.print_line = print_graph_function,
......
...@@ -151,12 +151,6 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
atomic_dec(&data->disabled);
}
static struct ftrace_ops trace_ops __read_mostly =
{
.func = irqsoff_tracer_call,
.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};
#endif /* CONFIG_FUNCTION_TRACER */ #endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER #ifdef CONFIG_FUNCTION_GRAPH_TRACER
...@@ -176,7 +170,7 @@ irqsoff_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) ...@@ -176,7 +170,7 @@ irqsoff_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
for_each_possible_cpu(cpu) for_each_possible_cpu(cpu)
per_cpu(tracing_cpu, cpu) = 0; per_cpu(tracing_cpu, cpu) = 0;
tracing_max_latency = 0; tr->max_latency = 0;
tracing_reset_online_cpus(&irqsoff_trace->trace_buffer); tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);
return start_irqsoff_tracer(irqsoff_trace, set); return start_irqsoff_tracer(irqsoff_trace, set);
...@@ -303,13 +297,13 @@ static void irqsoff_print_header(struct seq_file *s) ...@@ -303,13 +297,13 @@ static void irqsoff_print_header(struct seq_file *s)
/* /*
* Should this new latency be reported/recorded? * Should this new latency be reported/recorded?
*/ */
static int report_latency(cycle_t delta) static int report_latency(struct trace_array *tr, cycle_t delta)
{ {
if (tracing_thresh) { if (tracing_thresh) {
if (delta < tracing_thresh) if (delta < tracing_thresh)
return 0; return 0;
} else { } else {
if (delta <= tracing_max_latency) if (delta <= tr->max_latency)
return 0; return 0;
} }
return 1; return 1;
...@@ -333,13 +327,13 @@ check_critical_timing(struct trace_array *tr, ...@@ -333,13 +327,13 @@ check_critical_timing(struct trace_array *tr,
pc = preempt_count(); pc = preempt_count();
if (!report_latency(delta)) if (!report_latency(tr, delta))
goto out; goto out;
raw_spin_lock_irqsave(&max_trace_lock, flags); raw_spin_lock_irqsave(&max_trace_lock, flags);
/* check if we are still the max latency */ /* check if we are still the max latency */
if (!report_latency(delta)) if (!report_latency(tr, delta))
goto out_unlock; goto out_unlock;
__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc); __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
...@@ -352,7 +346,7 @@ check_critical_timing(struct trace_array *tr, ...@@ -352,7 +346,7 @@ check_critical_timing(struct trace_array *tr,
data->critical_end = parent_ip; data->critical_end = parent_ip;
if (likely(!is_tracing_stopped())) { if (likely(!is_tracing_stopped())) {
tracing_max_latency = delta; tr->max_latency = delta;
update_max_tr_single(tr, current, cpu); update_max_tr_single(tr, current, cpu);
} }
...@@ -531,7 +525,7 @@ void trace_preempt_off(unsigned long a0, unsigned long a1) ...@@ -531,7 +525,7 @@ void trace_preempt_off(unsigned long a0, unsigned long a1)
} }
#endif /* CONFIG_PREEMPT_TRACER */ #endif /* CONFIG_PREEMPT_TRACER */
static int register_irqsoff_function(int graph, int set) static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{ {
int ret; int ret;
...@@ -543,7 +537,7 @@ static int register_irqsoff_function(int graph, int set) ...@@ -543,7 +537,7 @@ static int register_irqsoff_function(int graph, int set)
ret = register_ftrace_graph(&irqsoff_graph_return, ret = register_ftrace_graph(&irqsoff_graph_return,
&irqsoff_graph_entry); &irqsoff_graph_entry);
else else
ret = register_ftrace_function(&trace_ops); ret = register_ftrace_function(tr->ops);
if (!ret) if (!ret)
function_enabled = true; function_enabled = true;
...@@ -551,7 +545,7 @@ static int register_irqsoff_function(int graph, int set) ...@@ -551,7 +545,7 @@ static int register_irqsoff_function(int graph, int set)
return ret; return ret;
} }
static void unregister_irqsoff_function(int graph) static void unregister_irqsoff_function(struct trace_array *tr, int graph)
{ {
if (!function_enabled) if (!function_enabled)
return; return;
...@@ -559,17 +553,17 @@ static void unregister_irqsoff_function(int graph) ...@@ -559,17 +553,17 @@ static void unregister_irqsoff_function(int graph)
if (graph) if (graph)
unregister_ftrace_graph(); unregister_ftrace_graph();
else else
unregister_ftrace_function(&trace_ops); unregister_ftrace_function(tr->ops);
function_enabled = false; function_enabled = false;
} }
static void irqsoff_function_set(int set) static void irqsoff_function_set(struct trace_array *tr, int set)
{ {
if (set) if (set)
register_irqsoff_function(is_graph(), 1); register_irqsoff_function(tr, is_graph(), 1);
else else
unregister_irqsoff_function(is_graph()); unregister_irqsoff_function(tr, is_graph());
} }
static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set) static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
...@@ -577,7 +571,7 @@ static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set) ...@@ -577,7 +571,7 @@ static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
struct tracer *tracer = tr->current_trace; struct tracer *tracer = tr->current_trace;
if (mask & TRACE_ITER_FUNCTION) if (mask & TRACE_ITER_FUNCTION)
irqsoff_function_set(set); irqsoff_function_set(tr, set);
return trace_keep_overwrite(tracer, mask, set); return trace_keep_overwrite(tracer, mask, set);
} }
...@@ -586,7 +580,7 @@ static int start_irqsoff_tracer(struct trace_array *tr, int graph) ...@@ -586,7 +580,7 @@ static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{ {
int ret; int ret;
ret = register_irqsoff_function(graph, 0); ret = register_irqsoff_function(tr, graph, 0);
if (!ret && tracing_is_enabled()) if (!ret && tracing_is_enabled())
tracer_enabled = 1; tracer_enabled = 1;
...@@ -600,25 +594,37 @@ static void stop_irqsoff_tracer(struct trace_array *tr, int graph) ...@@ -600,25 +594,37 @@ static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{ {
tracer_enabled = 0; tracer_enabled = 0;
unregister_irqsoff_function(graph); unregister_irqsoff_function(tr, graph);
} }
static void __irqsoff_tracer_init(struct trace_array *tr) static bool irqsoff_busy;
static int __irqsoff_tracer_init(struct trace_array *tr)
{ {
if (irqsoff_busy)
return -EBUSY;
save_flags = trace_flags; save_flags = trace_flags;
/* non overwrite screws up the latency tracers */ /* non overwrite screws up the latency tracers */
set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1); set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1); set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
tracing_max_latency = 0; tr->max_latency = 0;
irqsoff_trace = tr; irqsoff_trace = tr;
/* make sure that the tracer is visible */ /* make sure that the tracer is visible */
smp_wmb(); smp_wmb();
tracing_reset_online_cpus(&tr->trace_buffer); tracing_reset_online_cpus(&tr->trace_buffer);
if (start_irqsoff_tracer(tr, is_graph())) ftrace_init_array_ops(tr, irqsoff_tracer_call);
/* Only toplevel instance supports graph tracing */
if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
is_graph())))
printk(KERN_ERR "failed to start irqsoff tracer\n"); printk(KERN_ERR "failed to start irqsoff tracer\n");
irqsoff_busy = true;
return 0;
} }
static void irqsoff_tracer_reset(struct trace_array *tr) static void irqsoff_tracer_reset(struct trace_array *tr)
...@@ -630,6 +636,9 @@ static void irqsoff_tracer_reset(struct trace_array *tr) ...@@ -630,6 +636,9 @@ static void irqsoff_tracer_reset(struct trace_array *tr)
set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag); set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag); set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
ftrace_reset_array_ops(tr);
irqsoff_busy = false;
} }
static void irqsoff_tracer_start(struct trace_array *tr) static void irqsoff_tracer_start(struct trace_array *tr)
...@@ -647,8 +656,7 @@ static int irqsoff_tracer_init(struct trace_array *tr) ...@@ -647,8 +656,7 @@ static int irqsoff_tracer_init(struct trace_array *tr)
{ {
trace_type = TRACER_IRQS_OFF; trace_type = TRACER_IRQS_OFF;
__irqsoff_tracer_init(tr); return __irqsoff_tracer_init(tr);
return 0;
} }
static struct tracer irqsoff_tracer __read_mostly = static struct tracer irqsoff_tracer __read_mostly =
{ {
...@@ -668,6 +676,7 @@ static struct tracer irqsoff_tracer __read_mostly = ...@@ -668,6 +676,7 @@ static struct tracer irqsoff_tracer __read_mostly =
#endif #endif
.open = irqsoff_trace_open, .open = irqsoff_trace_open,
.close = irqsoff_trace_close, .close = irqsoff_trace_close,
.allow_instances = true,
.use_max_tr = true, .use_max_tr = true,
}; };
# define register_irqsoff(trace) register_tracer(&trace) # define register_irqsoff(trace) register_tracer(&trace)
...@@ -680,8 +689,7 @@ static int preemptoff_tracer_init(struct trace_array *tr) ...@@ -680,8 +689,7 @@ static int preemptoff_tracer_init(struct trace_array *tr)
{ {
trace_type = TRACER_PREEMPT_OFF; trace_type = TRACER_PREEMPT_OFF;
__irqsoff_tracer_init(tr); return __irqsoff_tracer_init(tr);
return 0;
} }
static struct tracer preemptoff_tracer __read_mostly = static struct tracer preemptoff_tracer __read_mostly =
...@@ -702,6 +710,7 @@ static struct tracer preemptoff_tracer __read_mostly = ...@@ -702,6 +710,7 @@ static struct tracer preemptoff_tracer __read_mostly =
#endif #endif
.open = irqsoff_trace_open, .open = irqsoff_trace_open,
.close = irqsoff_trace_close, .close = irqsoff_trace_close,
.allow_instances = true,
.use_max_tr = true, .use_max_tr = true,
}; };
# define register_preemptoff(trace) register_tracer(&trace) # define register_preemptoff(trace) register_tracer(&trace)
...@@ -716,8 +725,7 @@ static int preemptirqsoff_tracer_init(struct trace_array *tr) ...@@ -716,8 +725,7 @@ static int preemptirqsoff_tracer_init(struct trace_array *tr)
{ {
trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF; trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;
__irqsoff_tracer_init(tr); return __irqsoff_tracer_init(tr);
return 0;
} }
static struct tracer preemptirqsoff_tracer __read_mostly = static struct tracer preemptirqsoff_tracer __read_mostly =
...@@ -738,6 +746,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly = ...@@ -738,6 +746,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
#endif #endif
.open = irqsoff_trace_open, .open = irqsoff_trace_open,
.close = irqsoff_trace_close, .close = irqsoff_trace_close,
.allow_instances = true,
.use_max_tr = true, .use_max_tr = true,
}; };
......
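Every latency tracer in this series goes through the same conversion: the file-local ftrace_ops with FTRACE_OPS_FL_GLOBAL disappears, the callback is handed to the trace_array via ftrace_init_array_ops(), and a busy flag refuses a second simultaneous user. A condensed sketch of that lifecycle, with hypothetical names (example_tracer_call, example_busy) standing in for the tracer-specific ones:

static bool example_busy;	/* one instance may use the tracer at a time */

static int example_tracer_init(struct trace_array *tr)
{
	if (example_busy)
		return -EBUSY;

	tr->max_latency = 0;	/* the max latency is now per trace_array */
	ftrace_init_array_ops(tr, example_tracer_call);	/* sets up tr->ops */
	if (register_ftrace_function(tr->ops))
		return -ENODEV;	/* sketch-level error handling */
	example_busy = true;
	return 0;
}

static void example_tracer_reset(struct trace_array *tr)
{
	unregister_ftrace_function(tr->ops);
	ftrace_reset_array_ops(tr);	/* undoes ftrace_init_array_ops() */
	example_busy = false;
}

Registering through tr->ops instead of one shared global ops is what makes the .allow_instances = true entries above safe: each instance traces into its own buffer through its own ops.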
@@ -1377,6 +1377,9 @@ static __init int kprobe_trace_self_tests_init(void)
 	struct trace_kprobe *tk;
 	struct ftrace_event_file *file;

+	if (tracing_is_disabled())
+		return -ENODEV;
+
 	target = kprobe_trace_selftest_target;

 	pr_info("Testing kprobe tracing: ");
...
@@ -91,7 +91,6 @@ struct tracer nop_trace __read_mostly =
 	.name		= "nop",
 	.init		= nop_trace_init,
 	.reset		= nop_trace_reset,
-	.wait_pipe	= poll_wait_pipe,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest	= trace_selftest_startup_nop,
 #endif
...
@@ -125,6 +125,34 @@ trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
 }
 EXPORT_SYMBOL_GPL(trace_seq_printf);

+/**
+ * trace_seq_bitmask - put a list of longs as a bitmask print output
+ * @s:		trace sequence descriptor
+ * @maskp:	points to an array of unsigned longs that represent a bitmask
+ * @nmaskbits:	The number of bits that are valid in @maskp
+ *
+ * It returns 0 if the trace oversizes the buffer's free
+ * space, 1 otherwise.
+ *
+ * Writes a ASCII representation of a bitmask string into @s.
+ */
+int
+trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp,
+		  int nmaskbits)
+{
+	int len = (PAGE_SIZE - 1) - s->len;
+	int ret;
+
+	if (s->full || !len)
+		return 0;
+
+	ret = bitmap_scnprintf(s->buffer, len, maskp, nmaskbits);
+	s->len += ret;
+
+	return 1;
+}
+EXPORT_SYMBOL_GPL(trace_seq_bitmask);
+
 /**
  * trace_seq_vprintf - sequence printing of trace information
  * @s: trace sequence descriptor
@@ -398,6 +426,19 @@ ftrace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val,
 EXPORT_SYMBOL(ftrace_print_symbols_seq_u64);
 #endif

+const char *
+ftrace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
+			 unsigned int bitmask_size)
+{
+	const char *ret = p->buffer + p->len;
+
+	trace_seq_bitmask(p, bitmask_ptr, bitmask_size * 8);
+	trace_seq_putc(p, 0);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ftrace_print_bitmask_seq);
+
 const char *
 ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len)
 {
...
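ftrace_print_bitmask_seq() is the back end for the new __bitmask() family of TRACE_EVENT() macros added elsewhere in this series. A hedged sketch of an event that records a cpumask (the event name and its argument are made up for illustration):

TRACE_EVENT(sample_bitmask,

	TP_PROTO(const struct cpumask *mask),

	TP_ARGS(mask),

	TP_STRUCT__entry(
		/* reserves a variable-size bitmask in the ring buffer */
		__bitmask(cpus, num_possible_cpus())
	),

	TP_fast_assign(
		__assign_bitmask(cpus, cpumask_bits(mask), num_possible_cpus());
	),

	/* __get_bitmask() expands, in the generated output code, to a call
	 * into ftrace_print_bitmask_seq() above */
	TP_printk("cpus=%s", __get_bitmask(cpus))
);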
@@ -130,15 +130,9 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
 	atomic_dec(&data->disabled);
 	preempt_enable_notrace();
 }
-
-static struct ftrace_ops trace_ops __read_mostly =
-{
-	.func = wakeup_tracer_call,
-	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
-};
 #endif /* CONFIG_FUNCTION_TRACER */

-static int register_wakeup_function(int graph, int set)
+static int register_wakeup_function(struct trace_array *tr, int graph, int set)
 {
 	int ret;
@@ -150,7 +144,7 @@ static int register_wakeup_function(int graph, int set)
 		ret = register_ftrace_graph(&wakeup_graph_return,
 					    &wakeup_graph_entry);
 	else
-		ret = register_ftrace_function(&trace_ops);
+		ret = register_ftrace_function(tr->ops);

 	if (!ret)
 		function_enabled = true;
@@ -158,7 +152,7 @@ static int register_wakeup_function(int graph, int set)
 	return ret;
 }

-static void unregister_wakeup_function(int graph)
+static void unregister_wakeup_function(struct trace_array *tr, int graph)
 {
 	if (!function_enabled)
 		return;
@@ -166,17 +160,17 @@ static void unregister_wakeup_function(int graph)
 	if (graph)
 		unregister_ftrace_graph();
 	else
-		unregister_ftrace_function(&trace_ops);
+		unregister_ftrace_function(tr->ops);

 	function_enabled = false;
 }

-static void wakeup_function_set(int set)
+static void wakeup_function_set(struct trace_array *tr, int set)
 {
 	if (set)
-		register_wakeup_function(is_graph(), 1);
+		register_wakeup_function(tr, is_graph(), 1);
 	else
-		unregister_wakeup_function(is_graph());
+		unregister_wakeup_function(tr, is_graph());
 }

 static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
@@ -184,16 +178,16 @@ static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
 	struct tracer *tracer = tr->current_trace;

 	if (mask & TRACE_ITER_FUNCTION)
-		wakeup_function_set(set);
+		wakeup_function_set(tr, set);

 	return trace_keep_overwrite(tracer, mask, set);
 }

-static int start_func_tracer(int graph)
+static int start_func_tracer(struct trace_array *tr, int graph)
 {
 	int ret;

-	ret = register_wakeup_function(graph, 0);
+	ret = register_wakeup_function(tr, graph, 0);

 	if (!ret && tracing_is_enabled())
 		tracer_enabled = 1;
@@ -203,11 +197,11 @@ static int start_func_tracer(int graph)
 	return ret;
 }

-static void stop_func_tracer(int graph)
+static void stop_func_tracer(struct trace_array *tr, int graph)
 {
 	tracer_enabled = 0;

-	unregister_wakeup_function(graph);
+	unregister_wakeup_function(tr, graph);
 }

 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -221,12 +215,12 @@ wakeup_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
 	if (!(is_graph() ^ set))
 		return 0;

-	stop_func_tracer(!set);
+	stop_func_tracer(tr, !set);

 	wakeup_reset(wakeup_trace);
-	tracing_max_latency = 0;
+	tr->max_latency = 0;

-	return start_func_tracer(set);
+	return start_func_tracer(tr, set);
 }

 static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
@@ -350,13 +344,13 @@ static void wakeup_print_header(struct seq_file *s)
 /*
  * Should this new latency be reported/recorded?
  */
-static int report_latency(cycle_t delta)
+static int report_latency(struct trace_array *tr, cycle_t delta)
 {
 	if (tracing_thresh) {
 		if (delta < tracing_thresh)
 			return 0;
 	} else {
-		if (delta <= tracing_max_latency)
+		if (delta <= tr->max_latency)
 			return 0;
 	}
 	return 1;
@@ -424,11 +418,11 @@ probe_wakeup_sched_switch(void *ignore,
 	T1 = ftrace_now(cpu);
 	delta = T1-T0;

-	if (!report_latency(delta))
+	if (!report_latency(wakeup_trace, delta))
 		goto out_unlock;

 	if (likely(!is_tracing_stopped())) {
-		tracing_max_latency = delta;
+		wakeup_trace->max_latency = delta;
 		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
 	}
@@ -587,7 +581,7 @@ static void start_wakeup_tracer(struct trace_array *tr)
 	 */
 	smp_wmb();

-	if (start_func_tracer(is_graph()))
+	if (start_func_tracer(tr, is_graph()))
 		printk(KERN_ERR "failed to start wakeup tracer\n");

 	return;
@@ -600,13 +594,15 @@ static void start_wakeup_tracer(struct trace_array *tr)
 static void stop_wakeup_tracer(struct trace_array *tr)
 {
 	tracer_enabled = 0;
-	stop_func_tracer(is_graph());
+	stop_func_tracer(tr, is_graph());
 	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
 	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
 	unregister_trace_sched_wakeup(probe_wakeup, NULL);
 	unregister_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
 }

+static bool wakeup_busy;
+
 static int __wakeup_tracer_init(struct trace_array *tr)
 {
 	save_flags = trace_flags;
@@ -615,14 +611,20 @@ static int __wakeup_tracer_init(struct trace_array *tr)
 	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
 	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

-	tracing_max_latency = 0;
+	tr->max_latency = 0;
 	wakeup_trace = tr;
+	ftrace_init_array_ops(tr, wakeup_tracer_call);
 	start_wakeup_tracer(tr);
+
+	wakeup_busy = true;
 	return 0;
 }

 static int wakeup_tracer_init(struct trace_array *tr)
 {
+	if (wakeup_busy)
+		return -EBUSY;
+
 	wakeup_dl = 0;
 	wakeup_rt = 0;
 	return __wakeup_tracer_init(tr);
@@ -630,6 +632,9 @@ static int wakeup_tracer_init(struct trace_array *tr)

 static int wakeup_rt_tracer_init(struct trace_array *tr)
 {
+	if (wakeup_busy)
+		return -EBUSY;
+
 	wakeup_dl = 0;
 	wakeup_rt = 1;
 	return __wakeup_tracer_init(tr);
@@ -637,6 +642,9 @@ static int wakeup_rt_tracer_init(struct trace_array *tr)

 static int wakeup_dl_tracer_init(struct trace_array *tr)
 {
+	if (wakeup_busy)
+		return -EBUSY;
+
 	wakeup_dl = 1;
 	wakeup_rt = 0;
 	return __wakeup_tracer_init(tr);
@@ -653,6 +661,8 @@ static void wakeup_tracer_reset(struct trace_array *tr)
 	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
 	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
+
+	ftrace_reset_array_ops(tr);
+	wakeup_busy = false;
 }

 static void wakeup_tracer_start(struct trace_array *tr)
@@ -684,6 +694,7 @@ static struct tracer wakeup_tracer __read_mostly =
 #endif
 	.open		= wakeup_trace_open,
 	.close		= wakeup_trace_close,
+	.allow_instances = true,
 	.use_max_tr	= true,
 };
@@ -694,7 +705,6 @@ static struct tracer wakeup_rt_tracer __read_mostly =
 	.reset		= wakeup_tracer_reset,
 	.start		= wakeup_tracer_start,
 	.stop		= wakeup_tracer_stop,
-	.wait_pipe	= poll_wait_pipe,
 	.print_max	= true,
 	.print_header	= wakeup_print_header,
 	.print_line	= wakeup_print_line,
@@ -706,6 +716,7 @@ static struct tracer wakeup_rt_tracer __read_mostly =
 #endif
 	.open		= wakeup_trace_open,
 	.close		= wakeup_trace_close,
+	.allow_instances = true,
 	.use_max_tr	= true,
 };
@@ -716,7 +727,6 @@ static struct tracer wakeup_dl_tracer __read_mostly =
 	.reset		= wakeup_tracer_reset,
 	.start		= wakeup_tracer_start,
 	.stop		= wakeup_tracer_stop,
-	.wait_pipe	= poll_wait_pipe,
 	.print_max	= true,
 	.print_header	= wakeup_print_header,
 	.print_line	= wakeup_print_line,
...
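With .allow_instances set on the wakeup tracers, the whole latency workflow can be confined to a trace instance. A user-space sketch, assuming the per-instance tracing_max_latency file that accompanies the new tr->max_latency field (the instance name "lat" is arbitrary):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

#define INST "/sys/kernel/debug/tracing/instances/lat"

static void write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd >= 0) {
		if (write(fd, val, strlen(val)) < 0)
			perror(path);
		close(fd);
	}
}

int main(void)
{
	char buf[64];
	ssize_t n;
	int fd;

	mkdir(INST, 0755);	/* making the directory creates the instance */
	write_str(INST "/current_tracer", "wakeup");

	sleep(1);		/* let the tracer observe some wakeups */

	fd = open(INST "/tracing_max_latency", O_RDONLY);
	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("max wakeup latency (us): %s", buf);
	}
	close(fd);
	return 0;
}

A second instance that selects "wakeup" while this one holds it gets -EBUSY from wakeup_tracer_init(), which is exactly the wakeup_busy guard above.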
@@ -65,7 +65,7 @@ static int trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
 	/* Don't allow flipping of max traces now */
 	local_irq_save(flags);
-	arch_spin_lock(&ftrace_max_lock);
+	arch_spin_lock(&buf->tr->max_lock);

 	cnt = ring_buffer_entries(buf->buffer);
@@ -83,7 +83,7 @@ static int trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
 			break;
 	}
 	tracing_on();
-	arch_spin_unlock(&ftrace_max_lock);
+	arch_spin_unlock(&buf->tr->max_lock);
 	local_irq_restore(flags);

 	if (count)
@@ -161,11 +161,6 @@ static struct ftrace_ops test_probe3 = {
 	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
 };

-static struct ftrace_ops test_global = {
-	.func			= trace_selftest_test_global_func,
-	.flags			= FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
-};
-
 static void print_counts(void)
 {
 	printk("(%d %d %d %d %d) ",
@@ -185,7 +180,7 @@ static void reset_counts(void)
 	trace_selftest_test_dyn_cnt = 0;
 }

-static int trace_selftest_ops(int cnt)
+static int trace_selftest_ops(struct trace_array *tr, int cnt)
 {
 	int save_ftrace_enabled = ftrace_enabled;
 	struct ftrace_ops *dyn_ops;
@@ -220,7 +215,11 @@ static int trace_selftest_ops(int cnt)
 	register_ftrace_function(&test_probe1);
 	register_ftrace_function(&test_probe2);
 	register_ftrace_function(&test_probe3);
-	register_ftrace_function(&test_global);
+	/* First time we are running with main function */
+	if (cnt > 1) {
+		ftrace_init_array_ops(tr, trace_selftest_test_global_func);
+		register_ftrace_function(tr->ops);
+	}

 	DYN_FTRACE_TEST_NAME();
@@ -232,8 +231,10 @@ static int trace_selftest_ops(int cnt)
 		goto out;
 	if (trace_selftest_test_probe3_cnt != 1)
 		goto out;
-	if (trace_selftest_test_global_cnt == 0)
-		goto out;
+	if (cnt > 1) {
+		if (trace_selftest_test_global_cnt == 0)
+			goto out;
+	}

 	DYN_FTRACE_TEST_NAME2();
@@ -269,8 +270,10 @@ static int trace_selftest_ops(int cnt)
 		goto out_free;
 	if (trace_selftest_test_probe3_cnt != 3)
 		goto out_free;
-	if (trace_selftest_test_global_cnt == 0)
-		goto out;
+	if (cnt > 1) {
+		if (trace_selftest_test_global_cnt == 0)
+			goto out;
+	}

 	if (trace_selftest_test_dyn_cnt == 0)
 		goto out_free;
@@ -295,7 +298,9 @@ static int trace_selftest_ops(int cnt)
 	unregister_ftrace_function(&test_probe1);
 	unregister_ftrace_function(&test_probe2);
 	unregister_ftrace_function(&test_probe3);
-	unregister_ftrace_function(&test_global);
+	if (cnt > 1)
+		unregister_ftrace_function(tr->ops);
+	ftrace_reset_array_ops(tr);

 	/* Make sure everything is off */
 	reset_counts();
@@ -315,9 +320,9 @@ static int trace_selftest_ops(int cnt)
 }

 /* Test dynamic code modification and ftrace filters */
-int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
-					   struct trace_array *tr,
-					   int (*func)(void))
+static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
+						  struct trace_array *tr,
+						  int (*func)(void))
 {
 	int save_ftrace_enabled = ftrace_enabled;
 	unsigned long count;
@@ -388,7 +393,7 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
 	}

 	/* Test the ops with global tracing running */
-	ret = trace_selftest_ops(1);
+	ret = trace_selftest_ops(tr, 1);
 	trace->reset(tr);

 out:
@@ -399,7 +404,7 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
 	/* Test the ops with global tracing off */
 	if (!ret)
-		ret = trace_selftest_ops(2);
+		ret = trace_selftest_ops(tr, 2);

 	return ret;
 }
@@ -802,7 +807,7 @@ trace_selftest_startup_function_graph(struct tracer *trace,
 int
 trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
 {
-	unsigned long save_max = tracing_max_latency;
+	unsigned long save_max = tr->max_latency;
 	unsigned long count;
 	int ret;
@@ -814,7 +819,7 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
 	}

 	/* reset the max latency */
-	tracing_max_latency = 0;
+	tr->max_latency = 0;
 	/* disable interrupts for a bit */
 	local_irq_disable();
 	udelay(100);
@@ -841,7 +846,7 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
 		ret = -1;
 	}

-	tracing_max_latency = save_max;
+	tr->max_latency = save_max;

 	return ret;
 }
@@ -851,7 +856,7 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
 int
 trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
 {
-	unsigned long save_max = tracing_max_latency;
+	unsigned long save_max = tr->max_latency;
 	unsigned long count;
 	int ret;
@@ -876,7 +881,7 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
 	}

 	/* reset the max latency */
-	tracing_max_latency = 0;
+	tr->max_latency = 0;
 	/* disable preemption for a bit */
 	preempt_disable();
 	udelay(100);
@@ -903,7 +908,7 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
 		ret = -1;
 	}

-	tracing_max_latency = save_max;
+	tr->max_latency = save_max;

 	return ret;
 }
@@ -913,7 +918,7 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
 int
 trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
 {
-	unsigned long save_max = tracing_max_latency;
+	unsigned long save_max = tr->max_latency;
 	unsigned long count;
 	int ret;
@@ -938,7 +943,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
 	}

 	/* reset the max latency */
-	tracing_max_latency = 0;
+	tr->max_latency = 0;

 	/* disable preemption and interrupts for a bit */
 	preempt_disable();
@@ -973,7 +978,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
 	}

 	/* do the test by disabling interrupts first this time */
-	tracing_max_latency = 0;
+	tr->max_latency = 0;
 	tracing_start();
 	trace->start(tr);
@@ -1004,7 +1009,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
 	tracing_start();
 out_no_start:
 	trace->reset(tr);
-	tracing_max_latency = save_max;
+	tr->max_latency = save_max;

 	return ret;
 }
@@ -1057,7 +1062,7 @@ static int trace_wakeup_test_thread(void *data)
 int
 trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
 {
-	unsigned long save_max = tracing_max_latency;
+	unsigned long save_max = tr->max_latency;
 	struct task_struct *p;
 	struct completion is_ready;
 	unsigned long count;
@@ -1083,7 +1088,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
 	}

 	/* reset the max latency */
-	tracing_max_latency = 0;
+	tr->max_latency = 0;

 	while (p->on_rq) {
 		/*
@@ -1113,7 +1118,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
 	trace->reset(tr);
 	tracing_start();

-	tracing_max_latency = save_max;
+	tr->max_latency = save_max;

 	/* kill the thread */
 	kthread_stop(p);
...
@@ -51,11 +51,33 @@ static DEFINE_MUTEX(stack_sysctl_mutex);
 int stack_tracer_enabled;
 static int last_stack_tracer_enabled;

+static inline void print_max_stack(void)
+{
+	long i;
+	int size;
+
+	pr_emerg("        Depth    Size   Location    (%d entries)\n"
+			   "        -----    ----   --------\n",
+			   max_stack_trace.nr_entries - 1);
+
+	for (i = 0; i < max_stack_trace.nr_entries; i++) {
+		if (stack_dump_trace[i] == ULONG_MAX)
+			break;
+		if (i+1 == max_stack_trace.nr_entries ||
+				stack_dump_trace[i+1] == ULONG_MAX)
+			size = stack_dump_index[i];
+		else
+			size = stack_dump_index[i] - stack_dump_index[i+1];
+
+		pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_dump_index[i],
+				size, (void *)stack_dump_trace[i]);
+	}
+}
+
 static inline void
 check_stack(unsigned long ip, unsigned long *stack)
 {
-	unsigned long this_size, flags;
-	unsigned long *p, *top, *start;
+	unsigned long this_size, flags; unsigned long *p, *top, *start;
 	static int tracer_frame;
 	int frame_size = ACCESS_ONCE(tracer_frame);
 	int i;
@@ -85,8 +107,12 @@ check_stack(unsigned long ip, unsigned long *stack)
 	max_stack_size = this_size;

 	max_stack_trace.nr_entries = 0;
-	max_stack_trace.skip = 3;
+
+	if (using_ftrace_ops_list_func())
+		max_stack_trace.skip = 4;
+	else
+		max_stack_trace.skip = 3;

 	save_stack_trace(&max_stack_trace);
@@ -145,8 +171,12 @@ check_stack(unsigned long ip, unsigned long *stack)
 		i++;
 	}

-	BUG_ON(current != &init_task &&
-		*(end_of_stack(current)) != STACK_END_MAGIC);
+	if ((current != &init_task &&
+			*(end_of_stack(current)) != STACK_END_MAGIC)) {
+		print_max_stack();
+		BUG();
+	}

 out:
 	arch_spin_unlock(&max_stack_lock);
 	local_irq_restore(flags);
...
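print_max_stack() mirrors the Depth/Size/Location table that the stack tracer already exposes through the tracing directory, so the same data can be inspected at any time rather than only on the BUG() path. A small reader, assuming CONFIG_STACK_TRACER=y and the tracer enabled through the kernel.stack_tracer_enabled sysctl:

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/kernel/debug/tracing/stack_max_size", "r");

	if (f) {
		if (fgets(line, sizeof(line), f))
			printf("deepest stack seen: %s", line);
		fclose(f);
	}

	/* stack_trace holds the same table print_max_stack() dumps:
	 * one line per frame, per-frame size = index[i] - index[i+1] */
	f = fopen("/sys/kernel/debug/tracing/stack_trace", "r");
	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}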