Commit 6f893fb2 authored by Ingo Molnar


Merge branches 'tracing/branch-tracer', 'tracing/fastboot', 'tracing/ftrace', 'tracing/function-return-tracer', 'tracing/power-tracer', 'tracing/powerpc', 'tracing/ring-buffer', 'tracing/stack-tracer' and 'tracing/urgent' into tracing/core
@@ -324,7 +324,7 @@ output. To see what is available, simply cat the file:
  cat /debug/tracing/trace_options
  print-parent nosym-offset nosym-addr noverbose noraw nohex nobin \
  noblock nostacktrace nosched-tree nouserstacktrace nosym-userobj

To disable one of the options, echo in the option prepended with "no".
@@ -378,6 +378,20 @@ Here are the available options:
  When a trace is recorded, so is the stack of functions.
  This allows for back traces of trace sites.
userstacktrace - This option changes the trace.
It records a stacktrace of the current userspace thread.
sym-userobj - when user stacktraces are enabled, look up which object the
	      address belongs to, and print a relative address.
	      This is especially useful when ASLR is on, otherwise you don't
	      get a chance to resolve the address to object/file/line after
	      the app is no longer running.
	      The lookup is performed when you read trace, trace_pipe,
	      latency_trace. Example:

	      a.out-1623  [000] 40874.465068: /root/a.out[+0x480] <- /root/a.out[+0x494] <- /root/a.out[+0x4a8] <- /lib/libc-2.7.so[+0x1e1a6]
sched-tree - TBD (any users??)
...
@@ -37,7 +37,7 @@ $ echo mmiotrace > /debug/tracing/current_tracer
$ cat /debug/tracing/trace_pipe > mydump.txt &
Start X or whatever.
$ echo "X is up" > /debug/tracing/trace_marker
$ echo nop > /debug/tracing/current_tracer
Check for lost events.
@@ -66,7 +66,7 @@ which action. It is recommended to place descriptive markers about what you
do.

Shut down mmiotrace (requires root privileges):
$ echo nop > /debug/tracing/current_tracer
The 'cat' process exits. If it does not, kill it by issuing 'fg' command and
pressing ctrl+c.
@@ -81,7 +81,9 @@ are:
$ cat /debug/tracing/trace_entries
gives you a number. Approximately double this number and write it back, for
instance:
$ echo 0 > /debug/tracing/tracing_enabled
$ echo 128000 > /debug/tracing/trace_entries
$ echo 1 > /debug/tracing/tracing_enabled
Then start again from the top.
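The disable/resize/re-enable sequence can also be scripted. A minimal sketch in C (illustrative only; the paths are the ones used in this document, assuming debugfs is mounted at /debug):

#include <stdio.h>

/* hypothetical helper: write one value to a tracing control file */
static int trace_write(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%s\n", val);
	return fclose(f);
}

int main(void)
{
	/* mirror the echo sequence above: stop, resize, restart */
	trace_write("/debug/tracing/tracing_enabled", "0");
	trace_write("/debug/tracing/trace_entries", "128000");
	trace_write("/debug/tracing/tracing_enabled", "1");
	return 0;
}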
If you are doing a trace for a driver project, e.g. Nouveau, you should also
...
@@ -7,7 +7,19 @@
#ifndef __ASSEMBLY__
extern void _mcount(void);
#endif
#ifdef CONFIG_DYNAMIC_FTRACE
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
	/* relocation of mcount call site is the same as the address */
return addr;
}
struct dyn_arch_ftrace {
struct module *mod;
};
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* __ASSEMBLY__ */
#endif
...
@@ -34,11 +34,19 @@ struct mod_arch_specific {
#ifdef __powerpc64__
	unsigned int stubs_section;	/* Index of stubs section in module */
	unsigned int toc_section;	/* What section is the TOC? */
#ifdef CONFIG_DYNAMIC_FTRACE
	unsigned long toc;
	unsigned long tramp;
#endif
#else /* powerpc64 */
	/* Indices of PLT sections within module. */
	unsigned int core_plt_section;
	unsigned int init_plt_section;
#ifdef CONFIG_DYNAMIC_FTRACE
	unsigned long tramp;
#endif
#endif /* powerpc64 */

	/* List of BUG addresses, source line numbers and filenames */
	struct list_head bug_list;
@@ -68,6 +76,12 @@ struct mod_arch_specific {
# endif /* MODULE */
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
# ifdef MODULE
asm(".section .ftrace.tramp,\"ax\",@nobits; .align 3; .previous");
# endif /* MODULE */
#endif

struct exception_table_entry;
void sort_ex_table(struct exception_table_entry *start,
...
@@ -9,22 +9,30 @@
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/ftrace.h>

#if 0
#define DEBUGP printk
#else
#define DEBUGP(fmt , ...)	do { } while (0)
#endif

static unsigned int ftrace_nop = PPC_NOP_INSTR;

#ifdef CONFIG_PPC32
# define GET_ADDR(addr) addr
#else
/* PowerPC64's functions are data that points to the functions */
# define GET_ADDR(addr) (*(unsigned long *)addr)
#endif
@@ -33,12 +41,12 @@ static unsigned int ftrace_calc_offset(long ip, long addr)
	return (int)(addr - ip);
}

static unsigned char *ftrace_nop_replace(void)
{
	return (char *)&ftrace_nop;
}

static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	static unsigned int op;
@@ -68,49 +76,434 @@ unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
# define _ASM_PTR	" .long "
#endif
static int
ftrace_modify_code(unsigned long ip, unsigned char *old_code,
		   unsigned char *new_code)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];

	/*
	 * Note: Due to modules and __init, code can
	 * disappear and change, we need to protect against faulting
	 * as well as code changing. We do this by using the
	 * probe_kernel_* functions.
	 *
	 * No real locking needed, this code is run through
	 * kstop_machine, or before SMP starts.
	 */

	/* read the text we want to modify */
	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	/* replace the text with the new text */
	if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
		return -EPERM;

	flush_icache_range(ip, ip + 8);

	return 0;
}

/*
 * Helper functions that are the same for both PPC64 and PPC32.
 */
static int test_24bit_addr(unsigned long ip, unsigned long addr)
{
	long diff;

	/*
	 * Can we get to addr from ip in 24 bits?
	 * (26 really, since we multiply by 4 for 4 byte alignment)
	 */
	diff = addr - ip;

	/*
	 * Return true if diff is less than 1 << 25
	 * and greater than -1 << 26.
	 */
	return (diff < (1 << 25)) && (diff > (-1 << 26));
}
static int is_bl_op(unsigned int op)
{
return (op & 0xfc000003) == 0x48000001;
}
static int test_offset(unsigned long offset)
{
return (offset + 0x2000000 > 0x3ffffff) || ((offset & 3) != 0);
}
static unsigned long find_bl_target(unsigned long ip, unsigned int op)
{
static int offset;
offset = (op & 0x03fffffc);
/* make it signed */
if (offset & 0x02000000)
offset |= 0xfe000000;
return ip + (long)offset;
}
static unsigned int branch_offset(unsigned long offset)
{
/* return "bl ip+offset" */
return 0x48000001 | (offset & 0x03fffffc);
}
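These helpers define the whole 'bl' encoding contract: a 26-bit signed, word-aligned displacement stored in the low bits of the instruction. A standalone round-trip check (hedged sketch in userspace C; helper bodies copied from this diff, test addresses invented):

#include <assert.h>
#include <stdio.h>

/* opcode 18 ('bl') in the top six bits, LK=1 in the lowest bit */
static int is_bl_op(unsigned int op)
{
	return (op & 0xfc000003) == 0x48000001;
}

static unsigned int branch_offset(unsigned long offset)
{
	/* "bl ip+offset": keep the signed, word-aligned LI field */
	return 0x48000001 | (offset & 0x03fffffc);
}

static unsigned long find_bl_target(unsigned long ip, unsigned int op)
{
	int offset = op & 0x03fffffc;

	if (offset & 0x02000000)	/* sign-extend the 26-bit field */
		offset |= 0xfe000000;
	return ip + (long)offset;
}

int main(void)
{
	unsigned long ip = 0xc0200000, fwd = 0xc03ff020, back = 0xc0001000;
	unsigned int op;

	op = branch_offset(fwd - ip);		/* forward branch */
	assert(is_bl_op(op) && find_bl_target(ip, op) == fwd);

	op = branch_offset(back - ip);		/* backward branch */
	assert(is_bl_op(op) && find_bl_target(ip, op) == back);

	printf("round-trip ok\n");
	return 0;
}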
#ifdef CONFIG_PPC64
static int
__ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr)
{
unsigned char replaced[MCOUNT_INSN_SIZE * 2];
unsigned int *op = (unsigned *)&replaced;
unsigned char jmp[8];
unsigned long *ptr = (unsigned long *)&jmp;
unsigned long ip = rec->ip;
unsigned long tramp;
int offset;
/* read where this goes */
if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
return -EFAULT;
	/* Make sure that this is still a 24-bit jump */
if (!is_bl_op(*op)) {
printk(KERN_ERR "Not expected bl: opcode is %x\n", *op);
return -EINVAL;
}
	/* let's find where the pointer goes */
tramp = find_bl_target(ip, *op);
/*
* On PPC64 the trampoline looks like:
* 0x3d, 0x82, 0x00, 0x00, addis r12,r2, <high>
* 0x39, 0x8c, 0x00, 0x00, addi r12,r12, <low>
	 *   Where bytes 2, 3, 6 and 7 make up the 32-bit offset
	 *   to the TOC entry that holds the pointer to jump to.
* 0xf8, 0x41, 0x00, 0x28, std r2,40(r1)
* 0xe9, 0x6c, 0x00, 0x20, ld r11,32(r12)
	 *   The actual address is 32 bytes from the offset
* into the TOC.
* 0xe8, 0x4c, 0x00, 0x28, ld r2,40(r12)
*/
DEBUGP("ip:%lx jumps to %lx r2: %lx", ip, tramp, mod->arch.toc);
/* Find where the trampoline jumps to */
if (probe_kernel_read(jmp, (void *)tramp, 8)) {
printk(KERN_ERR "Failed to read %lx\n", tramp);
return -EFAULT;
}
DEBUGP(" %08x %08x",
(unsigned)(*ptr >> 32),
(unsigned)*ptr);
offset = (unsigned)jmp[2] << 24 |
(unsigned)jmp[3] << 16 |
(unsigned)jmp[6] << 8 |
(unsigned)jmp[7];
DEBUGP(" %x ", offset);
	/* get the address this jumps to */
tramp = mod->arch.toc + offset + 32;
DEBUGP("toc: %lx", tramp);
if (probe_kernel_read(jmp, (void *)tramp, 8)) {
printk(KERN_ERR "Failed to read %lx\n", tramp);
return -EFAULT;
}
DEBUGP(" %08x %08x\n",
(unsigned)(*ptr >> 32),
(unsigned)*ptr);
/* This should match what was called */
if (*ptr != GET_ADDR(addr)) {
printk(KERN_ERR "addr does not match %lx\n", *ptr);
return -EINVAL;
}
/*
* We want to nop the line, but the next line is
* 0xe8, 0x41, 0x00, 0x28 ld r2,40(r1)
* This needs to be turned to a nop too.
*/
if (probe_kernel_read(replaced, (void *)(ip+4), MCOUNT_INSN_SIZE))
return -EFAULT;
if (*op != 0xe8410028) {
printk(KERN_ERR "Next line is not ld! (%08x)\n", *op);
return -EINVAL;
}
/*
* Milton Miller pointed out that we can not blindly do nops.
* If a task was preempted when calling a trace function,
* the nops will remove the way to restore the TOC in r2
* and the r2 TOC will get corrupted.
*/
/*
* Replace:
* bl <tramp> <==== will be replaced with "b 1f"
* ld r2,40(r1)
* 1:
*/
op[0] = 0x48000008; /* b +8 */
if (probe_kernel_write((void *)ip, replaced, MCOUNT_INSN_SIZE))
return -EPERM;
return 0;
}
#else /* !PPC64 */
static int
__ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr)
{
unsigned char replaced[MCOUNT_INSN_SIZE];
unsigned int *op = (unsigned *)&replaced;
unsigned char jmp[8];
unsigned int *ptr = (unsigned int *)&jmp;
unsigned long ip = rec->ip;
unsigned long tramp;
int offset;
if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
return -EFAULT;
	/* Make sure that this is still a 24-bit jump */
if (!is_bl_op(*op)) {
printk(KERN_ERR "Not expected bl: opcode is %x\n", *op);
return -EINVAL;
}
	/* let's find where the pointer goes */
tramp = find_bl_target(ip, *op);
/*
* On PPC32 the trampoline looks like:
* lis r11,sym@ha
* addi r11,r11,sym@l
* mtctr r11
* bctr
*/
DEBUGP("ip:%lx jumps to %lx", ip, tramp);
/* Find where the trampoline jumps to */
if (probe_kernel_read(jmp, (void *)tramp, 8)) {
printk(KERN_ERR "Failed to read %lx\n", tramp);
return -EFAULT;
}
DEBUGP(" %08x %08x ", ptr[0], ptr[1]);
tramp = (ptr[1] & 0xffff) |
((ptr[0] & 0xffff) << 16);
if (tramp & 0x8000)
tramp -= 0x10000;
DEBUGP(" %x ", tramp);
if (tramp != addr) {
printk(KERN_ERR
"Trampoline location %08lx does not match addr\n",
tramp);
return -EINVAL;
}
op[0] = PPC_NOP_INSTR;
if (probe_kernel_write((void *)ip, replaced, MCOUNT_INSN_SIZE))
return -EPERM;
return 0;
}
#endif /* PPC64 */
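The byte arithmetic that the PPC64 __ftrace_make_nop above uses to recover the TOC offset can be exercised on its own. A hedged sketch (userspace C; the trampoline bytes and r2 value are invented example values, not read from a real module):

#include <stdio.h>

int main(void)
{
	/* shape of the PPC64 trampoline described in the comment above:
	 * addis r12,r2,<high>; addi r12,r12,<low>; ... */
	unsigned char jmp[8] = { 0x3d, 0x82, 0x12, 0x34,	/* <high> = 0x1234 */
				 0x39, 0x8c, 0x56, 0x78 };	/* <low>  = 0x5678 */
	unsigned long toc = 0xc000000000f00000UL;	/* made-up r2 value */
	int offset;

	/* bytes 2, 3, 6 and 7 carry the 32-bit offset into the TOC */
	offset = (unsigned)jmp[2] << 24 |
		 (unsigned)jmp[3] << 16 |
		 (unsigned)jmp[6] << 8 |
		 (unsigned)jmp[7];

	/* the pointer itself then sits 32 bytes past that offset */
	printf("offset %#x, entry at %#lx\n", offset, toc + offset + 32);
	return 0;
}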
int ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr)
{
unsigned char *old, *new;
unsigned long ip = rec->ip;
/*
	 * If the calling address is more than 24 bits away,
* then we had to use a trampoline to make the call.
* Otherwise just update the call site.
*/
if (test_24bit_addr(ip, addr)) {
/* within range */
old = ftrace_call_replace(ip, addr);
new = ftrace_nop_replace();
return ftrace_modify_code(ip, old, new);
}
/*
* Out of range jumps are called from modules.
* We should either already have a pointer to the module
* or it has been passed in.
*/
if (!rec->arch.mod) {
if (!mod) {
printk(KERN_ERR "No module loaded addr=%lx\n",
addr);
return -EFAULT;
}
rec->arch.mod = mod;
} else if (mod) {
if (mod != rec->arch.mod) {
printk(KERN_ERR
"Record mod %p not equal to passed in mod %p\n",
rec->arch.mod, mod);
return -EINVAL;
}
/* nothing to do if mod == rec->arch.mod */
} else
mod = rec->arch.mod;
return __ftrace_make_nop(mod, rec, addr);
}
#ifdef CONFIG_PPC64
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
unsigned char replaced[MCOUNT_INSN_SIZE * 2];
unsigned int *op = (unsigned *)&replaced;
unsigned long ip = rec->ip;
unsigned long offset;
/* read where this goes */
if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE * 2))
return -EFAULT;
/*
* It should be pointing to two nops or
* b +8; ld r2,40(r1)
*/
if (((op[0] != 0x48000008) || (op[1] != 0xe8410028)) &&
((op[0] != PPC_NOP_INSTR) || (op[1] != PPC_NOP_INSTR))) {
printk(KERN_ERR "Expected NOPs but have %x %x\n", op[0], op[1]);
return -EINVAL;
}
/* If we never set up a trampoline to ftrace_caller, then bail */
if (!rec->arch.mod->arch.tramp) {
printk(KERN_ERR "No ftrace trampoline\n");
return -EINVAL;
}
/* now calculate a jump to the ftrace caller trampoline */
offset = rec->arch.mod->arch.tramp - ip;
if (test_offset(offset)) {
printk(KERN_ERR "REL24 %li out of range!\n",
(long int)offset);
return -EINVAL;
}
/* Set to "bl addr" */
op[0] = branch_offset(offset);
/* ld r2,40(r1) */
op[1] = 0xe8410028;
DEBUGP("write to %lx\n", rec->ip);
if (probe_kernel_write((void *)ip, replaced, MCOUNT_INSN_SIZE * 2))
return -EPERM;
return 0;
}
#else
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
unsigned char replaced[MCOUNT_INSN_SIZE];
unsigned int *op = (unsigned *)&replaced;
unsigned long ip = rec->ip;
unsigned long offset;
/* read where this goes */
if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
return -EFAULT;
/* It should be pointing to a nop */
if (op[0] != PPC_NOP_INSTR) {
printk(KERN_ERR "Expected NOP but have %x\n", op[0]);
return -EINVAL;
}
/* If we never set up a trampoline to ftrace_caller, then bail */
if (!rec->arch.mod->arch.tramp) {
printk(KERN_ERR "No ftrace trampoline\n");
return -EINVAL;
}
/* now calculate a jump to the ftrace caller trampoline */
offset = rec->arch.mod->arch.tramp - ip;
if (test_offset(offset)) {
printk(KERN_ERR "REL24 %li out of range!\n",
(long int)offset);
return -EINVAL;
}
/* Set to "bl addr" */
op[0] = branch_offset(offset);
DEBUGP("write to %lx\n", rec->ip);
if (probe_kernel_write((void *)ip, replaced, MCOUNT_INSN_SIZE))
return -EPERM;
return 0;
}
#endif /* CONFIG_PPC64 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
unsigned char *old, *new;
unsigned long ip = rec->ip;
/*
	 * If the calling address is more than 24 bits away,
* then we had to use a trampoline to make the call.
* Otherwise just update the call site.
*/
if (test_24bit_addr(ip, addr)) {
/* within range */
old = ftrace_nop_replace();
new = ftrace_call_replace(ip, addr);
return ftrace_modify_code(ip, old, new);
}
/*
* Out of range jumps are called from modules.
	 * Since we are converting from a nop, it had better
* already have a module defined.
*/
if (!rec->arch.mod) {
printk(KERN_ERR "No module loaded\n");
return -EINVAL;
}
return __ftrace_make_call(rec, addr);
}

int ftrace_update_ftrace_func(ftrace_func_t func)
@@ -128,10 +521,10 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
int __init ftrace_dyn_arch_init(void *data)
{
	/* caller expects data to be zero */
	unsigned long *p = data;

	*p = 0;

	return 0;
}
@@ -69,10 +69,15 @@ void cpu_idle(void)
			smp_mb();
			local_irq_disable();

			/* Don't trace irqs off for idle */
			stop_critical_timings();

			/* check again after disabling irqs */
			if (!need_resched() && !cpu_should_die())
				ppc_md.power_save();

			start_critical_timings();

			local_irq_enable();
			set_thread_flag(TIF_POLLING_NRFLAG);
...
@@ -22,6 +22,7 @@
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ftrace.h>
#include <linux/cache.h>
#include <linux/bug.h>
#include <linux/sort.h>
@@ -53,6 +54,9 @@ static unsigned int count_relocs(const Elf32_Rela *rela, unsigned int num)
			r_addend = rela[i].r_addend;
	}

#ifdef CONFIG_DYNAMIC_FTRACE
	_count_relocs++;	/* add one for ftrace_caller */
#endif
	return _count_relocs;
}
@@ -306,5 +310,11 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
			return -ENOEXEC;
		}
	}
#ifdef CONFIG_DYNAMIC_FTRACE
	module->arch.tramp =
		do_plt_call(module->module_core,
			    (unsigned long)ftrace_caller,
			    sechdrs, module);
#endif
	return 0;
}
@@ -20,6 +20,7 @@
#include <linux/moduleloader.h>
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/bug.h>
#include <asm/module.h>
#include <asm/firmware.h>
@@ -163,6 +164,11 @@ static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
		}
	}

#ifdef CONFIG_DYNAMIC_FTRACE
	/* make the trampoline to the ftrace_caller */
	relocs++;
#endif

	DEBUGP("Looks like a total of %lu stubs, max\n", relocs);
	return relocs * sizeof(struct ppc64_stub_entry);
}
@@ -441,5 +447,12 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
		}
	}

#ifdef CONFIG_DYNAMIC_FTRACE
	me->arch.toc = my_r2(sechdrs, me);
	me->arch.tramp = stub_for_addr(sechdrs,
				       (unsigned long)ftrace_caller,
				       me);
#endif

	return 0;
}
@@ -36,6 +36,7 @@ config X86
	select HAVE_ARCH_TRACEHOOK
	select HAVE_GENERIC_DMA_COHERENT if X86_32
	select HAVE_EFFICIENT_UNALIGNED_ACCESS
	select USER_STACKTRACE_SUPPORT

config ARCH_DEFCONFIG
	string
...
@@ -29,7 +29,6 @@ struct dyn_arch_ftrace {
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_RET_TRACER
#define FTRACE_RET_STACK_SIZE 20
#ifndef __ASSEMBLY__
...
@@ -40,36 +40,8 @@ struct thread_info {
	 */
	__u8			supervisor_stack[0];
#endif
#ifdef CONFIG_FUNCTION_RET_TRACER
	/* Index of current stored address in ret_stack */
int curr_ret_stack;
/* Stack of return addresses for return function tracing */
struct ftrace_ret_stack ret_stack[FTRACE_RET_STACK_SIZE];
/*
* Number of functions that haven't been traced
* because of depth overrun.
*/
atomic_t trace_overrun;
#endif
};
#ifdef CONFIG_FUNCTION_RET_TRACER
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
.exec_domain = &default_exec_domain, \
.flags = 0, \
.cpu = 0, \
.preempt_count = 1, \
.addr_limit = KERNEL_DS, \
.restart_block = { \
.fn = do_no_restart_syscall, \
}, \
.curr_ret_stack = -1,\
.trace_overrun = ATOMIC_INIT(0) \
}
#else
#define INIT_THREAD_INFO(tsk)			\
{						\
	.task		= &tsk,			\
@@ -82,7 +54,6 @@ struct thread_info {
		.fn = do_no_restart_syscall,	\
	},					\
}
#endif

#define init_thread_info	(init_thread_union.thread_info)
#define init_stack		(init_thread_union.stack)
...
@@ -350,19 +350,21 @@ static int push_return_trace(unsigned long ret, unsigned long long time,
				unsigned long func)
{
	int index;

	if (!current->ret_stack)
		return -EBUSY;

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	index = ++current->curr_ret_stack;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = time;

	return 0;
}
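Outside the kernel the same bookkeeping fits in a few lines. A self-contained sketch (userspace C; depth and field names mirror the diff for illustration, with a plain int in place of atomic_t):

#include <stdio.h>

#define RETFUNC_DEPTH 50	/* stands in for FTRACE_RETFUNC_DEPTH */

struct ret_entry {
	unsigned long ret, func;
	unsigned long long calltime;
};

static struct ret_entry ret_stack[RETFUNC_DEPTH];
static int curr_ret_stack = -1;	/* -1 means empty, as in the kernel code */
static int trace_overrun;

static int push_ret(unsigned long ret, unsigned long func,
		    unsigned long long time)
{
	if (curr_ret_stack == RETFUNC_DEPTH - 1) {
		trace_overrun++;	/* too deep: count it and drop */
		return -1;
	}
	curr_ret_stack++;
	ret_stack[curr_ret_stack] =
		(struct ret_entry){ .ret = ret, .func = func, .calltime = time };
	return 0;
}

static void pop_ret(unsigned long *ret, unsigned long *func,
		    unsigned long long *time)
{
	int index = curr_ret_stack--;

	*ret = ret_stack[index].ret;
	*func = ret_stack[index].func;
	*time = ret_stack[index].calltime;
}

int main(void)
{
	unsigned long r, f;
	unsigned long long t;

	push_ret(0x1000, 0x2000, 42);
	pop_ret(&r, &f, &t);
	printf("ret=%#lx func=%#lx time=%llu overruns=%d\n",
	       r, f, t, trace_overrun);
	return 0;
}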
@@ -373,13 +375,12 @@ static void pop_return_trace(unsigned long *ret, unsigned long long *time,
{
	int index;

	index = current->curr_ret_stack;
	*ret = current->ret_stack[index].ret;
	*func = current->ret_stack[index].func;
	*time = current->ret_stack[index].calltime;
	*overrun = atomic_read(&current->trace_overrun);
	current->curr_ret_stack--;
}
/*
...
@@ -6,6 +6,7 @@
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <asm/stacktrace.h>

static void save_stack_warning(void *data, char *msg)
@@ -83,3 +84,66 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */
struct stack_frame {
const void __user *next_fp;
unsigned long ret_addr;
};
static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
{
int ret;
if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
return 0;
ret = 1;
pagefault_disable();
if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
ret = 0;
pagefault_enable();
return ret;
}
static inline void __save_stack_trace_user(struct stack_trace *trace)
{
const struct pt_regs *regs = task_pt_regs(current);
const void __user *fp = (const void __user *)regs->bp;
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = regs->ip;
while (trace->nr_entries < trace->max_entries) {
struct stack_frame frame;
frame.next_fp = NULL;
frame.ret_addr = 0;
if (!copy_stack_frame(fp, &frame))
break;
if ((unsigned long)fp < regs->sp)
break;
if (frame.ret_addr) {
trace->entries[trace->nr_entries++] =
frame.ret_addr;
}
if (fp == frame.next_fp)
break;
fp = frame.next_fp;
}
}
void save_stack_trace_user(struct stack_trace *trace)
{
/*
* Trace user stack if we are not a kernel thread
*/
if (current->mm) {
__save_stack_trace_user(trace);
}
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
}
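The same frame-pointer walk can be tried against a process's own stack. A hedged sketch (assumes the x86 frame layout and a build with -fno-omit-frame-pointer; a fixed depth cap stands in for the kernel's access_ok() and regs->sp checks):

#include <stdio.h>

/* same shape as the kernel's struct stack_frame above */
struct stack_frame {
	const struct stack_frame *next_fp;	/* saved frame pointer */
	unsigned long ret_addr;			/* return address */
};

static void walk(void)
{
	const struct stack_frame *fp = __builtin_frame_address(0);
	int depth = 0;

	while (fp && fp->ret_addr && depth++ < 16) {
		printf("  ret %#lx\n", fp->ret_addr);
		if (fp->next_fp == fp)	/* a self-link would loop forever */
			break;
		fp = fp->next_fp;
	}
}

static void c(void) { walk(); }
static void b(void) { c(); }
static void a(void) { b(); }

int main(void)
{
	a();	/* prints the return addresses of c, b, a, main, ... */
	return 0;
}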
@@ -357,7 +357,18 @@ int seq_printf(struct seq_file *m, const char *f, ...)
}
EXPORT_SYMBOL(seq_printf);

/**
* mangle_path - mangle and copy path to buffer beginning
* @s: buffer start
* @p: beginning of path in above buffer
* @esc: set of characters that need escaping
*
 * Copy the path from @p to @s, replacing each occurrence of a character from
 * @esc with the usual octal escape.
* Returns pointer past last written character in @s, or NULL in case of
* failure.
*/
char *mangle_path(char *s, char *p, char *esc)
{
	while (s <= p) {
		char c = *p++;
@@ -376,6 +387,7 @@ static char *mangle_path(char *s, char *p, char *esc)
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(mangle_path);
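Since mangle_path is now shared with the tracer, its escaping loop is worth seeing in isolation. A standalone re-implementation for illustration, not the exported symbol itself (the interesting part is the in-buffer discipline: s trails p, and each escape narrows the gap by three bytes):

#include <stdio.h>
#include <string.h>

static char *mangle(char *s, char *p, const char *esc)
{
	while (s <= p) {
		char c = *p++;

		if (!c)
			return s;	/* done: past-the-end pointer */
		else if (!strchr(esc, c))
			*s++ = c;	/* plain character: copy */
		else if (s + 4 > p)
			break;		/* no room left for \ooo */
		else {
			*s++ = '\\';
			*s++ = '0' + ((c & 0300) >> 6);
			*s++ = '0' + ((c & 070) >> 3);
			*s++ = '0' + (c & 07);
		}
	}
	return NULL;
}

int main(void)
{
	char buf[32] = "????/tmp/a\nb";	/* four bytes of leading slack */
	char *end = mangle(buf, buf + 4, "\n");

	if (end) {
		*end = '\0';
		printf("%s\n", buf);	/* prints /tmp/a\012b */
	}
	return 0;
}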
/*
 * return the absolute path of 'dentry' residing in mount 'mnt'.
...
@@ -257,6 +257,7 @@ extern int ftrace_dump_on_oops;

extern void tracing_start(void);
extern void tracing_stop(void);
extern void ftrace_off_permanent(void);

extern void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
@@ -290,6 +291,7 @@ ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 0)));

static inline void tracing_start(void) { }
static inline void tracing_stop(void) { }
static inline void ftrace_off_permanent(void) { }
static inline int
ftrace_printk(const char *fmt, ...)
{
@@ -323,6 +325,8 @@ struct ftrace_retfunc {
};

#ifdef CONFIG_FUNCTION_RET_TRACER
#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32
/* Type of a callback handler of tracing return function */
typedef void (*trace_function_return_t)(struct ftrace_retfunc *);
@@ -330,6 +334,12 @@ extern int register_ftrace_return(trace_function_return_t func);
/* The current handler in use */
extern trace_function_return_t ftrace_function_return;
extern void unregister_ftrace_return(void);

extern void ftrace_retfunc_init_task(struct task_struct *t);
extern void ftrace_retfunc_exit_task(struct task_struct *t);
#else
static inline void ftrace_retfunc_init_task(struct task_struct *t) { }
static inline void ftrace_retfunc_exit_task(struct task_struct *t) { }
#endif

#endif /* _LINUX_FTRACE_H */
@@ -122,6 +122,7 @@ void ring_buffer_normalize_time_stamp(int cpu, u64 *ts);

void tracing_on(void);
void tracing_off(void);
void tracing_off_permanent(void);

enum ring_buffer_flags {
	RB_FL_OVERWRITE		= 1 << 0,
...
@@ -1352,6 +1352,17 @@ struct task_struct {

	unsigned long default_timer_slack_ns;

	struct list_head	*scm_work_list;
#ifdef CONFIG_FUNCTION_RET_TRACER
	/* Index of current stored address in ret_stack */
int curr_ret_stack;
/* Stack of return addresses for return function tracing */
struct ftrace_ret_stack *ret_stack;
/*
* Number of functions that haven't been traced
* because of depth overrun.
*/
atomic_t trace_overrun;
#endif
};

/*
@@ -2006,18 +2017,6 @@ static inline void setup_thread_stack(struct task_struct *p, struct task_struct
{
	*task_thread_info(p) = *task_thread_info(org);
	task_thread_info(p)->task = p;
#ifdef CONFIG_FUNCTION_RET_TRACER
/*
* When fork() creates a child process, this function is called.
	 * But the child task may not inherit the return addresses traced
* by the return function tracer because it will directly execute
* in userspace and will not return to kernel functions its parent
* used.
*/
task_thread_info(p)->curr_ret_stack = -1;
atomic_set(&task_thread_info(p)->trace_overrun, 0);
#endif
}

static inline unsigned long *end_of_stack(struct task_struct *p)
...
@@ -34,6 +34,7 @@ struct seq_operations {

#define SEQ_SKIP 1

char *mangle_path(char *s, char *p, char *esc);
int seq_open(struct file *, const struct seq_operations *);
ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
loff_t seq_lseek(struct file *, loff_t, int);
...
@@ -15,9 +15,17 @@ extern void save_stack_trace_tsk(struct task_struct *tsk,
				struct stack_trace *trace);
extern void print_stack_trace(struct stack_trace *trace, int spaces);

#ifdef CONFIG_USER_STACKTRACE_SUPPORT
extern void save_stack_trace_user(struct stack_trace *trace);
#else
# define save_stack_trace_user(trace)		do { } while (0)
#endif

#else
# define save_stack_trace(trace)		do { } while (0)
# define save_stack_trace_tsk(tsk, trace)	do { } while (0)
# define save_stack_trace_user(trace)		do { } while (0)
# define print_stack_trace(trace, spaces)	do { } while (0)
#endif
...
@@ -723,7 +723,7 @@ int do_one_initcall(initcall_t fn)
		disable_boot_trace();
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
		trace_boot_ret(&ret, fn);
		printk("initcall %pF returned %d after %Ld usecs\n", fn,
			ret.result, ret.duration);
...
@@ -1127,7 +1127,6 @@ NORET_TYPE void do_exit(long code)
	preempt_disable();
	/* causes final put_task_struct in finish_task_switch(). */
	tsk->state = TASK_DEAD;

	schedule();
	BUG();
	/* Avoid "noreturn function does return". */
...
@@ -47,6 +47,7 @@
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/acct.h>
@@ -139,6 +140,7 @@ void free_task(struct task_struct *tsk)
	prop_local_destroy_single(&tsk->dirties);
	free_thread_info(tsk->stack);
	rt_mutex_debug_task_free(tsk);
	ftrace_retfunc_exit_task(tsk);
	free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);
@@ -1269,6 +1271,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
	total_forks++;
	spin_unlock(&current->sighand->siglock);
	write_unlock_irq(&tasklist_lock);
	ftrace_retfunc_init_task(p);
	proc_fork_connector(p);
	cgroup_post_fork(p);
	return p;
...
@@ -22,7 +22,6 @@
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/ftrace.h>

#include "power.h"
@@ -257,7 +256,7 @@ static int create_image(int platform_mode)
int hibernation_snapshot(int platform_mode)
{
	int error;

	/* Free memory before shutting down devices. */
	error = swsusp_shrink_memory();
@@ -269,7 +268,6 @@ int hibernation_snapshot(int platform_mode)
		goto Close;

	suspend_console();
	ftrace_save = __ftrace_enabled_save();
	error = device_suspend(PMSG_FREEZE);
	if (error)
		goto Recover_platform;
@@ -299,7 +297,6 @@ int hibernation_snapshot(int platform_mode)
 Resume_devices:
	device_resume(in_suspend ?
		(error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
	__ftrace_enabled_restore(ftrace_save);
	resume_console();
 Close:
	platform_end(platform_mode);
@@ -370,11 +367,10 @@ static int resume_target_kernel(void)
int hibernation_restore(int platform_mode)
{
	int error;

	pm_prepare_console();
	suspend_console();
	ftrace_save = __ftrace_enabled_save();
	error = device_suspend(PMSG_QUIESCE);
	if (error)
		goto Finish;
@@ -389,7 +385,6 @@ int hibernation_restore(int platform_mode)
		platform_restore_cleanup(platform_mode);
	device_resume(PMSG_RECOVER);
 Finish:
	__ftrace_enabled_restore(ftrace_save);
	resume_console();
	pm_restore_console();
	return error;
@@ -402,7 +397,7 @@ int hibernation_restore(int platform_mode)
int hibernation_platform_enter(void)
{
	int error;

	if (!hibernation_ops)
		return -ENOSYS;
@@ -417,7 +412,6 @@ int hibernation_platform_enter(void)
		goto Close;

	suspend_console();
	ftrace_save = __ftrace_enabled_save();
	error = device_suspend(PMSG_HIBERNATE);
	if (error) {
		if (hibernation_ops->recover)
@@ -452,7 +446,6 @@ int hibernation_platform_enter(void)
		hibernation_ops->finish();
 Resume_devices:
	device_resume(PMSG_RESTORE);
	__ftrace_enabled_restore(ftrace_save);
	resume_console();
 Close:
	hibernation_ops->end();
...
@@ -22,7 +22,6 @@
#include <linux/freezer.h>
#include <linux/vmstat.h>
#include <linux/syscalls.h>
#include <linux/ftrace.h>

#include "power.h"
@@ -317,7 +316,7 @@ static int suspend_enter(suspend_state_t state)
 */
int suspend_devices_and_enter(suspend_state_t state)
{
	int error;

	if (!suspend_ops)
		return -ENOSYS;
@@ -328,7 +327,6 @@ int suspend_devices_and_enter(suspend_state_t state)
		goto Close;
	}
	suspend_console();
	ftrace_save = __ftrace_enabled_save();
	suspend_test_start();
	error = device_suspend(PMSG_SUSPEND);
	if (error) {
@@ -360,7 +358,6 @@ int suspend_devices_and_enter(suspend_state_t state)
	suspend_test_start();
	device_resume(PMSG_RESUME);
	suspend_test_finish("resume devices");
	__ftrace_enabled_restore(ftrace_save);
	resume_console();
 Close:
	if (suspend_ops->end)
...
@@ -5901,6 +5901,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
	 * The idle tasks have their own, simple scheduling class:
	 */
	idle->sched_class = &idle_sched_class;
	ftrace_retfunc_init_task(idle);
}

/*
...
@@ -3,6 +3,9 @@
# select HAVE_FUNCTION_TRACER:
#

config USER_STACKTRACE_SUPPORT
	bool

config NOP_TRACER
	bool
...
@@ -1498,10 +1498,77 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
#ifdef CONFIG_FUNCTION_RET_TRACER

static atomic_t ftrace_retfunc_active;

/* The callback that hooks the return of a function */
trace_function_return_t ftrace_function_return =
			(trace_function_return_t)ftrace_stub;
/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
int i;
int ret = 0;
unsigned long flags;
int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
struct task_struct *g, *t;
for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
* sizeof(struct ftrace_ret_stack),
GFP_KERNEL);
if (!ret_stack_list[i]) {
start = 0;
end = i;
ret = -ENOMEM;
goto free;
}
}
read_lock_irqsave(&tasklist_lock, flags);
do_each_thread(g, t) {
if (start == end) {
ret = -EAGAIN;
goto unlock;
}
if (t->ret_stack == NULL) {
t->ret_stack = ret_stack_list[start++];
t->curr_ret_stack = -1;
atomic_set(&t->trace_overrun, 0);
}
} while_each_thread(g, t);
unlock:
read_unlock_irqrestore(&tasklist_lock, flags);
free:
for (i = start; i < end; i++)
kfree(ret_stack_list[i]);
return ret;
}
/* Allocate a return stack for each task */
static int start_return_tracing(void)
{
struct ftrace_ret_stack **ret_stack_list;
int ret;
ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
sizeof(struct ftrace_ret_stack *),
GFP_KERNEL);
if (!ret_stack_list)
return -ENOMEM;
do {
ret = alloc_retstack_tasklist(ret_stack_list);
} while (ret == -EAGAIN);
kfree(ret_stack_list);
return ret;
}
int register_ftrace_return(trace_function_return_t func)
{
	int ret = 0;
@@ -1516,7 +1583,12 @@ int register_ftrace_return(trace_function_return_t func)
		ret = -EBUSY;
		goto out;
	}
atomic_inc(&ftrace_retfunc_active);
ret = start_return_tracing();
if (ret) {
atomic_dec(&ftrace_retfunc_active);
goto out;
}
	ftrace_tracing_type = FTRACE_TYPE_RETURN;
	ftrace_function_return = func;
	ftrace_startup();
@@ -1530,6 +1602,7 @@ void unregister_ftrace_return(void)
{
	mutex_lock(&ftrace_sysctl_lock);
atomic_dec(&ftrace_retfunc_active);
	ftrace_function_return = (trace_function_return_t)ftrace_stub;
	ftrace_shutdown();
	/* Restore normal tracing type */
@@ -1537,6 +1610,32 @@ void unregister_ftrace_return(void)
	mutex_unlock(&ftrace_sysctl_lock);
}
/* Allocate a return stack for newly created task */
void ftrace_retfunc_init_task(struct task_struct *t)
{
if (atomic_read(&ftrace_retfunc_active)) {
t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
* sizeof(struct ftrace_ret_stack),
GFP_KERNEL);
if (!t->ret_stack)
return;
t->curr_ret_stack = -1;
atomic_set(&t->trace_overrun, 0);
} else
t->ret_stack = NULL;
}
void ftrace_retfunc_exit_task(struct task_struct *t)
{
struct ftrace_ret_stack *ret_stack = t->ret_stack;
t->ret_stack = NULL;
/* NULL must become visible to IRQs before we free it: */
barrier();
kfree(ret_stack);
}
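The NULL-then-barrier-then-kfree order in ftrace_retfunc_exit_task matters because an interrupt can sample t->ret_stack mid-teardown. A compressed illustration (userspace C; a plain compiler barrier stands in for the kernel's barrier(), and the struct shapes are invented):

#include <stdlib.h>

struct frame { unsigned long ret; };
struct task  { struct frame *ret_stack; };

static void retstack_exit(struct task *t)
{
	struct frame *ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* the NULL must be visible before the memory is recycled, so an
	 * observer that still sees the old pointer also sees valid data */
	__asm__ __volatile__("" ::: "memory");
	free(ret_stack);
}

int main(void)
{
	struct task t = { malloc(50 * sizeof(struct frame)) };

	retstack_exit(&t);
	return 0;
}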
#endif
...
@@ -18,8 +18,46 @@
#include "trace.h"

/*
 * A fast way to enable or disable all ring buffers is to
* call tracing_on or tracing_off. Turning off the ring buffers
* prevents all ring buffers from being recorded to.
 * Turning this switch on makes it OK to write to the
* ring buffer, if the ring buffer is enabled itself.
*
 * There are three layers that must be on in order to write
* to the ring buffer.
*
* 1) This global flag must be set.
* 2) The ring buffer must be enabled for recording.
* 3) The per cpu buffer must be enabled for recording.
*
* In case of an anomaly, this global flag has a bit set that
 * will permanently disable all ring buffers.
*/
/*
* Global flag to disable all recording to ring buffers
* This has two bits: ON, DISABLED
*
 *   ON   DISABLED
 * ----   --------
 *   0       0     : ring buffers are off
 *   1       0     : ring buffers are on
 *   X       1     : ring buffers are permanently disabled
*/
enum {
RB_BUFFERS_ON_BIT = 0,
RB_BUFFERS_DISABLED_BIT = 1,
};
enum {
RB_BUFFERS_ON = 1 << RB_BUFFERS_ON_BIT,
RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT,
};
static long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
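How the two bits interact is easiest to see with the writer-side test spelled out. A sketch (userspace C, with plain bit operations in place of the kernel's atomic set_bit/clear_bit):

#include <stdio.h>

enum { RB_ON = 1 << 0, RB_DISABLED = 1 << 1 };

static long flags = RB_ON;

/* writers compare against exactly RB_ON, so a set DISABLED bit
 * fails the test even while the ON bit is still set */
static int can_write(void)
{
	return flags == RB_ON;
}

int main(void)
{
	printf("on: %d\n", can_write());	/* 1 */
	flags |= RB_DISABLED;			/* tracing_off_permanent() */
	printf("disabled: %d\n", can_write());	/* 0, ON bit still set */
	flags = RB_ON;
	flags &= ~RB_ON;			/* tracing_off() */
	printf("off: %d\n", can_write());	/* 0 */
	return 0;
}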
/**
 * tracing_on - enable all tracing buffers
@@ -29,7 +67,7 @@ static int ring_buffers_off __read_mostly;
 */
void tracing_on(void)
{
	set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
}

/**
@@ -42,7 +80,18 @@ void tracing_on(void)
 */
void tracing_off(void)
{
	clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
}
/**
* tracing_off_permanent - permanently disable ring buffers
*
* This function, once called, will disable all ring buffers
 * permanently.
*/
void tracing_off_permanent(void)
{
set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
}

#include "trace.h"
@@ -1185,7 +1234,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
	struct ring_buffer_event *event;
	int cpu, resched;

	if (ring_buffer_flags != RB_BUFFERS_ON)
		return NULL;

	if (atomic_read(&buffer->record_disabled))
@@ -1297,7 +1346,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
	int ret = -EBUSY;
	int cpu, resched;

	if (ring_buffer_flags != RB_BUFFERS_ON)
		return -EBUSY;

	if (atomic_read(&buffer->record_disabled))
@@ -2178,12 +2227,14 @@ static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	long *p = filp->private_data;
	char buf[64];
	int r;

	if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
		r = sprintf(buf, "permanently disabled\n");
else
r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
@@ -2192,7 +2243,7 @@ static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	long *p = filp->private_data;
	char buf[64];
	long val;
	int ret;
@@ -2209,8 +2260,10 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
	if (ret < 0)
		return ret;

	if (val)
		set_bit(RB_BUFFERS_ON_BIT, p);
else
clear_bit(RB_BUFFERS_ON_BIT, p);
	(*ppos)++;
@@ -2232,7 +2285,7 @@ static __init int rb_init_debugfs(void)
	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("tracing_on", 0644, d_tracer,
				    &ring_buffer_flags, &rb_simple_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'tracing_on' entry\n");
...
@@ -30,6 +30,7 @@
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/kprobes.h>
#include <linux/seq_file.h>
#include <linux/writeback.h>
#include <linux/stacktrace.h>
@@ -275,6 +276,8 @@ static const char *trace_options[] = {
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	NULL
};
@@ -421,6 +424,28 @@ trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
	return trace_seq_putmem(s, hex, j);
}
static int
trace_seq_path(struct trace_seq *s, struct path *path)
{
unsigned char *p;
if (s->len >= (PAGE_SIZE - 1))
return 0;
p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
if (!IS_ERR(p)) {
p = mangle_path(s->buffer + s->len, p, "\n");
if (p) {
s->len = p - s->buffer;
return 1;
}
} else {
s->buffer[s->len++] = '?';
return 1;
}
return 0;
}
static void
trace_seq_reset(struct trace_seq *s)
{
@@ -660,6 +685,21 @@ static void trace_init_cmdlines(void)
static int trace_stop_count;
static DEFINE_SPINLOCK(tracing_start_lock);
/**
* ftrace_off_permanent - disable all ftrace code permanently
*
 * This should only be called when a serious anomaly has
 * been detected. This will turn off the function tracing,
 * ring buffers, and other tracing utilities. It takes no
* locks and can be called from any context.
*/
void ftrace_off_permanent(void)
{
tracing_disabled = 1;
ftrace_stop();
tracing_off_permanent();
}
/**
 * tracing_start - quick start of the tracer
 *
@@ -801,6 +841,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
	entry->preempt_count	= pc & 0xff;
	entry->pid		= (tsk) ? tsk->pid : 0;
	entry->tgid		= (tsk) ? tsk->tgid : 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
@@ -918,6 +959,44 @@ void __trace_stack(struct trace_array *tr,
	ftrace_trace_stack(tr, data, flags, skip, preempt_count());
}
static void ftrace_trace_userstack(struct trace_array *tr,
struct trace_array_cpu *data,
unsigned long flags, int pc)
{
struct ring_buffer_event *event;
struct userstack_entry *entry;
struct stack_trace trace;
unsigned long irq_flags;
if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
return;
event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
&irq_flags);
if (!event)
return;
entry = ring_buffer_event_data(event);
tracing_generic_entry_update(&entry->ent, flags, pc);
entry->ent.type = TRACE_USER_STACK;
memset(&entry->caller, 0, sizeof(entry->caller));
trace.nr_entries = 0;
trace.max_entries = FTRACE_STACK_ENTRIES;
trace.skip = 0;
trace.entries = entry->caller;
save_stack_trace_user(&trace);
ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
}
void __trace_userstack(struct trace_array *tr,
struct trace_array_cpu *data,
unsigned long flags)
{
ftrace_trace_userstack(tr, data, flags, preempt_count());
}
static void
ftrace_trace_special(void *__tr, void *__data,
		     unsigned long arg1, unsigned long arg2, unsigned long arg3,
@@ -941,6 +1020,7 @@ ftrace_trace_special(void *__tr, void *__data,
	entry->arg3			= arg3;
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
	ftrace_trace_stack(tr, data, irq_flags, 4, pc);
	ftrace_trace_userstack(tr, data, irq_flags, pc);

	trace_wake_up();
}
@@ -979,6 +1059,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
	entry->next_cpu		= task_cpu(next);
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
	ftrace_trace_stack(tr, data, flags, 5, pc);
	ftrace_trace_userstack(tr, data, flags, pc);
}

void
@@ -1008,6 +1089,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
	entry->next_cpu			= task_cpu(wakee);
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
	ftrace_trace_stack(tr, data, flags, 6, pc);
	ftrace_trace_userstack(tr, data, flags, pc);

	trace_wake_up();
}
@@ -1387,6 +1469,78 @@ seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
	return ret;
}
/*
 * Print "object[+offset]" when the address falls inside a file-backed
 * mapping; fall back to (or additionally emit) the raw address.
 */
static inline int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
				    unsigned long ip, unsigned long sym_flags)
{
	struct file *file = NULL;
	unsigned long vmstart = 0;
	int ret = 1;

	if (mm) {
		const struct vm_area_struct *vma;

		down_read(&mm->mmap_sem);
		vma = find_vma(mm, ip);
		if (vma) {
			file = vma->vm_file;
			vmstart = vma->vm_start;
		}
		if (file) {
			ret = trace_seq_path(s, &file->f_path);
			if (ret)
				ret = trace_seq_printf(s, "[+0x%lx]",
						       ip - vmstart);
		}
		up_read(&mm->mmap_sem);
	}
	if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
		ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
	return ret;
}
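find_vma() plus vm_start is what turns an absolute user address into the object-relative "path[+0xoff]" form that survives ASLR. Purely as an illustration of the same resolution, here is a userspace sketch that scans /proc/self/maps; it is an analogue for experimentation, not part of the patch:

	#include <stdio.h>

	static void print_user_ip(unsigned long ip)
	{
		FILE *maps = fopen("/proc/self/maps", "r");
		char line[512];

		if (!maps)
			return;
		while (fgets(line, sizeof(line), maps)) {
			unsigned long start, end;
			char path[256] = "";

			/* start-end perms offset dev inode [path] */
			if (sscanf(line, "%lx-%lx %*s %*s %*s %*s %255s",
				   &start, &end, path) < 2)
				continue;
			if (ip >= start && ip < end) {
				printf("%s[+0x%lx]\n", path, ip - start);
				break;
			}
		}
		fclose(maps);
	}

	int main(void)
	{
		print_user_ip((unsigned long)main);
		return 0;
	}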
static int
seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
		      unsigned long sym_flags)
{
	struct mm_struct *mm = NULL;
	int ret = 1;
	unsigned int i;

	if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
		struct task_struct *task;
		/*
		 * we do the lookup on the thread group leader,
		 * since individual threads might have already quit!
		 */
		rcu_read_lock();
		task = find_task_by_vpid(entry->ent.tgid);
		if (task)
			mm = get_task_mm(task);
		rcu_read_unlock();
	}

	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
		unsigned long ip = entry->caller[i];

		if (ip == ULONG_MAX || !ret)
			break;
		if (i && ret)
			ret = trace_seq_puts(s, " <- ");
		if (!ip) {
			if (ret)
				ret = trace_seq_puts(s, "??");
			continue;
		}
		if (!ret)
			break;
		if (ret)
			ret = seq_print_user_ip(s, mm, ip, sym_flags);
	}

	if (mm)
		mmput(mm);
	return ret;
}
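Note that object resolution is deferred until the trace is read: the ring buffer holds only raw addresses plus the recorded tgid, and the mm lookup happens here at print time. If the traced process has exited by then, get_task_mm() returns NULL and seq_print_user_ip() falls back to printing the bare addresses.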
static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "# _------=> CPU# \n");
@@ -1702,6 +1856,15 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
		       field->line);
		break;
	}
	case TRACE_USER_STACK: {
		struct userstack_entry *field;

		trace_assign_type(field, entry);

		seq_print_userip_objs(field, s, sym_flags);
		trace_seq_putc(s, '\n');
		break;
	}
	default:
		trace_seq_printf(s, "Unknown type %d\n", entry->type);
	}
@@ -1853,6 +2016,19 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
		       field->line);
		break;
	}
	case TRACE_USER_STACK: {
		struct userstack_entry *field;

		trace_assign_type(field, entry);

		ret = seq_print_userip_objs(field, s, sym_flags);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		ret = trace_seq_putc(s, '\n');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		break;
	}
	}
	return TRACE_TYPE_HANDLED;
}
@@ -1912,6 +2088,7 @@ static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
		break;
	}
	case TRACE_SPECIAL:
	case TRACE_USER_STACK:
	case TRACE_STACK: {
		struct special_entry *field;
@@ -2000,6 +2177,7 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
		break;
	}
	case TRACE_SPECIAL:
	case TRACE_USER_STACK:
	case TRACE_STACK: {
		struct special_entry *field;
@@ -2054,6 +2232,7 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
		break;
	}
	case TRACE_SPECIAL:
	case TRACE_USER_STACK:
	case TRACE_STACK: {
		struct special_entry *field;
@@ -3488,6 +3667,9 @@ void ftrace_dump(void)
		atomic_inc(&global_trace.data[cpu]->disabled);
	}

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	iter.tr = &global_trace;
...
@@ -26,6 +26,7 @@ enum trace_type {
	TRACE_BOOT_CALL,
	TRACE_BOOT_RET,
	TRACE_FN_RET,
	TRACE_USER_STACK,

	__TRACE_LAST_TYPE
};
@@ -42,6 +43,7 @@ struct trace_entry {
	unsigned char flags;
	unsigned char preempt_count;
	int pid;
	int tgid;
};

/*
@@ -99,6 +101,11 @@ struct stack_entry {
	unsigned long caller[FTRACE_STACK_ENTRIES];
};
struct userstack_entry {
	struct trace_entry ent;
	unsigned long caller[FTRACE_STACK_ENTRIES];
};
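userstack_entry deliberately mirrors stack_entry: a fixed-size array of raw return addresses. The tgid field added to trace_entry above is what later lets seq_print_userip_objs() find the owning mm when the trace is printed.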
/*
 * ftrace_printk entry:
 */
@@ -240,6 +247,7 @@ extern void __ftrace_bad_type(void);
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct trace_field_cont, TRACE_CONT); \
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct special_entry, 0);		\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
@@ -500,6 +508,8 @@ enum trace_iterator_flags {
	TRACE_ITER_PREEMPTONLY = 0x800,
	TRACE_ITER_BRANCH = 0x1000,
	TRACE_ITER_ANNOTATE = 0x2000,
	TRACE_ITER_USERSTACKTRACE = 0x4000,
	TRACE_ITER_SYM_USEROBJ = 0x8000
};

/*
...
@@ -18,12 +18,14 @@ struct header_iter {
static struct trace_array *mmio_trace_array;
static bool overrun_detected;
static unsigned long prev_overruns;

static void mmio_reset_data(struct trace_array *tr)
{
	int cpu;

	overrun_detected = false;
	prev_overruns = 0;
	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
@@ -123,16 +125,12 @@ static void mmio_close(struct trace_iterator *iter)
static unsigned long count_overruns(struct trace_iterator *iter)
{
	unsigned long cnt = 0;
	unsigned long over = ring_buffer_overruns(iter->tr->buffer);

	/* report only the overruns that happened since the last read */
	if (over > prev_overruns)
		cnt = over - prev_overruns;
	prev_overruns = over;
	return cnt;
}
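ring_buffer_overruns() reports a cumulative total over the lifetime of the buffer, so count_overruns() uses the new prev_overruns bookkeeping to report only what is new since the last read. The same pattern in isolation, as a minimal sketch (hypothetical helper name, illustrative only):

	#include <stdio.h>

	/* turn a monotonically increasing total into "new since last poll" */
	static unsigned long delta_since_last_poll(unsigned long total,
						   unsigned long *prev)
	{
		unsigned long delta = 0;

		if (total > *prev)
			delta = total - *prev;
		*prev = total;
		return delta;
	}

	int main(void)
	{
		unsigned long prev = 0;

		printf("%lu\n", delta_since_last_poll(5, &prev)); /* 5 */
		printf("%lu\n", delta_since_last_poll(8, &prev)); /* 3 */
		return 0;
	}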
...
@@ -130,11 +130,13 @@ my %weak;		# List of weak functions
my %convert;		# List of local functions used that need conversion

my $type;
my $nm_regex;		# Find the local functions (return function)
my $section_regex;	# Find the start of a section
my $function_regex;	# Find the name of a function
			#   (return offset and func name)
my $mcount_regex;	# Find the call site to mcount (return offset)
my $alignment;		# The .align value to use for $mcount_section
my $section_type;	# Section header plus possible alignment command
if ($arch eq "x86") { if ($arch eq "x86") {
if ($bits == 64) { if ($bits == 64) {
...@@ -144,9 +146,18 @@ if ($arch eq "x86") { ...@@ -144,9 +146,18 @@ if ($arch eq "x86") {
} }
} }
#
# We base the defaults on i386; other archs may
# override them in the if statements below.
#
$nm_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\S+)";
$section_regex = "Disassembly of section\\s+(\\S+):";
$function_regex = "^([0-9a-fA-F]+)\\s+<(.*?)>:";
$mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount\$";
$section_type = '@progbits';
$type = ".long";
if ($arch eq "x86_64") { if ($arch eq "x86_64") {
$section_regex = "Disassembly of section\\s+(\\S+):";
$function_regex = "^([0-9a-fA-F]+)\\s+<(.*?)>:";
$mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount([+-]0x[0-9a-zA-Z]+)?\$"; $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount([+-]0x[0-9a-zA-Z]+)?\$";
$type = ".quad"; $type = ".quad";
$alignment = 8; $alignment = 8;
...@@ -158,10 +169,6 @@ if ($arch eq "x86_64") { ...@@ -158,10 +169,6 @@ if ($arch eq "x86_64") {
$cc .= " -m64"; $cc .= " -m64";
} elsif ($arch eq "i386") { } elsif ($arch eq "i386") {
$section_regex = "Disassembly of section\\s+(\\S+):";
$function_regex = "^([0-9a-fA-F]+)\\s+<(.*?)>:";
$mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\smcount\$";
$type = ".long";
$alignment = 4; $alignment = 4;
# force flags for this arch # force flags for this arch
...@@ -170,6 +177,27 @@ if ($arch eq "x86_64") { ...@@ -170,6 +177,27 @@ if ($arch eq "x86_64") {
$objcopy .= " -O elf32-i386"; $objcopy .= " -O elf32-i386";
$cc .= " -m32"; $cc .= " -m32";
} elsif ($arch eq "sh") {
$alignment = 2;
# force flags for this arch
$ld .= " -m shlelf_linux";
$objcopy .= " -O elf32-sh-linux";
$cc .= " -m32";
} elsif ($arch eq "powerpc") {
$nm_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\.?\\S+)";
$function_regex = "^([0-9a-fA-F]+)\\s+<(\\.?.*?)>:";
$mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s\\.?_mcount\$";
if ($bits == 64) {
$type = ".quad";
}
} elsif ($arch eq "arm") {
$alignment = 2;
$section_type = '%progbits';
} else { } else {
die "Arch $arch is not supported with CONFIG_FTRACE_MCOUNT_RECORD"; die "Arch $arch is not supported with CONFIG_FTRACE_MCOUNT_RECORD";
} }
@@ -239,7 +267,7 @@ if (!$found_version) {
#
open (IN, "$nm $inputfile|") || die "error running $nm";
while (<IN>) {
	if (/$nm_regex/) {
		$locals{$1} = 1;
	} elsif (/^[0-9a-fA-F]+\s+([wW])\s+(\S+)/) {
		$weak{$2} = $1;
@@ -290,8 +318,8 @@ sub update_funcs
	if (!$opened) {
		open(FILE, ">$mcount_s") || die "can't create $mcount_s\n";
		$opened = 1;
		print FILE "\t.section $mcount_section,\"a\",$section_type\n";
		print FILE "\t.align $alignment\n" if (defined($alignment));
	}
	printf FILE "\t%s %s + %d\n", $type, $ref_func, $offsets[$i] - $offset;
}
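The former "@progbits" literal becomes the $section_type variable because ARM's assembler treats "@" as a comment character, so the ARM branch above substitutes "%progbits". Likewise, ".align" is now emitted only when a branch actually set $alignment; powerpc, for instance, leaves it undefined.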
...