Commit 1c873be7 authored by Mike Frysinger

Blackfin: initial support for ftrace

Just the basic ftrace support here -- mcount and the ftrace stub.
Signed-off-by: Mike Frysinger <vapier@gentoo.org>
parent 6fa68e7a
arch/blackfin/Kconfig
@@ -19,6 +19,7 @@ config RWSEM_XCHGADD_ALGORITHM
 config BLACKFIN
 	def_bool y
+	select HAVE_FUNCTION_TRACER
 	select HAVE_IDE
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_BZIP2
...
arch/blackfin/include/asm/ftrace.h
-/* empty */
+/*
+ * Blackfin ftrace code
+ *
+ * Copyright 2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __ASM_BFIN_FTRACE_H__
+#define __ASM_BFIN_FTRACE_H__
+
+#define MCOUNT_INSN_SIZE	8 /* sizeof mcount call: LINK + CALL */
+
+#endif
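For context on that constant: MCOUNT_INSN_SIZE covers the two 32-bit instructions that GCC's -pg instrumentation places at the top of every profiled function on Blackfin -- the LINK that builds the stack frame and the CALL into the mcount stub. A rough sketch of an instrumented function entry (illustrative only, not part of this commit; the label and frame size are made up, and the exact sequence is whatever the compiler emits):

_some_function:
	LINK 0;		/* 4 bytes: pushes the caller's RETS and FP, points FP at the new frame */
	CALL __mcount;	/* 4 bytes: RETS now points just past this call */
	/* ...rest of the prologue and function body... */

This is why __mcount below can read the profiled function's caller from [FP + 4] (the RETS that LINK just saved) and can recover the profiled function's own entry address by subtracting MCOUNT_INSN_SIZE from its RETS.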
arch/blackfin/kernel/Makefile
@@ -15,6 +15,7 @@ else
 obj-y += time.o
 endif

+obj-$(CONFIG_FUNCTION_TRACER) += ftrace-entry.o
 obj-$(CONFIG_IPIPE) += ipipe.o
 obj-$(CONFIG_IPIPE_TRACE_MCOUNT) += mcount.o
 obj-$(CONFIG_BFIN_GPTIMERS) += gptimers.o
...
arch/blackfin/kernel/bfin_ksyms.c
@@ -103,3 +103,8 @@ EXPORT_SYMBOL(__raw_smp_mark_barrier_asm);
 EXPORT_SYMBOL(__raw_smp_check_barrier_asm);
 #endif
 #endif
+
+#ifdef CONFIG_FUNCTION_TRACER
+extern void _mcount(void);
+EXPORT_SYMBOL(_mcount);
+#endif
arch/blackfin/kernel/ftrace-entry.S

/*
* mcount and friends -- ftrace stuff
*
* Copyright (C) 2009 Analog Devices Inc.
* Licensed under the GPL-2 or later.
*/
#include <linux/linkage.h>
#include <asm/ftrace.h>
.text
/* GCC will have called us before setting up the function prologue, so we
* can clobber the normal scratch registers, but we need to make sure to
* save/restore the registers used for argument passing (R0-R2) in case
* the profiled function is using them. With data registers, R3 is the
* only one we can blow away. With pointer registers, we have P0-P2.
*
* Upon entry, the RETS will point to the top of the current profiled
* function. And since GCC setup the frame for us, the previous function
* will be waiting there. mmmm pie.
*/
ENTRY(__mcount)
	/* save third function arg early so we can do testing below */
	[--sp] = r2;

	/* load the function pointer to the tracer */
	p0.l = _ftrace_trace_function;
	p0.h = _ftrace_trace_function;
	r3 = [p0];

	/* optional micro optimization: don't call the stub tracer */
	r2.l = _ftrace_stub;
	r2.h = _ftrace_stub;
	cc = r2 == r3;
	if ! cc jump .Ldo_trace;

	r2 = [sp++];
	rts;

.Ldo_trace:
	/* save first/second function arg and the return register */
	[--sp] = r0;
	[--sp] = r1;
	[--sp] = rets;

	/* setup the tracer function */
	p0 = r3;

	/* tracer(ulong frompc, ulong selfpc):
	 * frompc: the pc that did the call to ...
	 * selfpc: ... this location
	 * the selfpc itself will need adjusting for the mcount call
	 */
	r1 = rets;
	r0 = [fp + 4];
	r1 += -MCOUNT_INSN_SIZE;

	/* call the tracer */
	call (p0);

	/* restore state and get out of dodge */
	rets = [sp++];
	r1 = [sp++];
	r0 = [sp++];
	r2 = [sp++];

.globl _ftrace_stub
_ftrace_stub:
	rts;
ENDPROC(__mcount)
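For a sense of how the ftrace_trace_function pointer (loaded above as _ftrace_trace_function) ends up pointing at something other than the stub: a tracer hands the ftrace core a callback, the core installs it behind that pointer, the comparison against _ftrace_stub then fails, and __mcount invokes the callback with the two addresses it loaded into R0/R1. Below is a minimal sketch of such a registration, assuming the two-argument callback interface ftrace used around the time of this commit; the module, ops structure, and function names are illustrative and not part of this commit.

#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/module.h>

/* Illustrative callback; argument names follow the tracer(frompc, selfpc)
 * comment in __mcount above.  It runs on every traced function entry, so
 * keep it short and reentrant.
 */
static void example_trace_func(unsigned long frompc, unsigned long selfpc)
{
	/* e.g. record frompc/selfpc somewhere cheap */
}

static struct ftrace_ops example_ops = {
	.func = example_trace_func,
};

static int __init example_init(void)
{
	/* Ask the ftrace core to route mcount calls to our callback. */
	return register_ftrace_function(&example_ops);
}

static void __exit example_exit(void)
{
	unregister_ftrace_function(&example_ops);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");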