Commit b3c18725 authored by Anton Blanchard, committed by Michael Ellerman

powerpc/ftrace: simplify prepare_ftrace_return

Instead of passing in the stack address of the link register
to be modified, just pass in the old value and return the
new value and rely on ftrace_graph_caller to do the
modification.

This removes the exception handling around the stack update -
it isn't needed and we weren't consistent about it. Later on
we would do an unprotected modification:

       if (!ftrace_graph_entry(&trace)) {
               *parent = old;
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 7d56c65a
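
In C terms, the interface change looks like this (both prototypes are taken from the ftrace.c hunk below):

	/* Before: the new return address is written through a pointer
	 * into the caller's stack frame, inside prepare_ftrace_return.
	 */
	void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr);

	/* After: plain value in, value out. The assembly stub
	 * ftrace_graph_caller stores the returned address itself.
	 */
	unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip);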
arch/powerpc/kernel/entry_32.S
@@ -1424,12 +1424,18 @@ _GLOBAL(ftrace_graph_caller)
 	lwz	r4, 44(r1)
 	subi	r4, r4, MCOUNT_INSN_SIZE
 
-	/* get the parent address */
-	addi	r3, r1, 52
+	/* Grab the LR out of the caller stack frame */
+	lwz	r3,52(r1)
 
 	bl	prepare_ftrace_return
 	nop
 
+	/*
+	 * prepare_ftrace_return gives us the address we divert to.
+	 * Change the LR in the callers stack frame to this.
+	 */
+	stw	r3,52(r1)
+
 	MCOUNT_RESTORE_FRAME
 	/* old link register ends up in ctr reg */
 	bctr
arch/powerpc/kernel/entry_64.S
@@ -1227,13 +1227,20 @@ _GLOBAL(ftrace_graph_caller)
 	ld	r4, 128(r1)
 	subi	r4, r4, MCOUNT_INSN_SIZE
 
-	/* get the parent address */
+	/* Grab the LR out of the caller stack frame */
 	ld	r11, 112(r1)
-	addi	r3, r11, 16
+	ld	r3, 16(r11)
 
 	bl	prepare_ftrace_return
 	nop
 
+	/*
+	 * prepare_ftrace_return gives us the address we divert to.
+	 * Change the LR in the callers stack frame to this.
+	 */
+	ld	r11, 112(r1)
+	std	r3, 16(r11)
+
 	ld	r0, 128(r1)
 	mtlr	r0
 	addi	r1, r1, 112
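
Both stubs now follow the same pattern: load the saved LR, pass it in, store the returned divert address back. A minimal sketch of that flow in C terms (hypothetical helper, not kernel code; "sp" stands in for r1, and 52 is the saved-LR offset shown in the 32-bit hunk above):

	unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip);

	/* Hypothetical sketch: what the 32-bit ftrace_graph_caller now does. */
	static void graph_caller_sketch(unsigned long sp, unsigned long ip)
	{
		unsigned long *lr_slot = (unsigned long *)(sp + 52);

		*lr_slot = prepare_ftrace_return(*lr_slot, ip);	/* old LR in, divert address out */
	}

The 64-bit stub does the same thing one indirection deeper: 112(r1) holds a pointer to the caller's stack frame, and the LR lives 16 bytes into that frame. Note that it reloads r11 after the call, presumably because r11 is a volatile register that prepare_ftrace_return is free to clobber.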
arch/powerpc/kernel/ftrace.c
@@ -512,67 +512,34 @@ int ftrace_disable_ftrace_graph_caller(void)
 /*
  * Hook the return address and push it in the stack of return addrs
- * in current thread info.
+ * in current thread info. Return the address we want to divert to.
  */
-void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
 {
-	unsigned long old;
-	int faulted;
 	struct ftrace_graph_ent trace;
 	unsigned long return_hooker;
 
 	if (unlikely(ftrace_graph_is_dead()))
-		return;
+		goto out;
 
 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
-		return;
+		goto out;
 
 	return_hooker = ppc_function_entry(return_to_handler);
 
-	/*
-	 * Protect against fault, even if it shouldn't
-	 * happen. This tool is too much intrusive to
-	 * ignore such a protection.
-	 */
-	asm volatile(
-		"1: " PPC_LL "%[old], 0(%[parent])\n"
-		"2: " PPC_STL "%[return_hooker], 0(%[parent])\n"
-		"   li %[faulted], 0\n"
-		"3:\n"
-
-		".section .fixup, \"ax\"\n"
-		"4: li %[faulted], 1\n"
-		"   b 3b\n"
-		".previous\n"
-
-		".section __ex_table,\"a\"\n"
-			PPC_LONG_ALIGN "\n"
-			PPC_LONG "1b,4b\n"
-			PPC_LONG "2b,4b\n"
-		".previous"
-
-		: [old] "=&r" (old), [faulted] "=r" (faulted)
-		: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
-		: "memory"
-	);
-
-	if (unlikely(faulted)) {
-		ftrace_graph_stop();
-		WARN_ON(1);
-		return;
-	}
-
-	trace.func = self_addr;
+	trace.func = ip;
 	trace.depth = current->curr_ret_stack + 1;
 
 	/* Only trace if the calling function expects to */
-	if (!ftrace_graph_entry(&trace)) {
-		*parent = old;
-		return;
-	}
+	if (!ftrace_graph_entry(&trace))
+		goto out;
 
-	if (ftrace_push_return_trace(old, self_addr, &trace.depth, 0) == -EBUSY)
-		*parent = old;
+	if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
+		goto out;
+
+	parent = return_hooker;
+out:
+	return parent;
 }
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
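
Read end to end, the function after this patch (assembled from the plus lines of the hunk above):

	unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
	{
		struct ftrace_graph_ent trace;
		unsigned long return_hooker;

		if (unlikely(ftrace_graph_is_dead()))
			goto out;

		if (unlikely(atomic_read(&current->tracing_graph_pause)))
			goto out;

		return_hooker = ppc_function_entry(return_to_handler);

		trace.func = ip;
		trace.depth = current->curr_ret_stack + 1;

		/* Only trace if the calling function expects to */
		if (!ftrace_graph_entry(&trace))
			goto out;

		if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
			goto out;

		parent = return_hooker;
	out:
		return parent;
	}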