x86/ftrace: Only have the builtin ftrace_regs_caller call direct hooks

If a direct hook is attached to a function that ftrace also has a callback
attached to, then ftrace_ops_list_func() must be used to iterate over the
registered ftrace callbacks. That iteration will also include the direct
ftrace_ops helper, which tells ftrace_regs_caller where to return to (the
direct callback, and not the function that called it).

As this direct helper only handles the case of ftrace callbacks attached to
the same function as the direct callback, the trampolines allocated for
ftrace callbacks (which are used only to call those callbacks) should never
be used to return back to a direct callback.
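
For illustration, here is a minimal sketch (not part of this patch) of how that
situation arises: a direct trampoline and an ordinary ftrace callback registered
on the same function. It assumes the register_ftrace_direct() /
register_ftrace_function() API of this kernel version; my_tramp, my_callback and
my_ops are made-up names, and wake_up_process() is only an example target, as in
samples/ftrace/ftrace-direct.c.

#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sched.h>

/*
 * In real code this is an assembly stub that saves and restores the
 * registers it clobbers (see samples/ftrace/ftrace-direct.c); it is only
 * declared here to keep the sketch short.
 */
extern void my_tramp(void);

/* An ordinary ftrace callback attached to the same function. */
static void my_callback(unsigned long ip, unsigned long parent_ip,
			struct ftrace_ops *op, struct pt_regs *regs)
{
}

static struct ftrace_ops my_ops = {
	.func	= my_callback,
	.flags	= FTRACE_OPS_FL_SAVE_REGS,
};

static int __init both_init(void)
{
	unsigned long ip = (unsigned long)wake_up_process;
	int ret;

	/* Direct call: the fentry site of wake_up_process() calls my_tramp. */
	ret = register_ftrace_direct(ip, (unsigned long)my_tramp);
	if (ret)
		return ret;

	/*
	 * Adding a normal callback on the same ip forces ftrace to use
	 * ftrace_ops_list_func(), which iterates my_ops plus the internal
	 * direct ops helper that points ftrace_regs_caller at my_tramp.
	 */
	ret = ftrace_set_filter_ip(&my_ops, ip, 0, 0);
	if (!ret)
		ret = register_ftrace_function(&my_ops);
	return ret;
}

static void __exit both_exit(void)
{
	unregister_ftrace_function(&my_ops);
	unregister_ftrace_direct((unsigned long)wake_up_process,
				 (unsigned long)my_tramp);
}

module_init(both_init);
module_exit(both_exit);
MODULE_LICENSE("GPL");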

Only copy the portion of ftrace_regs_caller that returns back to what called
it, and not the portion that returns back to the direct caller.

The direct ftrace_ops must then pick the ftrace_regs_caller builtin function
as its own trampoline to ensure that it will never have one allocated for
it (which would not include the handling of direct callbacks).

Link: http://lkml.kernel.org/r/20200422162750.495903799@goodmis.org

Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
parent 0b4f8ddc
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -367,13 +367,6 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
 	if (WARN_ON(ret < 0))
 		goto fail;
 
-	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
-		ip = trampoline + (ftrace_regs_caller_ret - ftrace_regs_caller);
-		ret = copy_from_kernel_nofault(ip, (void *)retq, RET_SIZE);
-		if (WARN_ON(ret < 0))
-			goto fail;
-	}
-
 	/*
 	 * The address of the ftrace_ops that is used for this trampoline
 	 * is stored at the end of the trampoline. This will be used to
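For context, the sizing logic in create_trampoline() (simplified here, not part
of the hunk above) picks the copy boundaries from the start/end labels, so with
ftrace_regs_caller_end moved before the direct-return handling, an allocated
trampoline no longer contains that path:

	unsigned long start_offset, end_offset, size;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
		start_offset = (unsigned long)ftrace_regs_caller;
		end_offset   = (unsigned long)ftrace_regs_caller_end;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		end_offset   = (unsigned long)ftrace_caller_end;
	}

	/*
	 * Only the [start, end) range is copied into the allocated
	 * trampoline; the rest of ftrace_regs_caller, including the code
	 * that returns to a direct callback, stays out of the copy.
	 */
	size = end_offset - start_offset;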
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -253,7 +253,7 @@ SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL)
 	 * The trampoline will add the code to jump
 	 * to the return.
 	 */
-SYM_INNER_LABEL(ftrace_regs_caller_ret, SYM_L_GLOBAL)
+SYM_INNER_LABEL(ftrace_regs_caller_end, SYM_L_GLOBAL)
 	jmp ftrace_epilogue
 
 	/* Swap the flags with orig_rax */
@@ -264,7 +264,6 @@ SYM_INNER_LABEL(ftrace_regs_caller_ret, SYM_L_GLOBAL)
 	restore_mcount_regs 8
 	/* Restore flags */
 	popfq
-SYM_INNER_LABEL(ftrace_regs_caller_end, SYM_L_GLOBAL)
 	UNWIND_HINT_RET_OFFSET
 	jmp ftrace_epilogue
 
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2388,6 +2388,14 @@ struct ftrace_ops direct_ops = {
 	.flags		= FTRACE_OPS_FL_IPMODIFY | FTRACE_OPS_FL_RECURSION_SAFE
 			  | FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS
 			  | FTRACE_OPS_FL_PERMANENT,
+	/*
+	 * By declaring the main trampoline as this trampoline
+	 * it will never have one allocated for it. Allocated
+	 * trampolines should not call direct functions.
+	 * The direct_ops should only be called by the builtin
+	 * ftrace_regs_caller trampoline.
+	 */
+	.trampoline	= FTRACE_REGS_ADDR,
 };
 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
 
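Presetting .trampoline works because the x86 arch_ftrace_update_trampoline()
only allocates a trampoline when one is not already set. Roughly (a simplified
sketch of that existing check, not part of this patch):

void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
{
	unsigned int size;

	if (ops->trampoline) {
		/*
		 * The ops already has a trampoline (direct_ops now points at
		 * the builtin ftrace_regs_caller via FTRACE_REGS_ADDR), so
		 * never replace it unless ftrace itself allocated it.
		 */
		if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
			return;
	} else {
		ops->trampoline = create_trampoline(ops, &size);
		if (!ops->trampoline)
			return;
		ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
	}

	/* ... the call site inside the trampoline is then patched ... */
}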