Commit b5ecc19e authored by Mark Rutland, committed by Will Deacon

arm64: stacktrace: always inline core stacktrace functions

The arm64 stacktrace code can be used in kprobe context, and so cannot
be safely probed. Some (but not all) of the unwind functions are
annotated with `NOKPROBE_SYMBOL()` to ensure this, while others are marked as
`__always_inline`, relying on the top-level unwind function being marked
as `noinstr`.

This patch has stacktrace.c consistently mark its internal unwind
functions as `__always_inline`, removing the need for `NOKPROBE_SYMBOL()`,
since the top-level unwind function (arch_stack_walk()) is marked as
`noinstr`. This is more consistent, and gives a simpler pattern to follow
for future additions to stacktrace.c.

There should be no functional change as a result of this patch.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Kalesh Singh <kaleshsingh@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Madhavan T. Venkataraman <madvenka@linux.microsoft.com>
Cc: Mark Brown <broonie@kernel.org>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20230411162943.203199-4-mark.rutland@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
parent ead6122c
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -25,8 +25,9 @@
  *
  * The regs must be on a stack currently owned by the calling task.
  */
-static __always_inline void unwind_init_from_regs(struct unwind_state *state,
-						  struct pt_regs *regs)
+static __always_inline void
+unwind_init_from_regs(struct unwind_state *state,
+		      struct pt_regs *regs)
 {
 	unwind_init_common(state, current);
@@ -42,7 +43,8 @@ static __always_inline void unwind_init_from_regs(struct unwind_state *state,
  *
  * The function which invokes this must be noinline.
  */
-static __always_inline void unwind_init_from_caller(struct unwind_state *state)
+static __always_inline void
+unwind_init_from_caller(struct unwind_state *state)
 {
 	unwind_init_common(state, current);
@@ -60,8 +62,9 @@ static __always_inline void unwind_init_from_caller(struct unwind_state *state)
  * duration of the unwind, or the unwind will be bogus. It is never valid to
  * call this for the current task.
  */
-static __always_inline void unwind_init_from_task(struct unwind_state *state,
-						  struct task_struct *task)
+static __always_inline void
+unwind_init_from_task(struct unwind_state *state,
+		      struct task_struct *task)
 {
 	unwind_init_common(state, task);
@@ -102,7 +105,8 @@ unwind_recover_return_address(struct unwind_state *state)
  * records (e.g. a cycle), determined based on the location and fp value of A
  * and the location (but not the fp value) of B.
  */
-static int notrace unwind_next(struct unwind_state *state)
+static __always_inline int
+unwind_next(struct unwind_state *state)
 {
 	struct task_struct *tsk = state->task;
 	unsigned long fp = state->fp;
@@ -120,10 +124,10 @@ static int notrace unwind_next(struct unwind_state *state)
 	return unwind_recover_return_address(state);
 }
-NOKPROBE_SYMBOL(unwind_next);

-static void notrace unwind(struct unwind_state *state,
-			   stack_trace_consume_fn consume_entry, void *cookie)
+static __always_inline void
+unwind(struct unwind_state *state, stack_trace_consume_fn consume_entry,
+       void *cookie)
 {
 	if (unwind_recover_return_address(state))
 		return;
@@ -138,7 +142,6 @@ static void notrace unwind(struct unwind_state *state,
 		break;
 	}
 }
-NOKPROBE_SYMBOL(unwind);

 /*
  * Per-cpu stacks are only accessible when unwinding the current task in a
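A note on the pattern itself, with a minimal sketch (all function names below are hypothetical, invented for illustration; this is not the actual stacktrace.c code): a helper marked `__always_inline` is folded into its caller and never emitted as an out-of-line symbol, so there is no address inside it for a kprobe to land on. The only remaining out-of-line boundary is the top-level function, which `noinstr` already protects, making per-helper NOKPROBE_SYMBOL() annotations redundant.

#include <linux/compiler.h>	/* READ_ONCE(), __always_inline, noinstr */

/* Folded into the caller: no symbol of its own, nothing to kprobe. */
static __always_inline int walk_one_frame(unsigned long *fp)
{
	/* A frame record stores the next fp at offset 0. */
	unsigned long next = READ_ONCE(*(unsigned long *)*fp);

	if (!next)
		return -1;	/* reached the end of the stack */
	*fp = next;
	return 0;
}

/*
 * The single out-of-line entry point. noinstr keeps compiler
 * instrumentation (and hence tracing/probing hooks) out of this
 * function, so the whole walk is safe to run in kprobe context.
 */
noinstr void example_stack_walk(unsigned long fp)
{
	while (!walk_one_frame(&fp))
		;
}

/*
 * For contrast, the pattern this commit removes required an explicit
 * opt-out for every out-of-line helper:
 *
 *	static int notrace walk_one_frame(unsigned long *fp) { ... }
 *	NOKPROBE_SYMBOL(walk_one_frame);
 */

The trade-off is a somewhat larger arch_stack_walk() after inlining, in exchange for not having to remember a NOKPROBE_SYMBOL() line for each new helper.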