Commit f25170a0 authored by Palmer Dabbelt

Merge patch series "riscv: stacktrace: Add USER_STACKTRACE support"

Jinjie Ruan <ruanjinjie@huawei.com> says:

Add RISC-V USER_STACKTRACE support, and fix the fp alignment bug
in perf_callchain_user() by the way as Björn pointed out.

* b4-shazam-merge:
  riscv: stacktrace: Add USER_STACKTRACE support
  riscv: Fix fp alignment bug in perf_callchain_user()

Link: https://lore.kernel.org/r/20240708032847.2998158-1-ruanjinjie@huawei.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
parents 5c178472 1a748331
......@@ -201,6 +201,7 @@ config RISCV
select THREAD_INFO_IN_TASK
select TRACE_IRQFLAGS_SUPPORT
select UACCESS_MEMCPY if !MMU
select USER_STACKTRACE_SUPPORT
select ZONE_DMA32 if 64BIT
config CLANG_SUPPORTS_DYNAMIC_FTRACE
......
......@@ -6,37 +6,9 @@
#include <asm/stacktrace.h>
/*
* Get the return address for a single stackframe and return a pointer to the
* next frame tail.
*/
static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry,
unsigned long fp, unsigned long reg_ra)
static bool fill_callchain(void *entry, unsigned long pc)
{
struct stackframe buftail;
unsigned long ra = 0;
unsigned long __user *user_frame_tail =
(unsigned long __user *)(fp - sizeof(struct stackframe));
/* Check accessibility of one struct frame_tail beyond */
if (!access_ok(user_frame_tail, sizeof(buftail)))
return 0;
if (__copy_from_user_inatomic(&buftail, user_frame_tail,
sizeof(buftail)))
return 0;
if (reg_ra != 0)
ra = reg_ra;
else
ra = buftail.ra;
fp = buftail.fp;
if (ra != 0)
perf_callchain_store(entry, ra);
else
return 0;
return fp;
return perf_callchain_store(entry, pc) == 0;
}
/*
......@@ -56,19 +28,7 @@ static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry,
void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
unsigned long fp = 0;
fp = regs->s0;
perf_callchain_store(entry, regs->epc);
fp = user_backtrace(entry, fp, regs->ra);
while (fp && !(fp & 0x3) && entry->nr < entry->max_stack)
fp = user_backtrace(entry, fp, 0);
}
static bool fill_callchain(void *entry, unsigned long pc)
{
return perf_callchain_store(entry, pc) == 0;
arch_stack_walk_user(fill_callchain, entry, regs);
}
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
......
......@@ -162,3 +162,46 @@ noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry, void
{
walk_stackframe(task, regs, consume_entry, cookie);
}
/*
 * Unwind one user-space stack frame.
 *
 * Reads the stackframe record (saved fp/ra pair) stored just below @fp in
 * user memory, reports the return address to @consume_entry, and returns
 * the caller's frame pointer so the walk can continue.
 *
 * @consume_entry: callback that records one return address; returns false
 *                 to stop the walk (e.g. the entry buffer is full)
 * @cookie:        opaque state passed through to @consume_entry
 * @fp:            frame pointer of the frame being unwound
 * @reg_ra:        if non-zero, use this as the return address instead of
 *                 the saved one (used for the innermost frame, whose ra
 *                 is still live in the registers)
 *
 * Returns the next frame pointer, or 0 to terminate the walk (inaccessible
 * frame, faulting copy, zero return address, or consumer declined).
 */
static unsigned long unwind_user_frame(stack_trace_consume_fn consume_entry,
				       void *cookie, unsigned long fp,
				       unsigned long reg_ra)
{
	struct stackframe buftail;
	unsigned long ra = 0;
	/* The frame record sits immediately below the frame pointer. */
	unsigned long __user *user_frame_tail =
		(unsigned long __user *)(fp - sizeof(struct stackframe));

	/* Check accessibility of one struct frame_tail beyond */
	if (!access_ok(user_frame_tail, sizeof(buftail)))
		return 0;
	/* Inatomic copy: this may run from NMI/perf context; fail softly. */
	if (__copy_from_user_inatomic(&buftail, user_frame_tail,
				      sizeof(buftail)))
		return 0;

	/* Prefer the live register ra for the innermost frame. */
	ra = reg_ra ? : buftail.ra;
	fp = buftail.fp;
	/* Stop on a null return address or when the consumer declines. */
	if (!ra || !consume_entry(cookie, ra))
		return 0;

	return fp;
}
/*
 * Walk a user-space call chain described by @regs, feeding each return
 * address to @consume_entry.
 *
 * The interrupted PC (epc) is reported first.  The innermost frame is then
 * unwound using the live ra register (its return address may not have been
 * spilled to the stack yet); subsequent frames follow the saved
 * frame-pointer (s0) chain.  The walk stops when a frame cannot be read,
 * the consumer declines an entry, or the frame pointer is zero or not
 * 8-byte aligned.
 */
void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
			  const struct pt_regs *regs)
{
	/* Drop the redundant zero-init: fp is assigned from s0 directly. */
	unsigned long fp = regs->s0;

	if (!consume_entry(cookie, regs->epc))
		return;

	/* First step uses the live ra; later frames read the saved one. */
	fp = unwind_user_frame(consume_entry, cookie, fp, regs->ra);
	while (fp && !(fp & 0x7))
		fp = unwind_user_frame(consume_entry, cookie, fp, 0);
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment