Commit 70791ce9 authored by Frederic Weisbecker

perf: Generalize callchain_store()

callchain_store() is the same on every archs, inline it in
perf_event.h and rename it to perf_callchain_store() to avoid
any collision.

This removes repetitive code.
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Paul Mackerras <paulus@samba.org>
Tested-by: Will Deacon <will.deacon@arm.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Stephane Eranian <eranian@google.com>
Cc: David Miller <davem@davemloft.net>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Borislav Petkov <bp@amd64.org>
parent c1a65932
...@@ -3001,13 +3001,6 @@ arch_initcall(init_hw_perf_events); ...@@ -3001,13 +3001,6 @@ arch_initcall(init_hw_perf_events);
/* /*
* Callchain handling code. * Callchain handling code.
*/ */
static inline void
callchain_store(struct perf_callchain_entry *entry,
u64 ip)
{
if (entry->nr < PERF_MAX_STACK_DEPTH)
entry->ip[entry->nr++] = ip;
}
/* /*
* The registers we're interested in are at the end of the variable * The registers we're interested in are at the end of the variable
...@@ -3039,7 +3032,7 @@ user_backtrace(struct frame_tail *tail, ...@@ -3039,7 +3032,7 @@ user_backtrace(struct frame_tail *tail,
if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail))) if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
return NULL; return NULL;
callchain_store(entry, buftail.lr); perf_callchain_store(entry, buftail.lr);
/* /*
* Frame pointers should strictly progress back up the stack * Frame pointers should strictly progress back up the stack
...@@ -3057,7 +3050,7 @@ perf_callchain_user(struct pt_regs *regs, ...@@ -3057,7 +3050,7 @@ perf_callchain_user(struct pt_regs *regs,
{ {
struct frame_tail *tail; struct frame_tail *tail;
callchain_store(entry, PERF_CONTEXT_USER); perf_callchain_store(entry, PERF_CONTEXT_USER);
if (!user_mode(regs)) if (!user_mode(regs))
regs = task_pt_regs(current); regs = task_pt_regs(current);
...@@ -3078,7 +3071,7 @@ callchain_trace(struct stackframe *fr, ...@@ -3078,7 +3071,7 @@ callchain_trace(struct stackframe *fr,
void *data) void *data)
{ {
struct perf_callchain_entry *entry = data; struct perf_callchain_entry *entry = data;
callchain_store(entry, fr->pc); perf_callchain_store(entry, fr->pc);
return 0; return 0;
} }
...@@ -3088,7 +3081,7 @@ perf_callchain_kernel(struct pt_regs *regs, ...@@ -3088,7 +3081,7 @@ perf_callchain_kernel(struct pt_regs *regs,
{ {
struct stackframe fr; struct stackframe fr;
callchain_store(entry, PERF_CONTEXT_KERNEL); perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
fr.fp = regs->ARM_fp; fr.fp = regs->ARM_fp;
fr.sp = regs->ARM_sp; fr.sp = regs->ARM_sp;
fr.lr = regs->ARM_lr; fr.lr = regs->ARM_lr;
......
...@@ -23,18 +23,6 @@ ...@@ -23,18 +23,6 @@
#include "ppc32.h" #include "ppc32.h"
#endif #endif
/*
* Store another value in a callchain_entry.
*/
static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
unsigned int nr = entry->nr;
if (nr < PERF_MAX_STACK_DEPTH) {
entry->ip[nr] = ip;
entry->nr = nr + 1;
}
}
/* /*
* Is sp valid as the address of the next kernel stack frame after prev_sp? * Is sp valid as the address of the next kernel stack frame after prev_sp?
...@@ -69,8 +57,8 @@ static void perf_callchain_kernel(struct pt_regs *regs, ...@@ -69,8 +57,8 @@ static void perf_callchain_kernel(struct pt_regs *regs,
lr = regs->link; lr = regs->link;
sp = regs->gpr[1]; sp = regs->gpr[1];
callchain_store(entry, PERF_CONTEXT_KERNEL); perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
callchain_store(entry, regs->nip); perf_callchain_store(entry, regs->nip);
if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD)) if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
return; return;
...@@ -89,7 +77,7 @@ static void perf_callchain_kernel(struct pt_regs *regs, ...@@ -89,7 +77,7 @@ static void perf_callchain_kernel(struct pt_regs *regs,
next_ip = regs->nip; next_ip = regs->nip;
lr = regs->link; lr = regs->link;
level = 0; level = 0;
callchain_store(entry, PERF_CONTEXT_KERNEL); perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
} else { } else {
if (level == 0) if (level == 0)
...@@ -111,7 +99,7 @@ static void perf_callchain_kernel(struct pt_regs *regs, ...@@ -111,7 +99,7 @@ static void perf_callchain_kernel(struct pt_regs *regs,
++level; ++level;
} }
callchain_store(entry, next_ip); perf_callchain_store(entry, next_ip);
if (!valid_next_sp(next_sp, sp)) if (!valid_next_sp(next_sp, sp))
return; return;
sp = next_sp; sp = next_sp;
...@@ -246,8 +234,8 @@ static void perf_callchain_user_64(struct pt_regs *regs, ...@@ -246,8 +234,8 @@ static void perf_callchain_user_64(struct pt_regs *regs,
next_ip = regs->nip; next_ip = regs->nip;
lr = regs->link; lr = regs->link;
sp = regs->gpr[1]; sp = regs->gpr[1];
callchain_store(entry, PERF_CONTEXT_USER); perf_callchain_store(entry, PERF_CONTEXT_USER);
callchain_store(entry, next_ip); perf_callchain_store(entry, next_ip);
for (;;) { for (;;) {
fp = (unsigned long __user *) sp; fp = (unsigned long __user *) sp;
...@@ -276,14 +264,14 @@ static void perf_callchain_user_64(struct pt_regs *regs, ...@@ -276,14 +264,14 @@ static void perf_callchain_user_64(struct pt_regs *regs,
read_user_stack_64(&uregs[PT_R1], &sp)) read_user_stack_64(&uregs[PT_R1], &sp))
return; return;
level = 0; level = 0;
callchain_store(entry, PERF_CONTEXT_USER); perf_callchain_store(entry, PERF_CONTEXT_USER);
callchain_store(entry, next_ip); perf_callchain_store(entry, next_ip);
continue; continue;
} }
if (level == 0) if (level == 0)
next_ip = lr; next_ip = lr;
callchain_store(entry, next_ip); perf_callchain_store(entry, next_ip);
++level; ++level;
sp = next_sp; sp = next_sp;
} }
...@@ -447,8 +435,8 @@ static void perf_callchain_user_32(struct pt_regs *regs, ...@@ -447,8 +435,8 @@ static void perf_callchain_user_32(struct pt_regs *regs,
next_ip = regs->nip; next_ip = regs->nip;
lr = regs->link; lr = regs->link;
sp = regs->gpr[1]; sp = regs->gpr[1];
callchain_store(entry, PERF_CONTEXT_USER); perf_callchain_store(entry, PERF_CONTEXT_USER);
callchain_store(entry, next_ip); perf_callchain_store(entry, next_ip);
while (entry->nr < PERF_MAX_STACK_DEPTH) { while (entry->nr < PERF_MAX_STACK_DEPTH) {
fp = (unsigned int __user *) (unsigned long) sp; fp = (unsigned int __user *) (unsigned long) sp;
...@@ -470,14 +458,14 @@ static void perf_callchain_user_32(struct pt_regs *regs, ...@@ -470,14 +458,14 @@ static void perf_callchain_user_32(struct pt_regs *regs,
read_user_stack_32(&uregs[PT_R1], &sp)) read_user_stack_32(&uregs[PT_R1], &sp))
return; return;
level = 0; level = 0;
callchain_store(entry, PERF_CONTEXT_USER); perf_callchain_store(entry, PERF_CONTEXT_USER);
callchain_store(entry, next_ip); perf_callchain_store(entry, next_ip);
continue; continue;
} }
if (level == 0) if (level == 0)
next_ip = lr; next_ip = lr;
callchain_store(entry, next_ip); perf_callchain_store(entry, next_ip);
++level; ++level;
sp = next_sp; sp = next_sp;
} }
......
...@@ -14,11 +14,6 @@ ...@@ -14,11 +14,6 @@
#include <asm/unwinder.h> #include <asm/unwinder.h>
#include <asm/ptrace.h> #include <asm/ptrace.h>
static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
if (entry->nr < PERF_MAX_STACK_DEPTH)
entry->ip[entry->nr++] = ip;
}
static void callchain_warning(void *data, char *msg) static void callchain_warning(void *data, char *msg)
{ {
...@@ -39,7 +34,7 @@ static void callchain_address(void *data, unsigned long addr, int reliable) ...@@ -39,7 +34,7 @@ static void callchain_address(void *data, unsigned long addr, int reliable)
struct perf_callchain_entry *entry = data; struct perf_callchain_entry *entry = data;
if (reliable) if (reliable)
callchain_store(entry, addr); perf_callchain_store(entry, addr);
} }
static const struct stacktrace_ops callchain_ops = { static const struct stacktrace_ops callchain_ops = {
...@@ -52,8 +47,8 @@ static const struct stacktrace_ops callchain_ops = { ...@@ -52,8 +47,8 @@ static const struct stacktrace_ops callchain_ops = {
static void static void
perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry) perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
{ {
callchain_store(entry, PERF_CONTEXT_KERNEL); perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
callchain_store(entry, regs->pc); perf_callchain_store(entry, regs->pc);
unwind_stack(NULL, regs, NULL, &callchain_ops, entry); unwind_stack(NULL, regs, NULL, &callchain_ops, entry);
} }
......
...@@ -1283,12 +1283,6 @@ void __init init_hw_perf_events(void) ...@@ -1283,12 +1283,6 @@ void __init init_hw_perf_events(void)
register_die_notifier(&perf_event_nmi_notifier); register_die_notifier(&perf_event_nmi_notifier);
} }
static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
if (entry->nr < PERF_MAX_STACK_DEPTH)
entry->ip[entry->nr++] = ip;
}
static void perf_callchain_kernel(struct pt_regs *regs, static void perf_callchain_kernel(struct pt_regs *regs,
struct perf_callchain_entry *entry) struct perf_callchain_entry *entry)
{ {
...@@ -1297,8 +1291,8 @@ static void perf_callchain_kernel(struct pt_regs *regs, ...@@ -1297,8 +1291,8 @@ static void perf_callchain_kernel(struct pt_regs *regs,
int graph = 0; int graph = 0;
#endif #endif
callchain_store(entry, PERF_CONTEXT_KERNEL); perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
callchain_store(entry, regs->tpc); perf_callchain_store(entry, regs->tpc);
ksp = regs->u_regs[UREG_I6]; ksp = regs->u_regs[UREG_I6];
fp = ksp + STACK_BIAS; fp = ksp + STACK_BIAS;
...@@ -1322,13 +1316,13 @@ static void perf_callchain_kernel(struct pt_regs *regs, ...@@ -1322,13 +1316,13 @@ static void perf_callchain_kernel(struct pt_regs *regs,
pc = sf->callers_pc; pc = sf->callers_pc;
fp = (unsigned long)sf->fp + STACK_BIAS; fp = (unsigned long)sf->fp + STACK_BIAS;
} }
callchain_store(entry, pc); perf_callchain_store(entry, pc);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER #ifdef CONFIG_FUNCTION_GRAPH_TRACER
if ((pc + 8UL) == (unsigned long) &return_to_handler) { if ((pc + 8UL) == (unsigned long) &return_to_handler) {
int index = current->curr_ret_stack; int index = current->curr_ret_stack;
if (current->ret_stack && index >= graph) { if (current->ret_stack && index >= graph) {
pc = current->ret_stack[index - graph].ret; pc = current->ret_stack[index - graph].ret;
callchain_store(entry, pc); perf_callchain_store(entry, pc);
graph++; graph++;
} }
} }
...@@ -1341,8 +1335,8 @@ static void perf_callchain_user_64(struct pt_regs *regs, ...@@ -1341,8 +1335,8 @@ static void perf_callchain_user_64(struct pt_regs *regs,
{ {
unsigned long ufp; unsigned long ufp;
callchain_store(entry, PERF_CONTEXT_USER); perf_callchain_store(entry, PERF_CONTEXT_USER);
callchain_store(entry, regs->tpc); perf_callchain_store(entry, regs->tpc);
ufp = regs->u_regs[UREG_I6] + STACK_BIAS; ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
do { do {
...@@ -1355,7 +1349,7 @@ static void perf_callchain_user_64(struct pt_regs *regs, ...@@ -1355,7 +1349,7 @@ static void perf_callchain_user_64(struct pt_regs *regs,
pc = sf.callers_pc; pc = sf.callers_pc;
ufp = (unsigned long)sf.fp + STACK_BIAS; ufp = (unsigned long)sf.fp + STACK_BIAS;
callchain_store(entry, pc); perf_callchain_store(entry, pc);
} while (entry->nr < PERF_MAX_STACK_DEPTH); } while (entry->nr < PERF_MAX_STACK_DEPTH);
} }
...@@ -1364,8 +1358,8 @@ static void perf_callchain_user_32(struct pt_regs *regs, ...@@ -1364,8 +1358,8 @@ static void perf_callchain_user_32(struct pt_regs *regs,
{ {
unsigned long ufp; unsigned long ufp;
callchain_store(entry, PERF_CONTEXT_USER); perf_callchain_store(entry, PERF_CONTEXT_USER);
callchain_store(entry, regs->tpc); perf_callchain_store(entry, regs->tpc);
ufp = regs->u_regs[UREG_I6] & 0xffffffffUL; ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
do { do {
...@@ -1378,7 +1372,7 @@ static void perf_callchain_user_32(struct pt_regs *regs, ...@@ -1378,7 +1372,7 @@ static void perf_callchain_user_32(struct pt_regs *regs,
pc = sf.callers_pc; pc = sf.callers_pc;
ufp = (unsigned long)sf.fp; ufp = (unsigned long)sf.fp;
callchain_store(entry, pc); perf_callchain_store(entry, pc);
} while (entry->nr < PERF_MAX_STACK_DEPTH); } while (entry->nr < PERF_MAX_STACK_DEPTH);
} }
......
...@@ -1571,12 +1571,6 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) ...@@ -1571,12 +1571,6 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
* callchain support * callchain support
*/ */
static inline
void callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
if (entry->nr < PERF_MAX_STACK_DEPTH)
entry->ip[entry->nr++] = ip;
}
static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry); static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry); static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
...@@ -1602,7 +1596,7 @@ static void backtrace_address(void *data, unsigned long addr, int reliable) ...@@ -1602,7 +1596,7 @@ static void backtrace_address(void *data, unsigned long addr, int reliable)
{ {
struct perf_callchain_entry *entry = data; struct perf_callchain_entry *entry = data;
callchain_store(entry, addr); perf_callchain_store(entry, addr);
} }
static const struct stacktrace_ops backtrace_ops = { static const struct stacktrace_ops backtrace_ops = {
...@@ -1616,8 +1610,8 @@ static const struct stacktrace_ops backtrace_ops = { ...@@ -1616,8 +1610,8 @@ static const struct stacktrace_ops backtrace_ops = {
static void static void
perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry) perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
{ {
callchain_store(entry, PERF_CONTEXT_KERNEL); perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
callchain_store(entry, regs->ip); perf_callchain_store(entry, regs->ip);
dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry); dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
} }
...@@ -1646,7 +1640,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry) ...@@ -1646,7 +1640,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
if (fp < compat_ptr(regs->sp)) if (fp < compat_ptr(regs->sp))
break; break;
callchain_store(entry, frame.return_address); perf_callchain_store(entry, frame.return_address);
fp = compat_ptr(frame.next_frame); fp = compat_ptr(frame.next_frame);
} }
return 1; return 1;
...@@ -1670,8 +1664,8 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry) ...@@ -1670,8 +1664,8 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
fp = (void __user *)regs->bp; fp = (void __user *)regs->bp;
callchain_store(entry, PERF_CONTEXT_USER); perf_callchain_store(entry, PERF_CONTEXT_USER);
callchain_store(entry, regs->ip); perf_callchain_store(entry, regs->ip);
if (perf_callchain_user32(regs, entry)) if (perf_callchain_user32(regs, entry))
return; return;
...@@ -1688,7 +1682,7 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry) ...@@ -1688,7 +1682,7 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
if ((unsigned long)fp < regs->sp) if ((unsigned long)fp < regs->sp)
break; break;
callchain_store(entry, frame.return_address); perf_callchain_store(entry, frame.return_address);
fp = frame.next_frame; fp = frame.next_frame;
} }
} }
......
...@@ -978,6 +978,13 @@ extern void perf_event_fork(struct task_struct *tsk); ...@@ -978,6 +978,13 @@ extern void perf_event_fork(struct task_struct *tsk);
extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs); extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
static inline void
perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
if (entry->nr < PERF_MAX_STACK_DEPTH)
entry->ip[entry->nr++] = ip;
}
extern int sysctl_perf_event_paranoid; extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock; extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate; extern int sysctl_perf_event_sample_rate;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment