Commit 568b329a authored by Alexei Starovoitov, committed by David S. Miller

perf: generalize perf_callchain

. avoid walking the stack when there is no room left in the buffer
. generalize get_perf_callchain() so it can be called from a bpf helper
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 6b83d28a
@@ -37,7 +37,7 @@ print_context_stack_bp(struct thread_info *tinfo,

 /* Generic stack tracer with callbacks */
 struct stacktrace_ops {
-        void (*address)(void *data, unsigned long address, int reliable);
+        int (*address)(void *data, unsigned long address, int reliable);
         /* On negative return stop dumping */
         int (*stack)(void *data, char *name);
         walk_stack_t walk_stack;
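Not part of the patch itself: a minimal sketch of what the new contract looks like from the callback side. The demo_* names are hypothetical; .walk_stack reuses print_context_stack_bp the same way the perf backtrace_ops below does. The address callback now returns an int, and a non-zero return tells the stack walker to stop, which is how "no room left in the buffer" short-circuits the walk.

struct demo_trace {
        unsigned long *addrs;
        unsigned int nr;
        unsigned int max;
};

static int demo_address(void *data, unsigned long addr, int reliable)
{
        struct demo_trace *t = data;

        if (t->nr >= t->max)
                return -1;      /* no more room, ask the walker to stop */

        t->addrs[t->nr++] = addr;
        return 0;
}

static int demo_stack(void *data, char *name)
{
        return 0;       /* keep going across stack boundaries */
}

static const struct stacktrace_ops demo_ops = {
        .stack          = demo_stack,
        .address        = demo_address,
        .walk_stack     = print_context_stack_bp,
};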
@@ -2180,11 +2180,11 @@ static int backtrace_stack(void *data, char *name)
         return 0;
 }

-static void backtrace_address(void *data, unsigned long addr, int reliable)
+static int backtrace_address(void *data, unsigned long addr, int reliable)
 {
         struct perf_callchain_entry *entry = data;

-        perf_callchain_store(entry, addr);
+        return perf_callchain_store(entry, addr);
 }

 static const struct stacktrace_ops backtrace_ops = {
@@ -135,7 +135,8 @@ print_context_stack_bp(struct thread_info *tinfo,
                 if (!__kernel_text_address(addr))
                         break;

-                ops->address(data, addr, 1);
+                if (ops->address(data, addr, 1))
+                        break;
                 frame = frame->next_frame;
                 ret_addr = &frame->return_address;
                 print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
@@ -154,10 +155,11 @@ static int print_trace_stack(void *data, char *name)
 /*
  * Print one address/symbol entries per line.
  */
-static void print_trace_address(void *data, unsigned long addr, int reliable)
+static int print_trace_address(void *data, unsigned long addr, int reliable)
 {
         touch_nmi_watchdog();
         printk_stack_address(addr, reliable, data);
+        return 0;
 }

 static const struct stacktrace_ops print_trace_ops = {
@@ -14,30 +14,34 @@ static int save_stack_stack(void *data, char *name)
         return 0;
 }

-static void
+static int
 __save_stack_address(void *data, unsigned long addr, bool reliable, bool nosched)
 {
         struct stack_trace *trace = data;
 #ifdef CONFIG_FRAME_POINTER
         if (!reliable)
-                return;
+                return 0;
 #endif
         if (nosched && in_sched_functions(addr))
-                return;
+                return 0;
         if (trace->skip > 0) {
                 trace->skip--;
-                return;
+                return 0;
         }
-        if (trace->nr_entries < trace->max_entries)
+        if (trace->nr_entries < trace->max_entries) {
                 trace->entries[trace->nr_entries++] = addr;
+                return 0;
+        } else {
+                return -1; /* no more room, stop walking the stack */
+        }
 }

-static void save_stack_address(void *data, unsigned long addr, int reliable)
+static int save_stack_address(void *data, unsigned long addr, int reliable)
 {
         return __save_stack_address(data, addr, reliable, false);
 }

-static void
+static int
 save_stack_address_nosched(void *data, unsigned long addr, int reliable)
 {
         return __save_stack_address(data, addr, reliable, true);
@@ -23,12 +23,13 @@ static int backtrace_stack(void *data, char *name)
         return 0;
 }

-static void backtrace_address(void *data, unsigned long addr, int reliable)
+static int backtrace_address(void *data, unsigned long addr, int reliable)
 {
         unsigned int *depth = data;

         if ((*depth)--)
                 oprofile_add_trace(addr);
+        return 0;
 }

 static struct stacktrace_ops backtrace_ops = {
@@ -964,11 +964,20 @@ DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

 extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
 extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);
+extern struct perf_callchain_entry *
+get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
+                   bool crosstask, bool add_mark);
+extern int get_callchain_buffers(void);
+extern void put_callchain_buffers(void);

-static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
+static inline int perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
 {
-        if (entry->nr < PERF_MAX_STACK_DEPTH)
+        if (entry->nr < PERF_MAX_STACK_DEPTH) {
                 entry->ip[entry->nr++] = ip;
+                return 0;
+        } else {
+                return -1; /* no more room, stop walking the stack */
+        }
 }

 extern int sysctl_perf_event_paranoid;
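Again not from the patch: a sketch of how an architecture's frame walker can use the new return value of perf_callchain_store(). The demo_frame layout and function name are made up for illustration; the point is that once entry->nr reaches PERF_MAX_STACK_DEPTH the store returns -1, so the loop can stop instead of visiting frames whose addresses would be discarded anyway.

struct demo_frame {
        struct demo_frame *next_frame;
        unsigned long return_address;
};

static void demo_fill_callchain(struct perf_callchain_entry *entry,
                                struct demo_frame *frame)
{
        while (frame) {
                /* returns -1 once entry->ip[] is full */
                if (perf_callchain_store(entry, frame->return_address))
                        break;
                frame = frame->next_frame;
        }
}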
@@ -159,15 +159,24 @@ put_callchain_entry(int rctx)
 struct perf_callchain_entry *
 perf_callchain(struct perf_event *event, struct pt_regs *regs)
 {
-        int rctx;
-        struct perf_callchain_entry *entry;
-
-        int kernel = !event->attr.exclude_callchain_kernel;
-        int user = !event->attr.exclude_callchain_user;
+        bool kernel = !event->attr.exclude_callchain_kernel;
+        bool user = !event->attr.exclude_callchain_user;
+        /* Disallow cross-task user callchains. */
+        bool crosstask = event->ctx->task && event->ctx->task != current;

         if (!kernel && !user)
                 return NULL;

+        return get_perf_callchain(regs, 0, kernel, user, crosstask, true);
+}
+
+struct perf_callchain_entry *
+get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
+                   bool crosstask, bool add_mark)
+{
+        struct perf_callchain_entry *entry;
+        int rctx;
+
         entry = get_callchain_entry(&rctx);
         if (rctx == -1)
                 return NULL;
@@ -175,10 +184,11 @@ perf_callchain(struct perf_event *event, struct pt_regs *regs)
         if (!entry)
                 goto exit_put;

-        entry->nr = 0;
+        entry->nr = init_nr;

         if (kernel && !user_mode(regs)) {
-                perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
+                if (add_mark)
+                        perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
                 perf_callchain_kernel(entry, regs);
         }
@@ -191,13 +201,11 @@ perf_callchain(struct perf_event *event, struct pt_regs *regs)
                 }

                 if (regs) {
-                        /*
-                         * Disallow cross-task user callchains.
-                         */
-                        if (event->ctx->task && event->ctx->task != current)
+                        if (crosstask)
                                 goto exit_put;

-                        perf_callchain_store(entry, PERF_CONTEXT_USER);
+                        if (add_mark)
+                                perf_callchain_store(entry, PERF_CONTEXT_USER);
                         perf_callchain_user(entry, regs);
                 }
         }
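The commit message says get_perf_callchain() is being generalized so a bpf helper can call it; no such caller exists in this patch, but a hypothetical one could look like the sketch below. The bpf_demo_* name is made up, and the arguments just follow the new signature: init_nr = 0, kernel-only, no user part, no cross-task check, and add_mark = false so no PERF_CONTEXT_KERNEL marker is stored. A caller outside perf also needs the per-cpu callchain buffers allocated via get_callchain_buffers(), which is presumably why that pair of declarations moves from kernel/events/internal.h into perf_event.h.

/* hypothetical caller, illustrative only */
static struct perf_callchain_entry *
bpf_demo_kernel_callchain(struct pt_regs *regs)
{
        /* regs, init_nr, kernel, user, crosstask, add_mark */
        return get_perf_callchain(regs, 0, true, false, false, false);
}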
@@ -182,8 +182,6 @@ DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
 /* Callchain handling */
 extern struct perf_callchain_entry *
 perf_callchain(struct perf_event *event, struct pt_regs *regs);
-extern int get_callchain_buffers(void);
-extern void put_callchain_buffers(void);

 static inline int get_recursion_context(int *recursion)
 {