Commit 803e3757 authored by Peter Zijlstra (Intel), committed by Greg Kroah-Hartman

perf: Avoid horrible stack usage

commit 86038c5e upstream.

Both Linus (most recently) and Steve (a while ago) reported that
perf-related callbacks have massive stack bloat.

The problem is that software events need a pt_regs in order to
properly report the event location and unwind the stack. And because
we could not assume one was present, we allocated one on the stack and
filled it with the minimal bits required for operation.

Now, pt_regs is quite large, so this is undesirable. Furthermore, it
turns out that most sites actually have a pt_regs pointer available,
making this even more onerous, as the stack space is a pointless
waste.

This patch addresses the problem by observing that software events
have well-defined nesting semantics, so we can use static per-cpu
storage instead of on-stack allocation.
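
As a minimal sketch of that observation (a userspace analog, not the
kernel code, which follows in the diff below; names such as fake_regs,
regs_pool and regs_for_level are invented for the example): because
nesting is bounded to a handful of well-known contexts, a small static
array indexed by nesting level can hand out register storage without
touching the stack.

  #include <stdio.h>
  #include <string.h>

  /* Bounded nesting: task, softirq, hardirq, NMI -- four slots suffice. */
  #define MAX_NESTING 4

  struct fake_regs {
          unsigned long ip;
          unsigned long sp;
  };

  /* Static storage, one slot per nesting level, instead of an on-stack copy. */
  static struct fake_regs regs_pool[MAX_NESTING];

  static struct fake_regs *regs_for_level(int level)
  {
          /* the same slot is safely reused every time we run at this level */
          return &regs_pool[level];
  }

  int main(void)
  {
          struct fake_regs *regs = regs_for_level(0);

          memset(regs, 0, sizeof(*regs));
          regs->ip = 0x1234;
          printf("level 0 regs at %p, ip=%#lx\n", (void *)regs, regs->ip);
          return 0;
  }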

Linus made the further observation that all but the scheduler callers
of perf_sw_event() have a pt_regs available, so we change the regular
perf_sw_event() to require a valid pt_regs (where it used to be
optional) and add perf_sw_event_sched() for the scheduler.

We have a scheduler-specific call instead of a more generic _noregs()
style construct because we can assume non-recursion from the scheduler
and thereby simplify the code further (_noregs would have to put the
recursion context call inline in order to ascertain which __perf_regs
element to use).
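
Continuing the sketch above (again with invented helper names such as
current_nesting_level() and deliver(); this illustrates the trade-off,
it is not the kernel implementation), the difference between the two
entry points is only whether the slot index has to be looked up or can
be assumed to be zero:

  /* hypothetical helpers for the sketch */
  static int nesting_level;                      /* per-CPU in the kernel */
  static int current_nesting_level(void) { return nesting_level; }
  static void deliver(struct fake_regs *regs)
  {
          printf("event at ip=%#lx\n", regs->ip);
  }

  /* general path: the slot is picked by the current nesting level */
  static void report_event(unsigned long ip)
  {
          struct fake_regs *regs = regs_for_level(current_nesting_level());

          regs->ip = ip;
          deliver(regs);
  }

  /* scheduler path: recursion is impossible here, so slot 0 is hard-coded */
  static void report_event_sched(unsigned long ip)
  {
          struct fake_regs *regs = regs_for_level(0);

          regs->ip = ip;
          deliver(regs);
  }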

One last note on the implementation of perf_trace_buf_prepare(): we
allow .regs = NULL for those cases where we already have a pt_regs
pointer available and do not need another.
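
The same convention is visible in the hunks below: callers that
already hold a pt_regs (kprobes, uprobes, syscalls) pass NULL, while
the ftrace macro passes &__regs and receives a pointer into the
per-cpu slot for the acquired recursion context. A generic,
illustrative sketch of such an optional out-parameter (standalone,
with names invented for the example):

  #include <stddef.h>

  struct fake_regs { unsigned long ip; };

  static struct fake_regs slot_pool[4];          /* one slot per recursion level */
  static char buf[128];

  /* returns a buffer and, only if the caller asks, a regs slot to fill in */
  static void *buf_prepare(struct fake_regs **regs, int *rctxp)
  {
          *rctxp = 0;                            /* pretend we got level 0 */
          if (regs)                              /* optional out-parameter */
                  *regs = &slot_pool[*rctxp];
          return buf;
  }

  int main(void)
  {
          struct fake_regs *regs;
          int rctx;

          buf_prepare(&regs, &rctx);             /* caller wants regs filled in */
          buf_prepare(NULL, &rctx);              /* caller already has its own */
          (void)regs;
          return 0;
  }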
Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
Reported-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Javi Merino <javi.merino@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Petr Mladek <pmladek@suse.cz>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Tom Zanussi <tom.zanussi@linux.intel.com>
Cc: Vaibhav Nagarnaik <vnagarnaik@google.com>
Link: http://lkml.kernel.org/r/20141216115041.GW3337@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 8e0aa9da
@@ -584,7 +584,7 @@ extern int ftrace_profile_set_filter(struct perf_event *event, int event_id,
 				     char *filter_str);
 extern void ftrace_profile_free_filter(struct perf_event *event);
 extern void *perf_trace_buf_prepare(int size, unsigned short type,
-				    struct pt_regs *regs, int *rctxp);
+				    struct pt_regs **regs, int *rctxp);
 
 static inline void
 perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
......
@@ -660,6 +660,7 @@ static inline int is_software_event(struct perf_event *event)
 
 extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
+extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
 extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);
 
 #ifndef perf_arch_fetch_caller_regs
@@ -684,14 +685,25 @@ static inline void perf_fetch_caller_regs(struct pt_regs *regs)
 static __always_inline void
 perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
 {
-	struct pt_regs hot_regs;
+	if (static_key_false(&perf_swevent_enabled[event_id]))
+		__perf_sw_event(event_id, nr, regs, addr);
+}
 
+DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);
+
+/*
+ * 'Special' version for the scheduler, it hard assumes no recursion,
+ * which is guaranteed by us not actually scheduling inside other swevents
+ * because those disable preemption.
+ */
+static __always_inline void
+perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
+{
 	if (static_key_false(&perf_swevent_enabled[event_id])) {
-		if (!regs) {
-			perf_fetch_caller_regs(&hot_regs);
-			regs = &hot_regs;
-		}
-		__perf_sw_event(event_id, nr, regs, addr);
+		struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
+
+		perf_fetch_caller_regs(regs);
+		___perf_sw_event(event_id, nr, regs, addr);
 	}
 }
@@ -707,7 +719,7 @@ static inline void perf_event_task_sched_in(struct task_struct *prev,
 static inline void perf_event_task_sched_out(struct task_struct *prev,
 					     struct task_struct *next)
 {
-	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
+	perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
 
 	if (static_key_false(&perf_sched_events.key))
 		__perf_event_task_sched_out(prev, next);
@@ -818,6 +830,8 @@ static inline int perf_event_refresh(struct perf_event *event, int refresh)
 static inline void
 perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)	{ }
 static inline void
+perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)			{ }
+static inline void
 perf_bp_event(struct perf_event *event, void *data)			{ }
 
 static inline int perf_register_guest_info_callbacks
......
@@ -765,7 +765,7 @@ perf_trace_##call(void *__data, proto)				\
 	struct ftrace_event_call *event_call = __data;			\
 	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
 	struct ftrace_raw_##call *entry;				\
-	struct pt_regs __regs;						\
+	struct pt_regs *__regs;						\
 	u64 __addr = 0, __count = 1;					\
 	struct task_struct *__task = NULL;				\
 	struct hlist_head *head;					\
@@ -784,18 +784,19 @@ perf_trace_##call(void *__data, proto)				\
 			     sizeof(u64));				\
 	__entry_size -= sizeof(u32);					\
 									\
-	perf_fetch_caller_regs(&__regs);				\
 	entry = perf_trace_buf_prepare(__entry_size,			\
 			event_call->event.type, &__regs, &rctx);	\
 	if (!entry)							\
 		return;							\
 									\
+	perf_fetch_caller_regs(__regs);					\
+									\
 	tstruct								\
 									\
 	{ assign; }							\
 									\
 	perf_trace_buf_submit(entry, __entry_size, rctx, __addr,	\
-		__count, &__regs, head, __task);			\
+		__count, __regs, head, __task);				\
 }
 
 /*
......
@@ -5905,6 +5905,8 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
 	rcu_read_unlock();
 }
 
+DEFINE_PER_CPU(struct pt_regs, __perf_regs[4]);
+
 int perf_swevent_get_recursion_context(void)
 {
 	struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
@@ -5920,21 +5922,30 @@ inline void perf_swevent_put_recursion_context(int rctx)
 	put_recursion_context(swhash->recursion, rctx);
 }
 
-void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
+void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
 {
 	struct perf_sample_data data;
-	int rctx;
 
-	preempt_disable_notrace();
-	rctx = perf_swevent_get_recursion_context();
-	if (rctx < 0)
+	if (WARN_ON_ONCE(!regs))
 		return;
 
 	perf_sample_data_init(&data, addr, 0);
 	do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
+}
+
+void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
+{
+	int rctx;
+
+	preempt_disable_notrace();
+	rctx = perf_swevent_get_recursion_context();
+	if (unlikely(rctx < 0))
+		goto fail;
+
+	___perf_sw_event(event_id, nr, regs, addr);
 
 	perf_swevent_put_recursion_context(rctx);
+fail:
 	preempt_enable_notrace();
 }
......
@@ -1083,7 +1083,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 		if (p->sched_class->migrate_task_rq)
 			p->sched_class->migrate_task_rq(p, new_cpu);
 		p->se.nr_migrations++;
-		perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
+		perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0);
 	}
 
 	__set_task_cpu(p, new_cpu);
......
@@ -261,7 +261,7 @@ void perf_trace_del(struct perf_event *p_event, int flags)
 }
 
 void *perf_trace_buf_prepare(int size, unsigned short type,
-			     struct pt_regs *regs, int *rctxp)
+			     struct pt_regs **regs, int *rctxp)
 {
 	struct trace_entry *entry;
 	unsigned long flags;
@@ -280,6 +280,8 @@ void *perf_trace_buf_prepare(int size, unsigned short type,
 	if (*rctxp < 0)
 		return NULL;
 
+	if (regs)
+		*regs = this_cpu_ptr(&__perf_regs[*rctxp]);
 	raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]);
 
 	/* zero the dead bytes from align to not leak stack to user */
......
@@ -1158,7 +1158,7 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
 	size = ALIGN(__size + sizeof(u32), sizeof(u64));
 	size -= sizeof(u32);
 
-	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
+	entry = perf_trace_buf_prepare(size, call->event.type, NULL, &rctx);
 	if (!entry)
 		return;
@@ -1189,7 +1189,7 @@ kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
 	size = ALIGN(__size + sizeof(u32), sizeof(u64));
 	size -= sizeof(u32);
 
-	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
+	entry = perf_trace_buf_prepare(size, call->event.type, NULL, &rctx);
 	if (!entry)
 		return;
......
@@ -586,7 +586,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
 	size -= sizeof(u32);
 
 	rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
-				sys_data->enter_event->event.type, regs, &rctx);
+				sys_data->enter_event->event.type, NULL, &rctx);
 	if (!rec)
 		return;
@@ -659,7 +659,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
 	size -= sizeof(u32);
 
 	rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
-				sys_data->exit_event->event.type, regs, &rctx);
+				sys_data->exit_event->event.type, NULL, &rctx);
 	if (!rec)
 		return;
......
@@ -1115,7 +1115,7 @@ static void __uprobe_perf_func(struct trace_uprobe *tu,
 	if (hlist_empty(head))
 		goto out;
 
-	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
+	entry = perf_trace_buf_prepare(size, call->event.type, NULL, &rctx);
 	if (!entry)
 		goto out;
......