Commit 85b67bcb authored by Alexei Starovoitov, committed by David S. Miller

perf, bpf: minimize the size of perf_trace_() tracepoint handler

Move trace_call_bpf() into a helper function to minimize the size
of the perf_trace_*() tracepoint handlers.
    text     data      bss       dec      hex  filename
10541679  5526646  2945024  19013349  1221ee5  vmlinux_before
10509422  5526646  2945024  18981092  121a0e4  vmlinux_after
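
For readers less familiar with the tracepoint macros, here is a hypothetical,
much-simplified sketch of where the savings come from (DEFINE_MY_HANDLER,
my_trace_##id and my_submit_helper are illustrative names, not the kernel's):
every event that uses the macro gets its own generated handler, so code written
inline in the macro body is duplicated once per tracepoint, while code behind
an out-of-line helper is emitted only once in the image.

/* Hypothetical illustration of the size saving, not the kernel macro. */
void my_submit_helper(void *record, int size);	/* one out-of-line copy */

#define DEFINE_MY_HANDLER(id)						\
static void my_trace_##id(void)						\
{									\
	char record[64] = { id };	/* per-event setup stays inline */ \
	my_submit_helper(record, sizeof(record)); /* shared path, one copy */ \
}

DEFINE_MY_HANDLER(1)	/* emits my_trace_1(): only a short call sequence */
DEFINE_MY_HANDLER(2)	/* emits my_trace_2(): the helper body is not duplicated */

The patch below applies the same split to perf_trace_##call(): the per-event
record setup stays in the macro, and the BPF/submit logic moves into the new
perf_trace_run_bpf_submit() helper.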

It may seem that perf_fetch_caller_regs() could also be moved into the
helper, but that would be incorrect: it snapshots the registers of its
caller, so the recorded ip/sp would then describe the helper's frame
instead of the traced call site.
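
Continuing the hypothetical sketch above, only the submit path can move out of
line; the register snapshot has to stay at the expansion site.
perf_fetch_caller_regs() and struct pt_regs are the real kernel interfaces,
while my_trace_handler and my_submit_helper remain illustrative names:

#include <linux/perf_event.h>	/* perf_fetch_caller_regs(), struct pt_regs */

/* Hypothetical fragment, not the kernel macro. */
static void my_trace_handler(void)
{
	struct pt_regs regs;

	perf_fetch_caller_regs(&regs);	/* must run here: it records the ip/sp
					 * of the frame executing it, i.e. this
					 * handler at the tracepoint call site */
	my_submit_helper(&regs, sizeof(regs));	/* everything after the snapshot
						 * can be shared out of line */
}

This is why the patch leaves perf_fetch_caller_regs() in the macro and moves
only the code after it into the helper.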

bpf+tracepoint performance is not affected: the call to
perf_swevent_put_recursion_context() now happens inside
kernel/events/core.c, where the compiler can inline it, and since the
generated handlers no longer call it directly its EXPORT_SYMBOL_GPL can
be dropped.

No measurable change in normal perf tracepoints.
Suggested-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c60c9840

include/linux/trace_events.h
@@ -609,6 +609,11 @@ extern void ftrace_profile_free_filter(struct perf_event *event);
 void perf_trace_buf_update(void *record, u16 type);
 void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp);
 
+void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
+			       struct trace_event_call *call, u64 count,
+			       struct pt_regs *regs, struct hlist_head *head,
+			       struct task_struct *task);
+
 static inline void
 perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type,
 		      u64 count, struct pt_regs *regs, void *head,

include/trace/perf.h
@@ -64,16 +64,9 @@ perf_trace_##call(void *__data, proto)				\
 									\
 	{ assign; }							\
 									\
-	if (prog) {							\
-		*(struct pt_regs **)entry = __regs;			\
-		if (!trace_call_bpf(prog, entry) || hlist_empty(head)) { \
-			perf_swevent_put_recursion_context(rctx);	\
-			return;						\
-		}							\
-	}								\
-	perf_trace_buf_submit(entry, __entry_size, rctx,		\
-			      event_call->event.type, __count, __regs,	\
-			      head, __task);				\
+	perf_trace_run_bpf_submit(entry, __entry_size, rctx,		\
+				  event_call, __count, __regs,		\
+				  head, __task);			\
 }
 
 /*
...@@ -6741,7 +6741,6 @@ void perf_swevent_put_recursion_context(int rctx) ...@@ -6741,7 +6741,6 @@ void perf_swevent_put_recursion_context(int rctx)
put_recursion_context(swhash->recursion, rctx); put_recursion_context(swhash->recursion, rctx);
} }
EXPORT_SYMBOL_GPL(perf_swevent_put_recursion_context);
void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{ {

@@ -6998,6 +6997,25 @@ static int perf_tp_event_match(struct perf_event *event,
 	return 1;
 }
 
+void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
+			       struct trace_event_call *call, u64 count,
+			       struct pt_regs *regs, struct hlist_head *head,
+			       struct task_struct *task)
+{
+	struct bpf_prog *prog = call->prog;
+
+	if (prog) {
+		*(struct pt_regs **)raw_data = regs;
+		if (!trace_call_bpf(prog, raw_data) || hlist_empty(head)) {
+			perf_swevent_put_recursion_context(rctx);
+			return;
+		}
+	}
+	perf_tp_event(call->event.type, count, raw_data, size, regs, head,
+		      rctx, task);
+}
+EXPORT_SYMBOL_GPL(perf_trace_run_bpf_submit);
+
 void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
 		   struct pt_regs *regs, struct hlist_head *head, int rctx,
 		   struct task_struct *task)