Commit 66665ad2 authored by Masami Hiramatsu, committed by Alexei Starovoitov

tracing/kprobe: bpf: Compare instruction pointer with original one

Compare instruction pointer with original one on the
stack instead using per-cpu bpf_kprobe_override flag.

This patch also consolidates reset_current_kprobe() and
preempt_enable_no_resched() blocks. Those can be done
in one place.
Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
Reviewed-by: Josef Bacik <jbacik@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent b4da3340
@@ -83,7 +83,6 @@ EXPORT_SYMBOL_GPL(trace_call_bpf);
 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
 BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
 {
-	__this_cpu_write(bpf_kprobe_override, 1);
 	regs_set_return_value(regs, rc);
 	arch_kprobe_override_function(regs);
 	return 0;
......
@@ -42,8 +42,6 @@ struct trace_kprobe {
 	(offsetof(struct trace_kprobe, tp.args) +	\
 	(sizeof(struct probe_arg) * (n)))

-DEFINE_PER_CPU(int, bpf_kprobe_override);
-
 static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
 {
 	return tk->rp.handler != NULL;
@@ -1205,6 +1203,7 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
 	int rctx;

 	if (bpf_prog_array_valid(call)) {
+		unsigned long orig_ip = instruction_pointer(regs);
 		int ret;

 		ret = trace_call_bpf(call, regs);
@@ -1212,12 +1211,13 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
 		/*
 		 * We need to check and see if we modified the pc of the
 		 * pt_regs, and if so clear the kprobe and return 1 so that we
-		 * don't do the instruction skipping.  Also reset our state so
-		 * we are clean the next pass through.
+		 * don't do the single stepping.
+		 * The ftrace kprobe handler leaves it up to us to re-enable
+		 * preemption here before returning if we've modified the ip.
 		 */
-		if (__this_cpu_read(bpf_kprobe_override)) {
-			__this_cpu_write(bpf_kprobe_override, 0);
+		if (orig_ip != instruction_pointer(regs)) {
 			reset_current_kprobe();
+			preempt_enable_no_resched();
 			return 1;
 		}
 		if (!ret)
@@ -1325,15 +1325,8 @@ static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
 	if (tk->tp.flags & TP_FLAG_TRACE)
 		kprobe_trace_func(tk, regs);
 #ifdef CONFIG_PERF_EVENTS
-	if (tk->tp.flags & TP_FLAG_PROFILE) {
+	if (tk->tp.flags & TP_FLAG_PROFILE)
 		ret = kprobe_perf_func(tk, regs);
-		/*
-		 * The ftrace kprobe handler leaves it up to us to re-enable
-		 * preemption here before returning if we've modified the ip.
-		 */
-		if (ret)
-			preempt_enable_no_resched();
-	}
 #endif
 	return ret;
 }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment