Commit 653ae3a8 authored by Andrii Nakryiko, committed by Alexei Starovoitov

bpf: clean up visit_insn()'s instruction processing

Instead of referencing processed instruction repeatedly as insns[t]
throughout entire visit_insn() function, take a local insn pointer and
work with it in a cleaner way.

It makes enhancing this function further a bit easier as well.
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20230302235015.2044271-7-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent fffc893b
...@@ -13484,44 +13484,43 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns, ...@@ -13484,44 +13484,43 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns,
*/ */
static int visit_insn(int t, struct bpf_verifier_env *env) static int visit_insn(int t, struct bpf_verifier_env *env)
{ {
struct bpf_insn *insns = env->prog->insnsi; struct bpf_insn *insns = env->prog->insnsi, *insn = &insns[t];
int ret; int ret;
if (bpf_pseudo_func(insns + t)) if (bpf_pseudo_func(insn))
return visit_func_call_insn(t, insns, env, true); return visit_func_call_insn(t, insns, env, true);
/* All non-branch instructions have a single fall-through edge. */ /* All non-branch instructions have a single fall-through edge. */
if (BPF_CLASS(insns[t].code) != BPF_JMP && if (BPF_CLASS(insn->code) != BPF_JMP &&
BPF_CLASS(insns[t].code) != BPF_JMP32) BPF_CLASS(insn->code) != BPF_JMP32)
return push_insn(t, t + 1, FALLTHROUGH, env, false); return push_insn(t, t + 1, FALLTHROUGH, env, false);
switch (BPF_OP(insns[t].code)) { switch (BPF_OP(insn->code)) {
case BPF_EXIT: case BPF_EXIT:
return DONE_EXPLORING; return DONE_EXPLORING;
case BPF_CALL: case BPF_CALL:
if (insns[t].imm == BPF_FUNC_timer_set_callback) if (insn->imm == BPF_FUNC_timer_set_callback)
/* Mark this call insn as a prune point to trigger /* Mark this call insn as a prune point to trigger
* is_state_visited() check before call itself is * is_state_visited() check before call itself is
* processed by __check_func_call(). Otherwise new * processed by __check_func_call(). Otherwise new
* async state will be pushed for further exploration. * async state will be pushed for further exploration.
*/ */
mark_prune_point(env, t); mark_prune_point(env, t);
return visit_func_call_insn(t, insns, env, return visit_func_call_insn(t, insns, env, insn->src_reg == BPF_PSEUDO_CALL);
insns[t].src_reg == BPF_PSEUDO_CALL);
case BPF_JA: case BPF_JA:
if (BPF_SRC(insns[t].code) != BPF_K) if (BPF_SRC(insn->code) != BPF_K)
return -EINVAL; return -EINVAL;
/* unconditional jump with single edge */ /* unconditional jump with single edge */
ret = push_insn(t, t + insns[t].off + 1, FALLTHROUGH, env, ret = push_insn(t, t + insn->off + 1, FALLTHROUGH, env,
true); true);
if (ret) if (ret)
return ret; return ret;
mark_prune_point(env, t + insns[t].off + 1); mark_prune_point(env, t + insn->off + 1);
mark_jmp_point(env, t + insns[t].off + 1); mark_jmp_point(env, t + insn->off + 1);
return ret; return ret;
...@@ -13533,7 +13532,7 @@ static int visit_insn(int t, struct bpf_verifier_env *env) ...@@ -13533,7 +13532,7 @@ static int visit_insn(int t, struct bpf_verifier_env *env)
if (ret) if (ret)
return ret; return ret;
return push_insn(t, t + insns[t].off + 1, BRANCH, env, true); return push_insn(t, t + insn->off + 1, BRANCH, env, true);
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment