Commit 46291067 authored by Brendan Jackman, committed by Alexei Starovoitov

bpf: Pull out a macro for interpreting atomic ALU operations

Since the atomic operations that are added in subsequent commits are
all isomorphic with BPF_ADD, pull out a macro to avoid the
interpreter becoming dominated by lines of atomic-related code.

Note that this sacrifices interpreter performance (combining
STX_ATOMIC_W and STX_ATOMIC_DW into a single switch case means that
we need an extra conditional branch to differentiate them) in favour
of compact and (relatively!) simple C code.
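As an illustration of how the macro composes, here is a minimal sketch
(not part of this commit) of what the interpreter's switch could look
like once further BPF_ADD-isomorphic operations are wired in; the
BPF_AND/BPF_OR/BPF_XOR instantiations are assumptions about the
follow-up patches, not code from this change:

	STX_ATOMIC_DW:
	STX_ATOMIC_W:
		switch (IMM) {
		/* Each instantiation expands to two cases (the plain op and
		 * its BPF_FETCH variant), each branching on
		 * BPF_SIZE(insn->code) to pick the atomic_*() or atomic64_*()
		 * kernel helper.
		 */
		ATOMIC_ALU_OP(BPF_ADD, add)
		ATOMIC_ALU_OP(BPF_AND, and)	/* assumed follow-up instantiation */
		ATOMIC_ALU_OP(BPF_OR, or)	/* assumed follow-up instantiation */
		ATOMIC_ALU_OP(BPF_XOR, xor)	/* assumed follow-up instantiation */
#undef ATOMIC_ALU_OP
		/* ... BPF_XCHG, BPF_CMPXCHG and default cases as in the diff below ... */
		}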
Signed-off-by: Brendan Jackman <jackmanb@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Yonghong Song <yhs@fb.com>
Link: https://lore.kernel.org/bpf/20210114181751.768687-9-jackmanb@google.com
parent 5ffa2550
@@ -1618,55 +1618,53 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
 	LDX_PROBE(DW, 8)
 #undef LDX_PROBE
 
-	STX_ATOMIC_W:
-		switch (IMM) {
-		case BPF_ADD:
-			/* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
-			atomic_add((u32) SRC, (atomic_t *)(unsigned long)
-				   (DST + insn->off));
-			break;
-		case BPF_ADD | BPF_FETCH:
-			SRC = (u32) atomic_fetch_add(
-				(u32) SRC,
-				(atomic_t *)(unsigned long) (DST + insn->off));
-			break;
-		case BPF_XCHG:
-			SRC = (u32) atomic_xchg(
-				(atomic_t *)(unsigned long) (DST + insn->off),
-				(u32) SRC);
-			break;
-		case BPF_CMPXCHG:
-			BPF_R0 = (u32) atomic_cmpxchg(
-				(atomic_t *)(unsigned long) (DST + insn->off),
-				(u32) BPF_R0, (u32) SRC);
-			break;
-		default:
-			goto default_label;
-		}
-		CONT;
+#define ATOMIC_ALU_OP(BOP, KOP)						\
+		case BOP:						\
+			if (BPF_SIZE(insn->code) == BPF_W)		\
+				atomic_##KOP((u32) SRC, (atomic_t *)(unsigned long) \
+					     (DST + insn->off));	\
+			else						\
+				atomic64_##KOP((u64) SRC, (atomic64_t *)(unsigned long) \
+					       (DST + insn->off));	\
+			break;						\
+		case BOP | BPF_FETCH:					\
+			if (BPF_SIZE(insn->code) == BPF_W)		\
+				SRC = (u32) atomic_fetch_##KOP(		\
+					(u32) SRC,			\
+					(atomic_t *)(unsigned long) (DST + insn->off)); \
+			else						\
+				SRC = (u64) atomic64_fetch_##KOP(	\
+					(u64) SRC,			\
+					(atomic64_t *)(unsigned long) (DST + insn->off)); \
+			break;
 
 	STX_ATOMIC_DW:
+	STX_ATOMIC_W:
 		switch (IMM) {
-		case BPF_ADD:
-			/* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
-			atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
-				     (DST + insn->off));
-			break;
-		case BPF_ADD | BPF_FETCH:
-			SRC = (u64) atomic64_fetch_add(
-				(u64) SRC,
-				(atomic64_t *)(unsigned long) (DST + insn->off));
-			break;
+		ATOMIC_ALU_OP(BPF_ADD, add)
+#undef ATOMIC_ALU_OP
+
 		case BPF_XCHG:
-			SRC = (u64) atomic64_xchg(
-				(atomic64_t *)(unsigned long) (DST + insn->off),
-				(u64) SRC);
+			if (BPF_SIZE(insn->code) == BPF_W)
+				SRC = (u32) atomic_xchg(
+					(atomic_t *)(unsigned long) (DST + insn->off),
+					(u32) SRC);
+			else
+				SRC = (u64) atomic64_xchg(
+					(atomic64_t *)(unsigned long) (DST + insn->off),
+					(u64) SRC);
 			break;
 		case BPF_CMPXCHG:
-			BPF_R0 = (u64) atomic64_cmpxchg(
-				(atomic64_t *)(unsigned long) (DST + insn->off),
-				(u64) BPF_R0, (u64) SRC);
+			if (BPF_SIZE(insn->code) == BPF_W)
+				BPF_R0 = (u32) atomic_cmpxchg(
+					(atomic_t *)(unsigned long) (DST + insn->off),
+					(u32) BPF_R0, (u32) SRC);
+			else
+				BPF_R0 = (u64) atomic64_cmpxchg(
+					(atomic64_t *)(unsigned long) (DST + insn->off),
+					(u64) BPF_R0, (u64) SRC);
 			break;
 		default:
 			goto default_label;
 		}