Commit 1e82dfaa authored by Hari Bathini, committed by Michael Ellerman

powerpc/bpf/64: Add instructions for atomic_[cmp]xchg

This adds two atomic opcodes BPF_XCHG and BPF_CMPXCHG on ppc64, both
of which include the BPF_FETCH flag.  The kernel's atomic_cmpxchg
operation fundamentally has 3 operands, but we only have two register
fields. Therefore the operand we compare against (the kernel's API
calls it 'old') is hard-coded to be BPF_REG_R0. Also, the kernel's
atomic_cmpxchg returns the previous value at dst_reg + off, so JIT the
same for BPF too, with the return value put in BPF_REG_0.

  BPF_REG_R0 = atomic_cmpxchg(dst_reg + off, BPF_REG_R0, src_reg);
Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
Tested-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com> (ppc64le)
Reviewed-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20220610155552.25892-4-hbathini@linux.ibm.com
parent dbe6e245
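
As an illustration of the semantics described in the commit message, a BPF_CMPXCHG instruction might be built with the insn macros from include/linux/filter.h roughly as follows. This is a minimal sketch; the stack slot, constants and register choices are assumptions for this example, not taken from the patch:

	/* Illustrative only: BPF_CMPXCHG as a test program might encode it. */
	struct bpf_insn insns[] = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 3),	/* *(u64 *)(fp - 8) = 3       */
		BPF_MOV64_IMM(BPF_REG_0, 3),		/* R0 = expected ('old') value */
		BPF_MOV64_IMM(BPF_REG_1, 4),		/* R1 = new value to store     */
		/* R0 = atomic_cmpxchg(fp - 8, R0, R1) */
		BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -8),
		/* R0 now holds the previous value (3); the stack slot holds 4 */
		BPF_EXIT_INSN(),
	};

For BPF_XCHG, by contrast, the fetched old value lands in the source register itself: BPF_ATOMIC_OP(BPF_DW, BPF_XCHG, BPF_REG_10, BPF_REG_1, -8) would leave the previous value in R1.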
@@ -360,6 +360,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
 		u32 size = BPF_SIZE(code);
 		u32 tmp1_reg = bpf_to_ppc(TMP_REG_1);
 		u32 tmp2_reg = bpf_to_ppc(TMP_REG_2);
+		u32 save_reg, ret_reg;
 		s16 off = insn[i].off;
 		s32 imm = insn[i].imm;
 		bool func_addr_fixed;
@@ -778,6 +779,9 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
 		 */
 		case BPF_STX | BPF_ATOMIC | BPF_W:
 		case BPF_STX | BPF_ATOMIC | BPF_DW:
+			save_reg = tmp2_reg;
+			ret_reg = src_reg;
+
 			/* Get offset into TMP_REG_1 */
 			EMIT(PPC_RAW_LI(tmp1_reg, off));
 			tmp_idx = ctx->idx * 4;
@@ -808,6 +812,24 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
 			case BPF_XOR | BPF_FETCH:
 				EMIT(PPC_RAW_XOR(tmp2_reg, tmp2_reg, src_reg));
 				break;
+			case BPF_CMPXCHG:
+				/*
+				 * Return old value in BPF_REG_0 for BPF_CMPXCHG &
+				 * in src_reg for other cases.
+				 */
+				ret_reg = bpf_to_ppc(BPF_REG_0);
+
+				/* Compare with old value in BPF_R0 */
+				if (size == BPF_DW)
+					EMIT(PPC_RAW_CMPD(bpf_to_ppc(BPF_REG_0), tmp2_reg));
+				else
+					EMIT(PPC_RAW_CMPW(bpf_to_ppc(BPF_REG_0), tmp2_reg));
+				/* Don't set if different from old value */
+				PPC_BCC_SHORT(COND_NE, (ctx->idx + 3) * 4);
+				fallthrough;
+			case BPF_XCHG:
+				save_reg = src_reg;
+				break;
 			default:
 				pr_err_ratelimited(
 					"eBPF filter atomic op code %02x (@%d) unsupported\n",
@@ -817,15 +839,22 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
 			/* store new value */
 			if (size == BPF_DW)
-				EMIT(PPC_RAW_STDCX(tmp2_reg, tmp1_reg, dst_reg));
+				EMIT(PPC_RAW_STDCX(save_reg, tmp1_reg, dst_reg));
 			else
-				EMIT(PPC_RAW_STWCX(tmp2_reg, tmp1_reg, dst_reg));
+				EMIT(PPC_RAW_STWCX(save_reg, tmp1_reg, dst_reg));
 			/* we're done if this succeeded */
 			PPC_BCC_SHORT(COND_NE, tmp_idx);
 
-			/* For the BPF_FETCH variant, get old value into src_reg */
-			if (imm & BPF_FETCH)
-				EMIT(PPC_RAW_MR(src_reg, _R0));
+			if (imm & BPF_FETCH) {
+				EMIT(PPC_RAW_MR(ret_reg, _R0));
+				/*
+				 * Skip unnecessary zero-extension for 32-bit cmpxchg.
+				 * For context, see commit 39491867ace5.
+				 */
+				if (size != BPF_DW && imm == BPF_CMPXCHG &&
+				    insn_is_zext(&insn[i + 1]))
+					addrs[++i] = ctx->idx * 4;
+			}
 			break;
 
 			/*
...
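
For reference, the sequence the JIT ends up emitting for a 64-bit BPF_CMPXCHG looks roughly like the sketch below. It is pieced together from the hunks above plus the surrounding larx/stcx. retry loop that is collapsed in this excerpt, and the register names refer to the JIT's internal mapping, not literal assembler output:

	/*
	 * Rough shape of the emitted code for a 64-bit BPF_CMPXCHG (sketch only):
	 *
	 *         li      tmp1, off
	 * retry:  ldarx   tmp2, tmp1, dst    # load-reserve current value at dst + off
	 *         mr      r0, tmp2           # BPF_FETCH: stash the old value in _R0
	 *         cmpd    bpf_r0, tmp2       # compare with expected value in BPF_REG_0
	 *         bne     done               # mismatch: skip the store
	 *         stdcx.  src, tmp1, dst     # conditionally store the new value
	 *         bne     retry              # reservation lost: start over
	 * done:   mr      bpf_r0, r0         # previous value returned in BPF_REG_0
	 */

The PPC_BCC_SHORT(COND_NE, (ctx->idx + 3) * 4) in the BPF_CMPXCHG case is the "bne done" above: it hops over the store-conditional and its retry branch, landing on the move that puts the old value into BPF_REG_0.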