Commit 65112709 authored by Hari Bathini, committed by Michael Ellerman

powerpc/bpf/64: add support for BPF_ATOMIC bitwise operations

Adding instructions for ppc64 for

atomic[64]_and
atomic[64]_or
atomic[64]_xor
Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
Tested-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com> (ppc64le)
Reviewed-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20220610155552.25892-2-hbathini@linux.ibm.com
parent 61bdbca8
@@ -777,41 +777,42 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
* BPF_STX ATOMIC (atomic ops) * BPF_STX ATOMIC (atomic ops)
*/ */
case BPF_STX | BPF_ATOMIC | BPF_W: case BPF_STX | BPF_ATOMIC | BPF_W:
if (imm != BPF_ADD) { case BPF_STX | BPF_ATOMIC | BPF_DW:
pr_err_ratelimited( /* Get offset into TMP_REG_1 */
"eBPF filter atomic op code %02x (@%d) unsupported\n", EMIT(PPC_RAW_LI(tmp1_reg, off));
code, i);
return -ENOTSUPP;
}
/* *(u32 *)(dst + off) += src */
/* Get EA into TMP_REG_1 */
EMIT(PPC_RAW_ADDI(tmp1_reg, dst_reg, off));
tmp_idx = ctx->idx * 4; tmp_idx = ctx->idx * 4;
/* load value from memory into TMP_REG_2 */ /* load value from memory into TMP_REG_2 */
EMIT(PPC_RAW_LWARX(tmp2_reg, 0, tmp1_reg, 0)); if (size == BPF_DW)
/* add value from src_reg into this */ EMIT(PPC_RAW_LDARX(tmp2_reg, tmp1_reg, dst_reg, 0));
else
EMIT(PPC_RAW_LWARX(tmp2_reg, tmp1_reg, dst_reg, 0));
switch (imm) {
case BPF_ADD:
EMIT(PPC_RAW_ADD(tmp2_reg, tmp2_reg, src_reg)); EMIT(PPC_RAW_ADD(tmp2_reg, tmp2_reg, src_reg));
/* store result back */
EMIT(PPC_RAW_STWCX(tmp2_reg, 0, tmp1_reg));
/* we're done if this succeeded */
PPC_BCC_SHORT(COND_NE, tmp_idx);
break; break;
case BPF_STX | BPF_ATOMIC | BPF_DW: case BPF_AND:
if (imm != BPF_ADD) { EMIT(PPC_RAW_AND(tmp2_reg, tmp2_reg, src_reg));
break;
case BPF_OR:
EMIT(PPC_RAW_OR(tmp2_reg, tmp2_reg, src_reg));
break;
case BPF_XOR:
EMIT(PPC_RAW_XOR(tmp2_reg, tmp2_reg, src_reg));
break;
default:
pr_err_ratelimited( pr_err_ratelimited(
"eBPF filter atomic op code %02x (@%d) unsupported\n", "eBPF filter atomic op code %02x (@%d) unsupported\n",
code, i); code, i);
return -ENOTSUPP; return -EOPNOTSUPP;
} }
/* *(u64 *)(dst + off) += src */
EMIT(PPC_RAW_ADDI(tmp1_reg, dst_reg, off)); /* store result back */
tmp_idx = ctx->idx * 4; if (size == BPF_DW)
EMIT(PPC_RAW_LDARX(tmp2_reg, 0, tmp1_reg, 0)); EMIT(PPC_RAW_STDCX(tmp2_reg, tmp1_reg, dst_reg));
EMIT(PPC_RAW_ADD(tmp2_reg, tmp2_reg, src_reg)); else
EMIT(PPC_RAW_STDCX(tmp2_reg, 0, tmp1_reg)); EMIT(PPC_RAW_STWCX(tmp2_reg, tmp1_reg, dst_reg));
/* we're done if this succeeded */
PPC_BCC_SHORT(COND_NE, tmp_idx); PPC_BCC_SHORT(COND_NE, tmp_idx);
break; break;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment