Commit 20a759df authored by Puranjay Mohan, committed by Alexei Starovoitov

riscv, bpf: make some atomic operations fully ordered

The BPF atomic operations with the BPF_FETCH modifier, along with
BPF_XCHG and BPF_CMPXCHG, are fully ordered, but the RISC-V JIT
implements all atomic operations except BPF_CMPXCHG with relaxed
ordering.

Section 8.1, "Specifying Ordering of Atomic Instructions", of "The
RISC-V Instruction Set Manual Volume I: Unprivileged ISA" [1] says:

| To provide more efficient support for release consistency [5], each
| atomic instruction has two bits, aq and rl, used to specify additional
| memory ordering constraints as viewed by other RISC-V harts.

and

| If only the aq bit is set, the atomic memory operation is treated as
| an acquire access.
| If only the rl bit is set, the atomic memory operation is treated as a
| release access.
|
| If both the aq and rl bits are set, the atomic memory operation is
| sequentially consistent.

Fix this by setting both the aq and rl bits to 1 for operations with
BPF_FETCH and BPF_XCHG.

[1] https://riscv.org/wp-content/uploads/2017/05/riscv-spec-v2.2.pdf
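
At the encoding level, setting aq and rl turns e.g. amoadd.d into
amoadd.d.aqrl. Below is a standalone sketch of the AMO instruction
layout from the unprivileged ISA (funct5, aq, rl, rs2, rs1, funct3,
rd, opcode); the rv_amo_insn() helper and the register choices are
illustrative assumptions, not the kernel's JIT code:

  #include <stdint.h>
  #include <stdio.h>

  /* Assemble a RISC-V AMO word: funct5[31:27] aq[26] rl[25] rs2[24:20]
   * rs1[19:15] funct3[14:12] rd[11:7] opcode[6:0] = 0101111 (AMO). */
  static uint32_t rv_amo_insn(uint8_t funct5, uint8_t aq, uint8_t rl,
  			    uint8_t rs2, uint8_t rs1, uint8_t funct3,
  			    uint8_t rd)
  {
  	return (uint32_t)funct5 << 27 | (uint32_t)aq << 26 |
  	       (uint32_t)rl << 25 | (uint32_t)rs2 << 20 |
  	       (uint32_t)rs1 << 15 | (uint32_t)funct3 << 12 |
  	       (uint32_t)rd << 7 | 0x2f;
  }

  int main(void)
  {
  	/* amoadd.d a0, a0, (a1): relaxed (aq=0, rl=0), as emitted
  	 * before this fix... */
  	printf("amoadd.d:      0x%08x\n",
  	       rv_amo_insn(0x00, 0, 0, 10, 11, 0x3, 10));
  	/* ...vs. amoadd.d.aqrl (aq=1, rl=1), fully ordered. */
  	printf("amoadd.d.aqrl: 0x%08x\n",
  	       rv_amo_insn(0x00, 1, 1, 10, 11, 0x3, 10));
  	return 0;
  }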

Fixes: dd642ccb ("riscv, bpf: Implement more atomic operations for RV64")
Signed-off-by: Puranjay Mohan <puranjay@kernel.org>
Reviewed-by: Pu Lehui <pulehui@huawei.com>
Link: https://lore.kernel.org/r/20240505201633.123115-1-puranjay@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 80c5a07a
@@ -504,33 +504,33 @@ static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64,
 		break;
 	/* src_reg = atomic_fetch_<op>(dst_reg + off16, src_reg) */
 	case BPF_ADD | BPF_FETCH:
-		emit(is64 ? rv_amoadd_d(rs, rs, rd, 0, 0) :
-		     rv_amoadd_w(rs, rs, rd, 0, 0), ctx);
+		emit(is64 ? rv_amoadd_d(rs, rs, rd, 1, 1) :
+		     rv_amoadd_w(rs, rs, rd, 1, 1), ctx);
 		if (!is64)
 			emit_zextw(rs, rs, ctx);
 		break;
 	case BPF_AND | BPF_FETCH:
-		emit(is64 ? rv_amoand_d(rs, rs, rd, 0, 0) :
-		     rv_amoand_w(rs, rs, rd, 0, 0), ctx);
+		emit(is64 ? rv_amoand_d(rs, rs, rd, 1, 1) :
+		     rv_amoand_w(rs, rs, rd, 1, 1), ctx);
 		if (!is64)
 			emit_zextw(rs, rs, ctx);
 		break;
 	case BPF_OR | BPF_FETCH:
-		emit(is64 ? rv_amoor_d(rs, rs, rd, 0, 0) :
-		     rv_amoor_w(rs, rs, rd, 0, 0), ctx);
+		emit(is64 ? rv_amoor_d(rs, rs, rd, 1, 1) :
+		     rv_amoor_w(rs, rs, rd, 1, 1), ctx);
 		if (!is64)
 			emit_zextw(rs, rs, ctx);
 		break;
 	case BPF_XOR | BPF_FETCH:
-		emit(is64 ? rv_amoxor_d(rs, rs, rd, 0, 0) :
-		     rv_amoxor_w(rs, rs, rd, 0, 0), ctx);
+		emit(is64 ? rv_amoxor_d(rs, rs, rd, 1, 1) :
+		     rv_amoxor_w(rs, rs, rd, 1, 1), ctx);
 		if (!is64)
 			emit_zextw(rs, rs, ctx);
 		break;
 	/* src_reg = atomic_xchg(dst_reg + off16, src_reg); */
 	case BPF_XCHG:
-		emit(is64 ? rv_amoswap_d(rs, rs, rd, 0, 0) :
-		     rv_amoswap_w(rs, rs, rd, 0, 0), ctx);
+		emit(is64 ? rv_amoswap_d(rs, rs, rd, 1, 1) :
+		     rv_amoswap_w(rs, rs, rd, 1, 1), ctx);
 		if (!is64)
 			emit_zextw(rs, rs, ctx);
 		break;