Commit 361db44c authored by Pu Lehui, committed by Daniel Borkmann

riscv, bpf: Simplify sext and zext logics in branch instructions

The branch instructions currently go through a number of extension
helpers, which makes the implementation somewhat convoluted. Simplify
this logic with two small extension helpers that extend the operand
into an alternate register, leaving the original register untouched.
Signed-off-by: Pu Lehui <pulehui@huawei.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Tested-by: Björn Töpel <bjorn@rivosinc.com>
Acked-by: Björn Töpel <bjorn@kernel.org>
Link: https://lore.kernel.org/bpf/20240115131235.2914289-4-pulehui@huaweicloud.com
parent 914c7a5f
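For context before the diff: RV64 compare-and-branch instructions always operate on full 64-bit registers, so a 32-bit (BPF_JMP32) condition is only correct once the operands' upper 32 bits have been normalized by sign extension (signed conditions) or zero extension (unsigned ones). The user-space sketch below illustrates that requirement; the sextw()/zextw() functions are hypothetical stand-ins for the JIT's emit_sextw()/emit_zextw() helpers and are not part of the patch.

/*
 * Standalone illustration (not kernel code) of why the JIT sign- or
 * zero-extends 32-bit operands before emitting a branch: RV64 compares
 * look at the full 64-bit register, so a BPF JMP32 condition is only
 * correct once the upper 32 bits agree with the low 32 bits.
 */
#include <stdint.h>
#include <stdio.h>

/* Rough models of emit_sextw()/emit_zextw(): extend the low 32 bits. */
static uint64_t sextw(uint64_t r) { return (uint64_t)(int64_t)(int32_t)r; }
static uint64_t zextw(uint64_t r) { return (uint64_t)(uint32_t)r; }

int main(void)
{
	uint64_t rd = 0x0000000080000000ULL;	/* low 32 bits: INT32_MIN */
	uint64_t rs = 0x0000000000000001ULL;	/* low 32 bits: 1 */

	/* Naive 64-bit signed compare sees +2147483648 and gets it wrong. */
	printf("64-bit slt : %d\n", (int64_t)rd < (int64_t)rs);

	/* After sign extension the compare matches BPF JMP32 JSLT. */
	printf("sext + slt : %d\n", (int64_t)sextw(rd) < (int64_t)sextw(rs));

	/* Unsigned JMP32 conditions use zero extension instead. */
	printf("zext + sltu: %d\n", zextw(rd) < zextw(rs));

	return 0;
}

Compiled with any C compiler, this prints 0, 1, 0: the plain 64-bit signed compare disagrees with the 32-bit semantics until the operands are sign-extended.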
@@ -141,6 +141,19 @@ static bool in_auipc_jalr_range(s64 val)
 		val < ((1L << 31) - (1L << 11));
 }
 
+/* Modify rd pointer to alternate reg to avoid corrupting original reg */
+static void emit_sextw_alt(u8 *rd, u8 ra, struct rv_jit_context *ctx)
+{
+	emit_sextw(ra, *rd, ctx);
+	*rd = ra;
+}
+
+static void emit_zextw_alt(u8 *rd, u8 ra, struct rv_jit_context *ctx)
+{
+	emit_zextw(ra, *rd, ctx);
+	*rd = ra;
+}
+
 /* Emit fixed-length instructions for address */
 static int emit_addr(u8 rd, u64 addr, bool extra_pass, struct rv_jit_context *ctx)
 {
@@ -399,38 +412,6 @@ static void init_regs(u8 *rd, u8 *rs, const struct bpf_insn *insn,
 	*rs = bpf_to_rv_reg(insn->src_reg, ctx);
 }
 
-static void emit_zext_32_rd_rs(u8 *rd, u8 *rs, struct rv_jit_context *ctx)
-{
-	emit_mv(RV_REG_T2, *rd, ctx);
-	emit_zextw(RV_REG_T2, RV_REG_T2, ctx);
-	emit_mv(RV_REG_T1, *rs, ctx);
-	emit_zextw(RV_REG_T1, RV_REG_T1, ctx);
-	*rd = RV_REG_T2;
-	*rs = RV_REG_T1;
-}
-
-static void emit_sext_32_rd_rs(u8 *rd, u8 *rs, struct rv_jit_context *ctx)
-{
-	emit_sextw(RV_REG_T2, *rd, ctx);
-	emit_sextw(RV_REG_T1, *rs, ctx);
-	*rd = RV_REG_T2;
-	*rs = RV_REG_T1;
-}
-
-static void emit_zext_32_rd_t1(u8 *rd, struct rv_jit_context *ctx)
-{
-	emit_mv(RV_REG_T2, *rd, ctx);
-	emit_zextw(RV_REG_T2, RV_REG_T2, ctx);
-	emit_zextw(RV_REG_T1, RV_REG_T1, ctx);
-	*rd = RV_REG_T2;
-}
-
-static void emit_sext_32_rd(u8 *rd, struct rv_jit_context *ctx)
-{
-	emit_sextw(RV_REG_T2, *rd, ctx);
-	*rd = RV_REG_T2;
-}
-
 static int emit_jump_and_link(u8 rd, s64 rvoff, bool fixed_addr,
			      struct rv_jit_context *ctx)
 {
@@ -1419,10 +1400,13 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
 		rvoff = rv_offset(i, off, ctx);
 		if (!is64) {
 			s = ctx->ninsns;
-			if (is_signed_bpf_cond(BPF_OP(code)))
-				emit_sext_32_rd_rs(&rd, &rs, ctx);
-			else
-				emit_zext_32_rd_rs(&rd, &rs, ctx);
+			if (is_signed_bpf_cond(BPF_OP(code))) {
+				emit_sextw_alt(&rs, RV_REG_T1, ctx);
+				emit_sextw_alt(&rd, RV_REG_T2, ctx);
+			} else {
+				emit_zextw_alt(&rs, RV_REG_T1, ctx);
+				emit_zextw_alt(&rd, RV_REG_T2, ctx);
+			}
 			e = ctx->ninsns;
 			/* Adjust for extra insns */
@@ -1433,8 +1417,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
 			/* Adjust for and */
 			rvoff -= 4;
 			emit_and(RV_REG_T1, rd, rs, ctx);
-			emit_branch(BPF_JNE, RV_REG_T1, RV_REG_ZERO, rvoff,
-				    ctx);
+			emit_branch(BPF_JNE, RV_REG_T1, RV_REG_ZERO, rvoff, ctx);
 		} else {
 			emit_branch(BPF_OP(code), rd, rs, rvoff, ctx);
 		}
@@ -1463,18 +1446,18 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
 	case BPF_JMP32 | BPF_JSLE | BPF_K:
 		rvoff = rv_offset(i, off, ctx);
 		s = ctx->ninsns;
-		if (imm) {
+		if (imm)
 			emit_imm(RV_REG_T1, imm, ctx);
-			rs = RV_REG_T1;
-		} else {
-			/* If imm is 0, simply use zero register. */
-			rs = RV_REG_ZERO;
-		}
+		rs = imm ? RV_REG_T1 : RV_REG_ZERO;
 		if (!is64) {
-			if (is_signed_bpf_cond(BPF_OP(code)))
-				emit_sext_32_rd(&rd, ctx);
-			else
-				emit_zext_32_rd_t1(&rd, ctx);
+			if (is_signed_bpf_cond(BPF_OP(code))) {
+				emit_sextw_alt(&rd, RV_REG_T2, ctx);
+				/* rs has been sign extended */
+			} else {
+				emit_zextw_alt(&rd, RV_REG_T2, ctx);
+				if (imm)
+					emit_zextw(rs, rs, ctx);
+			}
 		}
 		e = ctx->ninsns;
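A note on the design: the new helpers take a pointer to the register variable (u8 *rd) so that, after extending the value into a scratch register, they can repoint the caller's variable at that scratch; the branch emitted afterwards then reads the extended copy while the original BPF register stays intact. Below is a minimal user-space mock of that pattern, not the JIT itself; the mock_* functions and printed mnemonics are illustrative stand-ins for the real emitters (x6/x7 are the RISC-V t1/t2 scratch registers the patch uses).

/*
 * Minimal mock (not kernel code) of the alternate-register pattern used
 * by emit_sextw_alt()/emit_zextw_alt(): extend the operand into a scratch
 * register, then repoint the caller's register variable at that scratch
 * so the original BPF register is left untouched for later instructions.
 */
#include <stdint.h>
#include <stdio.h>

#define T1 6	/* RISC-V t1 (x6), scratch for rs */
#define T2 7	/* RISC-V t2 (x7), scratch for rd */

/* Hypothetical stand-in for emit_sextw(): just print the instruction. */
static void mock_emit_sextw(uint8_t rd, uint8_t rs)
{
	printf("sext.w  x%u, x%u\n", rd, rs);
}

/* Mirror of emit_sextw_alt(): extend *rd into ra, then repoint *rd at ra. */
static void mock_sextw_alt(uint8_t *rd, uint8_t ra)
{
	mock_emit_sextw(ra, *rd);
	*rd = ra;
}

int main(void)
{
	uint8_t rd = 10, rs = 11;	/* pretend BPF regs mapped to x10/x11 */

	/* Same order as the patched signed BPF_JMP32 ... BPF_X path. */
	mock_sextw_alt(&rs, T1);
	mock_sextw_alt(&rd, T2);

	/* The branch now reads the sign-extended scratch registers. */
	printf("blt     x%u, x%u, <target>\n", rd, rs);
	return 0;
}

Running it prints the two sign extensions followed by a branch that reads x7 and x6 rather than the original x10/x11, which is exactly the effect the helpers have on the emitted RV64 code.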