Commit 7bdc97be authored by Jakub Kicinski, committed by Daniel Borkmann

nfp: bpf: optimize comparisons to negative constants

A comparison instruction requires a subtraction.  If the constant
is negative, we are more likely to fit it into an NFP instruction
directly if we change the sign and use addition instead.
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parent 61dd8f00
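
The arithmetic fact the change leans on can be checked outside the driver.
Below is a minimal, standalone user-space sketch in plain C (not NFP or
kernel code; the helper names and sample values are invented for
illustration).  It shows that for a negative, sign-extended immediate an
unsigned "less than" can be decided from the carry-out of adding the
negated constant, and that the negated constant is a small positive value,
i.e. the kind of operand that is likely to fit an instruction's immediate
field directly.

/* Standalone sketch with hypothetical names, not NFP/kernel code. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Carry-out of the 64-bit addition a + b. */
static int add_carry(uint64_t a, uint64_t b)
{
        return a + b < a;
}

/* "a < b" (unsigned), seen the way a subtract-and-branch sees it:
 * a - b borrows exactly when a + (-b) does not carry (valid for b != 0).
 */
static int ult_via_add_neg(uint64_t a, uint64_t b)
{
        return !add_carry(a, -b);
}

int main(void)
{
        int32_t imm = -2;                       /* negative BPF immediate */
        uint64_t simm = (int64_t)imm;           /* sign extended: 0xfffffffffffffffe */
        uint64_t neg = -(int64_t)imm;           /* 2: small positive constant */
        uint64_t samples[] = { 0, 1, 2, ~0ULL - 3, ~0ULL - 1, ~0ULL };

        for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                assert((samples[i] < simm) ==
                       ult_via_add_neg(samples[i], simm));

        printf("to encode: sign-extended %#llx vs negated %#llx\n",
               (unsigned long long)simm, (unsigned long long)neg);
        return 0;
}

The asserts pass for the sampled values, and the final printf makes the size
difference visible: 0xfffffffffffffffe versus 0x2.
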
@@ -1247,6 +1247,7 @@ static int cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 	const struct bpf_insn *insn = &meta->insn;
 	u64 imm = insn->imm; /* sign extend */
 	const struct jmp_code_map *code;
+	enum alu_op alu_op, carry_op;
 	u8 reg = insn->dst_reg * 2;
 	swreg tmp_reg;
 
@@ -1254,19 +1255,22 @@ static int cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
 	if (!code)
 		return -EINVAL;
 
+	alu_op = meta->jump_neg_op ? ALU_OP_ADD : ALU_OP_SUB;
+	carry_op = meta->jump_neg_op ? ALU_OP_ADD_C : ALU_OP_SUB_C;
+
 	tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
 	if (!code->swap)
-		emit_alu(nfp_prog, reg_none(), reg_a(reg), ALU_OP_SUB, tmp_reg);
+		emit_alu(nfp_prog, reg_none(), reg_a(reg), alu_op, tmp_reg);
 	else
-		emit_alu(nfp_prog, reg_none(), tmp_reg, ALU_OP_SUB, reg_a(reg));
+		emit_alu(nfp_prog, reg_none(), tmp_reg, alu_op, reg_a(reg));
 
 	tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
 	if (!code->swap)
 		emit_alu(nfp_prog, reg_none(),
-			 reg_a(reg + 1), ALU_OP_SUB_C, tmp_reg);
+			 reg_a(reg + 1), carry_op, tmp_reg);
 	else
 		emit_alu(nfp_prog, reg_none(),
-			 tmp_reg, ALU_OP_SUB_C, reg_a(reg + 1));
+			 tmp_reg, carry_op, reg_a(reg + 1));
 
 	emit_br(nfp_prog, code->br_mask, insn->off, 0);
 
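
The hunk above keeps cmp_imm()'s two-step structure: the low 32 bits of the
constant are combined with the destination register first, then the high 32
bits with the carry variant of the same ALU op, and emit_br() branches on the
resulting condition codes.  A rough user-space model of that split is
sketched below; it folds in the sign flip that the rewrite pass later in this
patch performs (meta->insn.imm = -insn.imm), and the helper names are made up
for the sketch.

/* User-space model of a 64-bit "dst < imm" done as two 32-bit adds,
 * mirroring ALU_OP_ADD followed by ALU_OP_ADD_C above.  Not driver code.
 */
#include <assert.h>
#include <stdint.h>

/* One 32-bit add step: only the carry-out matters, the sum itself is
 * discarded just as the JIT writes its result to reg_none().
 */
static unsigned int add32_carry(uint32_t a, uint32_t b, unsigned int carry_in)
{
        uint64_t sum = (uint64_t)a + b + carry_in;

        return (unsigned int)(sum >> 32);
}

/* dst < imm (unsigned, 64-bit, imm negative): add the negated constant in
 * two halves and look at the final carry.
 */
static int jlt_neg_imm(uint64_t dst, int32_t imm)
{
        uint64_t neg = -(int64_t)imm;   /* what the pass stores in insn.imm */
        unsigned int carry;

        carry = add32_carry((uint32_t)dst, (uint32_t)neg, 0);
        carry = add32_carry((uint32_t)(dst >> 32), (uint32_t)(neg >> 32), carry);

        return !carry;                  /* no carry-out => dst < imm */
}

int main(void)
{
        uint64_t simm = (int64_t)-1000; /* sign-extended immediate */

        assert(jlt_neg_imm(5, -1000) == 1);
        assert(jlt_neg_imm(simm - 1, -1000) == 1);
        assert(jlt_neg_imm(simm, -1000) == 0);
        assert(jlt_neg_imm(~0ULL, -1000) == 0);
        return 0;
}

After negation the high half of the constant is zero and the low half is
small, so each half is more likely to fit an instruction's immediate field
directly, which is the point made in the commit message.
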
@@ -2745,21 +2749,35 @@ static void nfp_bpf_opt_neg_add_sub(struct nfp_prog *nfp_prog)
 			continue;
 
 		if (BPF_CLASS(insn.code) != BPF_ALU &&
-		    BPF_CLASS(insn.code) != BPF_ALU64)
+		    BPF_CLASS(insn.code) != BPF_ALU64 &&
+		    BPF_CLASS(insn.code) != BPF_JMP)
 			continue;
 		if (BPF_SRC(insn.code) != BPF_K)
 			continue;
 		if (insn.imm >= 0)
 			continue;
 
-		if (BPF_OP(insn.code) == BPF_ADD)
-			insn.code = BPF_CLASS(insn.code) | BPF_SUB;
-		else if (BPF_OP(insn.code) == BPF_SUB)
-			insn.code = BPF_CLASS(insn.code) | BPF_ADD;
-		else
-			continue;
-
-		meta->insn.code = insn.code | BPF_K;
+		if (BPF_CLASS(insn.code) == BPF_JMP) {
+			switch (BPF_OP(insn.code)) {
+			case BPF_JGE:
+			case BPF_JSGE:
+			case BPF_JLT:
+			case BPF_JSLT:
+				meta->jump_neg_op = true;
+				break;
+			default:
+				continue;
+			}
+		} else {
+			if (BPF_OP(insn.code) == BPF_ADD)
+				insn.code = BPF_CLASS(insn.code) | BPF_SUB;
+			else if (BPF_OP(insn.code) == BPF_SUB)
+				insn.code = BPF_CLASS(insn.code) | BPF_ADD;
+			else
+				continue;
+
+			meta->insn.code = insn.code | BPF_K;
+		}
 
 		meta->insn.imm = -insn.imm;
 	}
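
For reference, here is a hypothetical user-space walk-through of the decision
this pass makes, using the UAPI opcode macros from <linux/bpf.h>; the
classify() helper and the two hand-built instructions are invented for the
example and are not part of the driver.  ALU/ALU64 add/sub with a negative
constant get the opcode flipped and the immediate negated, while the four
handled jumps keep their opcode and only record that the JIT should use
ADD/ADD_C.

/* Hypothetical sketch, not driver code. */
#include <stdbool.h>
#include <stdio.h>
#include <linux/bpf.h>

static void classify(struct bpf_insn insn)
{
        bool jump_neg_op = false;

        if (BPF_SRC(insn.code) != BPF_K || insn.imm >= 0)
                return;

        if (BPF_CLASS(insn.code) == BPF_JMP) {
                switch (BPF_OP(insn.code)) {
                case BPF_JGE:
                case BPF_JSGE:
                case BPF_JLT:
                case BPF_JSLT:
                        jump_neg_op = true;     /* opcode kept, JIT adds */
                        break;
                default:
                        return;                 /* other jumps left alone */
                }
        } else if (BPF_CLASS(insn.code) == BPF_ALU ||
                   BPF_CLASS(insn.code) == BPF_ALU64) {
                if (BPF_OP(insn.code) == BPF_ADD)
                        insn.code = BPF_CLASS(insn.code) | BPF_SUB | BPF_K;
                else if (BPF_OP(insn.code) == BPF_SUB)
                        insn.code = BPF_CLASS(insn.code) | BPF_ADD | BPF_K;
                else
                        return;
        } else {
                return;
        }

        printf("code %#04x imm %d jump_neg_op=%d\n",
               (unsigned int)insn.code, -insn.imm, jump_neg_op);
}

int main(void)
{
        /* r2 += -2   ->  rewritten to r2 -= 2 */
        classify((struct bpf_insn){ .code = BPF_ALU64 | BPF_ADD | BPF_K,
                                    .imm = -2 });
        /* if r3 s>= -10  ->  opcode kept, imm stored as 10, flag set */
        classify((struct bpf_insn){ .code = BPF_JMP | BPF_JSGE | BPF_K,
                                    .imm = -10 });
        return 0;
}

Jump opcodes not listed in the switch (JEQ/JNE, JGT/JLE and their signed
variants) hit the default case and keep their original negative immediate.
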
@@ -236,6 +236,7 @@ struct nfp_bpf_reg_state {
  * @xadd_over_16bit: 16bit immediate is not guaranteed
  * @xadd_maybe_16bit: 16bit immediate is possible
  * @jmp_dst: destination info for jump instructions
+ * @jump_neg_op: jump instruction has inverted immediate, use ADD instead of SUB
  * @func_id: function id for call instructions
  * @arg1: arg1 for call instructions
  * @arg2: arg2 for call instructions
@@ -264,7 +265,10 @@ struct nfp_insn_meta {
 			bool xadd_maybe_16bit;
 		};
 		/* jump */
-		struct nfp_insn_meta *jmp_dst;
+		struct {
+			struct nfp_insn_meta *jmp_dst;
+			bool jump_neg_op;
+		};
 		/* function calls */
 		struct {
 			u32 func_id;