Commit 854dc87d authored by Jiong Wang, committed by Daniel Borkmann

nfp: bpf: support backward jump

This patch adds support for backward jump on NFP.

  - restrictions on backward jump in various functions have been removed.
  - nfp_fixup_branches now supports backward jump.

One thing to note: currently, an input eBPF JMP insn may generate several
NFP insns, for example:

  NFP imm move insn A \
  NFP compare insn  B  --> 3 NFP insn jited from eBPF JMP insn M
  NFP branch insn   C /
  ---
  NFP insn X           --> 1 NFP insn jited from eBPF insn N
  ---
  ...

therefore, we do a sanity check to make sure the last jited insn from an
eBPF JMP is an NFP branch instruction.
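
The check in question (quoted here for reference from the nfp_fixup_branches
hunk below; br_idx is the index of the last NFP insn jited from the eBPF JMP,
insn C in the example above, and nfp_is_br is the driver's existing helper
for recognizing an NFP branch insn):

  /* the NFP insn at br_idx must decode as a branch, otherwise the
   * block was not jited the way we expect and translation is aborted
   */
  if (!nfp_is_br(nfp_prog->prog[br_idx])) {
          pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n",
                 br_idx, meta->insn.code, nfp_prog->prog[br_idx]);
          return -ELOOP;
  }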

Once backward jump is allowed, it is possible for an eBPF JMP insn to be the
last insn of the program. This, however, causes trouble for the sanity
check: the check needs the end index of the NFP insns jited from one eBPF
insn, but before this patch only the start index is recorded, so the end
index can only be obtained as:

  start_index_of_the_next_eBPF_insn - 1

or for the above example:

  start_index_of_eBPF_insn_N (which is the index of NFP insn X) - 1
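
In code, the derivation this patch adds to nfp_fixup_branches (shown here
with extra comments) is:

  if (list_is_last(&meta->l, &nfp_prog->insns)) {
          next = NULL;
          idx = nfp_prog->last_bpf_off;    /* no next eBPF insn to peek at */
  } else {
          next = list_next_entry(meta, l);
          idx = next->off - 1;             /* start of next eBPF insn - 1 */
  }

  br_idx = nfp_prog_offset_to_index(nfp_prog, idx);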

nfp_fixup_branches was using nfp_for_each_insn_walk2 to expose the *next*
insn to each iteration of the traversal, so the end index could be
calculated from it. Now, some extra code is needed to handle the last insn.
Meanwhile, the use of walk2 is actually unnecessary: a generic
single-instruction walk does the job, and the next insn can easily be
obtained with list_next_entry.

So, this patch migrates the jump fixup traversal to *list_for_each_entry*,
which simplifies the code logic a little bit.
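
Condensed, the traversal change looks like this (fixup body elided):

  /* before: pairwise walk, each iteration sees both "meta" and "next" */
  nfp_for_each_insn_walk2(nfp_prog, meta, next) {
          br_idx = nfp_prog_offset_to_index(nfp_prog, next->off) - 1;
          /* ... fixup ... */
  }

  /* after: plain list walk, "next" is looked up only when it exists */
  list_for_each_entry(meta, &nfp_prog->insns, l) {
          next = list_is_last(&meta->l, &nfp_prog->insns) ?
                 NULL : list_next_entry(meta, l);
          /* ... fixup ... */
  }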

The other thing to note is that a new state variable "last_bpf_off" is
introduced to track the index of the last jited NFP insn. This is necessary
because the NFP JIT generates special-purpose epilogue sequences, so the
index of the last jited NFP insn is *not* always nfp_prog->prog_len - 1.
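
Concretely, the new field is recorded in nfp_translate right after the main
translation loop, before nfp_outro emits the epilogue (see the hunk below):

  /* offset of the last NFP insn jited from eBPF; the epilogue emitted
   * by nfp_outro comes after this point
   */
  nfp_prog->last_bpf_off = nfp_prog_current_offset(nfp_prog) - 1;
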
Suggested-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: Jiong Wang <jiong.wang@netronome.com>
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parent a646c9b2
/*
* Copyright (C) 2016 Netronome Systems, Inc.
* Copyright (C) 2016-2017 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -975,9 +975,6 @@ wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
{
const struct bpf_insn *insn = &meta->insn;
if (insn->off < 0) /* TODO */
return -EOPNOTSUPP;
wrp_test_reg_one(nfp_prog, insn->dst_reg * 2, alu_op,
insn->src_reg * 2, br_mask, insn->off);
wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op,
@@ -995,9 +992,6 @@ wrp_cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
u8 reg = insn->dst_reg * 2;
swreg tmp_reg;
if (insn->off < 0) /* TODO */
return -EOPNOTSUPP;
tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
if (!swap)
emit_alu(nfp_prog, reg_none(), reg_a(reg), ALU_OP_SUB, tmp_reg);
@@ -1027,9 +1021,6 @@ wrp_cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
areg = insn->dst_reg * 2;
breg = insn->src_reg * 2;
if (insn->off < 0) /* TODO */
return -EOPNOTSUPP;
if (swap) {
areg ^= breg;
breg ^= areg;
@@ -1630,8 +1621,6 @@ static int mem_stx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
if (meta->insn.off < 0) /* TODO */
return -EOPNOTSUPP;
emit_br(nfp_prog, BR_UNC, meta->insn.off, 0);
return 0;
@@ -1646,9 +1635,6 @@ static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
or1 = reg_a(insn->dst_reg * 2);
or2 = reg_b(insn->dst_reg * 2 + 1);
if (insn->off < 0) /* TODO */
return -EOPNOTSUPP;
if (imm & ~0U) {
tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
emit_alu(nfp_prog, imm_a(nfp_prog),
@@ -1695,9 +1681,6 @@ static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
u64 imm = insn->imm; /* sign extend */
swreg tmp_reg;
if (insn->off < 0) /* TODO */
return -EOPNOTSUPP;
if (!imm) {
meta->skip = true;
return 0;
@@ -1726,9 +1709,6 @@ static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
u64 imm = insn->imm; /* sign extend */
swreg tmp_reg;
if (insn->off < 0) /* TODO */
return -EOPNOTSUPP;
if (!imm) {
emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2),
ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1));
@@ -1753,9 +1733,6 @@ static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
if (insn->off < 0) /* TODO */
return -EOPNOTSUPP;
emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(insn->dst_reg * 2),
ALU_OP_XOR, reg_b(insn->src_reg * 2));
emit_alu(nfp_prog, imm_b(nfp_prog), reg_a(insn->dst_reg * 2 + 1),
@@ -1888,16 +1865,25 @@ static void br_set_offset(u64 *instr, u16 offset)
static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
{
struct nfp_insn_meta *meta, *next;
u32 off, br_idx;
u32 idx;
u32 idx, br_idx;
int off;
nfp_for_each_insn_walk2(nfp_prog, meta, next) {
list_for_each_entry(meta, &nfp_prog->insns, l) {
if (meta->skip)
continue;
if (BPF_CLASS(meta->insn.code) != BPF_JMP)
continue;
br_idx = nfp_prog_offset_to_index(nfp_prog, next->off) - 1;
if (list_is_last(&meta->l, &nfp_prog->insns)) {
next = NULL;
idx = nfp_prog->last_bpf_off;
} else {
next = list_next_entry(meta, l);
idx = next->off - 1;
}
br_idx = nfp_prog_offset_to_index(nfp_prog, idx);
if (!nfp_is_br(nfp_prog->prog[br_idx])) {
pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n",
br_idx, meta->insn.code, nfp_prog->prog[br_idx]);
@@ -1914,10 +1900,30 @@ static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
return -ELOOP;
}
while (off && nfp_meta_has_next(nfp_prog, next)) {
if (!next) {
/* When "next" is NULL, "meta" is the last node in the
* list. Given it is a JMP, it then must be a backward
* jump.
*
* For eBPF, the jump offset is against pc + 1, so we
* need to compensate the offset by 1 as we are pointing
* "next" to the current node "meta".
*/
if (WARN_ON_ONCE(off > -2))
return -ELOOP;
next = meta;
off += 1;
}
while (off > 0 && nfp_meta_has_next(nfp_prog, next)) {
next = nfp_meta_next(next);
off--;
}
while (off < 0 && nfp_meta_has_prev(nfp_prog, next)) {
next = nfp_meta_prev(next);
off++;
}
if (off) {
pr_err("Fixup found too large jump!! %d\n", off);
return -ELOOP;
@@ -2105,6 +2111,8 @@ static int nfp_translate(struct nfp_prog *nfp_prog)
nfp_prog->n_translated++;
}
nfp_prog->last_bpf_off = nfp_prog_current_offset(nfp_prog) - 1;
nfp_outro(nfp_prog);
if (nfp_prog->error)
return nfp_prog->error;
......
/*
* Copyright (C) 2016 Netronome Systems, Inc.
* Copyright (C) 2016-2017 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -142,6 +142,7 @@ static inline u8 mbpf_mode(const struct nfp_insn_meta *meta)
* @verifier_meta: temporary storage for verifier's insn meta
* @type: BPF program type
* @start_off: address of the first instruction in the memory
* @last_bpf_off: address of the last instruction translated from BPF
* @tgt_out: jump target for normal exit
* @tgt_abort: jump target for abort (e.g. access outside of packet buffer)
* @tgt_done: jump target to get the next packet
@@ -160,6 +161,7 @@ struct nfp_prog {
enum bpf_prog_type type;
unsigned int start_off;
unsigned int last_bpf_off;
unsigned int tgt_out;
unsigned int tgt_abort;
unsigned int tgt_done;
......