Commit dcb0c27f authored by Jakub Kicinski, committed by Alexei Starovoitov

nfp: bpf: add basic support for atomic adds

Implement the atomic add operation for 32- and 64-bit values.  Rely
on the verifier to ensure alignment.  Values have to be kept in
big endian and swapped upon read/write.  For now only atomic add
of a constant is supported.
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Quentin Monnet <quentin.monnet@netronome.com>
Reviewed-by: Jiong Wang <jiong.wang@netronome.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent e59ac634
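
As background for the diff below, here is a minimal sketch (not part of this commit) of the kind of program the offload now handles: an XDP program that bumps a 64-bit counter held in an array map with an atomic add of a constant. The header paths, map-definition style and section names are toolchain-dependent assumptions, not taken from this patch.

/* Hypothetical example - the increment compiles down to
 * BPF_STX | BPF_XADD | BPF_DW with a constant source operand,
 * which is the case this patch offloads.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct bpf_map_def SEC("maps") pkt_cnt = {
	.type		= BPF_MAP_TYPE_ARRAY,
	.key_size	= sizeof(__u32),
	.value_size	= sizeof(__u64),
	.max_entries	= 1,
};

SEC("xdp")
int count_packets(struct xdp_md *ctx)
{
	__u32 key = 0;
	__u64 *cnt;

	cnt = bpf_map_lookup_elem(&pkt_cnt, &key);
	if (cnt)
		__sync_fetch_and_add(cnt, 1);	/* atomic add of a constant */

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";

Adding a value that is not a small compile-time constant (for example a packet length computed at run time) would be rejected by the nfp_bpf_check_xadd() verifier hook added below.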
@@ -2127,6 +2127,49 @@ static int mem_stx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
	return mem_stx(nfp_prog, meta, 8);
}

static int
mem_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, bool is64)
{
	swreg addra, addrb, off, prev_alu = imm_a(nfp_prog);
	u8 dst_gpr = meta->insn.dst_reg * 2;
	u8 src_gpr = meta->insn.src_reg * 2;

	off = ur_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));

	/* If the insn has an offset, add it to the address first */
	if (!meta->insn.off) {
		addra = reg_a(dst_gpr);
		addrb = reg_b(dst_gpr + 1);
	} else {
		emit_alu(nfp_prog, imma_a(nfp_prog),
			 reg_a(dst_gpr), ALU_OP_ADD, off);
		emit_alu(nfp_prog, imma_b(nfp_prog),
			 reg_a(dst_gpr + 1), ALU_OP_ADD_C, reg_imm(0));
		addra = imma_a(nfp_prog);
		addrb = imma_b(nfp_prog);
	}

	wrp_immed(nfp_prog, prev_alu,
		  FIELD_PREP(CMD_OVE_DATA, 2) |
		  CMD_OVE_LEN |
		  FIELD_PREP(CMD_OV_LEN, 0x8 | is64 << 2));
	wrp_reg_or_subpart(nfp_prog, prev_alu, reg_b(src_gpr), 2, 2);
	emit_cmd_indir(nfp_prog, CMD_TGT_ADD_IMM, CMD_MODE_40b_BA, 0,
		       addra, addrb, 0, false);

	return 0;
}

static int mem_xadd4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_xadd(nfp_prog, meta, false);
}

static int mem_xadd8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return mem_xadd(nfp_prog, meta, true);
}

static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	emit_br(nfp_prog, BR_UNC, meta->insn.off, 0);
@@ -2390,6 +2433,8 @@ static const instr_cb_t instr_cb[256] = {
	[BPF_STX | BPF_MEM | BPF_H] = mem_stx2,
	[BPF_STX | BPF_MEM | BPF_W] = mem_stx4,
	[BPF_STX | BPF_MEM | BPF_DW] = mem_stx8,
	[BPF_STX | BPF_XADD | BPF_W] = mem_xadd4,
	[BPF_STX | BPF_XADD | BPF_DW] = mem_xadd8,
	[BPF_ST | BPF_MEM | BPF_B] = mem_st1,
	[BPF_ST | BPF_MEM | BPF_H] = mem_st2,
	[BPF_ST | BPF_MEM | BPF_W] = mem_st4,
......
@@ -72,6 +72,7 @@ enum nfp_relo_type {
#define BR_OFF_RELO 15000

enum static_regs {
	STATIC_REG_IMMA = 20, /* Bank AB */
	STATIC_REG_IMM = 21, /* Bank AB */
	STATIC_REG_STACK = 22, /* Bank A */
	STATIC_REG_PKT_LEN = 22, /* Bank B */
@@ -91,6 +92,8 @@ enum pkt_vec {
#define pptr_reg(np) pv_ctm_ptr(np)
#define imm_a(np) reg_a(STATIC_REG_IMM)
#define imm_b(np) reg_b(STATIC_REG_IMM)
#define imma_a(np) reg_a(STATIC_REG_IMMA)
#define imma_b(np) reg_b(STATIC_REG_IMMA)
#define imm_both(np) reg_both(STATIC_REG_IMM)

#define NFP_BPF_ABI_FLAGS reg_imm(0)
@@ -169,18 +172,27 @@ struct nfp_app_bpf {
	} helpers;
};

enum nfp_bpf_map_use {
	NFP_MAP_UNUSED = 0,
	NFP_MAP_USE_READ,
	NFP_MAP_USE_WRITE,
	NFP_MAP_USE_ATOMIC_CNT,
};

/**
 * struct nfp_bpf_map - private per-map data attached to BPF maps for offload
 * @offmap: pointer to the offloaded BPF map
 * @bpf: back pointer to bpf app private structure
 * @tid: table id identifying map on datapath
 * @l: link on the nfp_app_bpf->map_list list
 * @use_map: map of how the value is used (in 4B chunks)
 */
struct nfp_bpf_map {
	struct bpf_offloaded_map *offmap;
	struct nfp_app_bpf *bpf;
	u32 tid;
	struct list_head l;
	enum nfp_bpf_map_use use_map[];
};

struct nfp_prog;
@@ -320,6 +332,11 @@ static inline bool is_mbpf_classic_store_pkt(const struct nfp_insn_meta *meta)
	return is_mbpf_classic_store(meta) && meta->ptr.type == PTR_TO_PACKET;
}

static inline bool is_mbpf_xadd(const struct nfp_insn_meta *meta)
{
	return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_XADD);
}

/**
 * struct nfp_prog - nfp BPF program
 * @bpf: backpointer to the bpf app priv structure
......
@@ -164,6 +164,41 @@ static int nfp_bpf_destroy(struct nfp_net *nn, struct bpf_prog *prog)
	return 0;
}

/* Atomic engine requires values to be in big endian, so we need to byte
 * swap the value words used with xadd.
 */
static void nfp_map_bpf_byte_swap(struct nfp_bpf_map *nfp_map, void *value)
{
	u32 *word = value;
	unsigned int i;

	for (i = 0; i < DIV_ROUND_UP(nfp_map->offmap->map.value_size, 4); i++)
		if (nfp_map->use_map[i] == NFP_MAP_USE_ATOMIC_CNT)
			word[i] = (__force u32)cpu_to_be32(word[i]);
}

static int
nfp_bpf_map_lookup_entry(struct bpf_offloaded_map *offmap,
			 void *key, void *value)
{
	int err;

	err = nfp_bpf_ctrl_lookup_entry(offmap, key, value);
	if (err)
		return err;

	nfp_map_bpf_byte_swap(offmap->dev_priv, value);
	return 0;
}

static int
nfp_bpf_map_update_entry(struct bpf_offloaded_map *offmap,
			 void *key, void *value, u64 flags)
{
	nfp_map_bpf_byte_swap(offmap->dev_priv, value);

	return nfp_bpf_ctrl_update_entry(offmap, key, value, flags);
}

static int
nfp_bpf_map_get_next_key(struct bpf_offloaded_map *offmap,
			 void *key, void *next_key)
@@ -183,8 +218,8 @@ nfp_bpf_map_delete_elem(struct bpf_offloaded_map *offmap, void *key)
 static const struct bpf_map_dev_ops nfp_bpf_map_ops = {
 	.map_get_next_key = nfp_bpf_map_get_next_key,
-	.map_lookup_elem = nfp_bpf_ctrl_lookup_entry,
-	.map_update_elem = nfp_bpf_ctrl_update_entry,
+	.map_lookup_elem = nfp_bpf_map_lookup_entry,
+	.map_update_elem = nfp_bpf_map_update_entry,
 	.map_delete_elem = nfp_bpf_map_delete_elem,
 };
@@ -192,6 +227,7 @@ static int
nfp_bpf_map_alloc(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
{
	struct nfp_bpf_map *nfp_map;
	unsigned int use_map_size;
	long long int res;

	if (!bpf->maps.types)
@@ -226,7 +262,10 @@ nfp_bpf_map_alloc(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
 		return -ENOMEM;
 	}

-	nfp_map = kzalloc(sizeof(*nfp_map), GFP_USER);
+	use_map_size = DIV_ROUND_UP(offmap->map.value_size, 4) *
+		       FIELD_SIZEOF(struct nfp_bpf_map, use_map[0]);
+	nfp_map = kzalloc(sizeof(*nfp_map) + use_map_size, GFP_USER);
 	if (!nfp_map)
 		return -ENOMEM;
......
@@ -285,6 +285,72 @@ nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog,
	return -EINVAL;
}

static const char *nfp_bpf_map_use_name(enum nfp_bpf_map_use use)
{
	static const char * const names[] = {
		[NFP_MAP_UNUSED] = "unused",
		[NFP_MAP_USE_READ] = "read",
		[NFP_MAP_USE_WRITE] = "write",
		[NFP_MAP_USE_ATOMIC_CNT] = "atomic",
	};

	if (use >= ARRAY_SIZE(names) || !names[use])
		return "unknown";
	return names[use];
}

static int
nfp_bpf_map_mark_used_one(struct bpf_verifier_env *env,
			  struct nfp_bpf_map *nfp_map,
			  unsigned int off, enum nfp_bpf_map_use use)
{
	if (nfp_map->use_map[off / 4] != NFP_MAP_UNUSED &&
	    nfp_map->use_map[off / 4] != use) {
		pr_vlog(env, "map value use type conflict %s vs %s off: %u\n",
			nfp_bpf_map_use_name(nfp_map->use_map[off / 4]),
			nfp_bpf_map_use_name(use), off);
		return -EOPNOTSUPP;
	}

	nfp_map->use_map[off / 4] = use;

	return 0;
}

static int
nfp_bpf_map_mark_used(struct bpf_verifier_env *env, struct nfp_insn_meta *meta,
		      const struct bpf_reg_state *reg,
		      enum nfp_bpf_map_use use)
{
	struct bpf_offloaded_map *offmap;
	struct nfp_bpf_map *nfp_map;
	unsigned int size, off;
	int i, err;

	if (!tnum_is_const(reg->var_off)) {
		pr_vlog(env, "map value offset is variable\n");
		return -EOPNOTSUPP;
	}

	off = reg->var_off.value + meta->insn.off + reg->off;
	size = BPF_LDST_BYTES(&meta->insn);
	offmap = map_to_offmap(reg->map_ptr);
	nfp_map = offmap->dev_priv;

	if (off + size > offmap->map.value_size) {
		pr_vlog(env, "map value access out-of-bounds\n");
		return -EINVAL;
	}

	for (i = 0; i < size; i += 4 - (off + i) % 4) {
		err = nfp_bpf_map_mark_used_one(env, nfp_map, off + i, use);
		if (err)
			return err;
	}

	return 0;
}
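
The stride i += 4 - (off + i) % 4 in the loop above advances to the next 4-byte word boundary, so every word the access touches is marked exactly once even when the access is unaligned. A standalone sketch (illustrative, not from the commit) of which words an 8-byte access at offset 6 would mark:

#include <stdio.h>

int main(void)
{
	unsigned int off = 6, size = 8;	/* 8-byte access at map value offset 6 */

	/* same stride as nfp_bpf_map_mark_used(): jump to the next word boundary */
	for (unsigned int i = 0; i < size; i += 4 - (off + i) % 4)
		printf("mark word %u\n", (off + i) / 4);

	/* prints words 1, 2 and 3 - the words covering bytes 6..13 */
	return 0;
}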

static int
nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  struct bpf_verifier_env *env, u8 reg_no)
@@ -307,10 +373,22 @@ nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	}

	if (reg->type == PTR_TO_MAP_VALUE) {
		if (is_mbpf_load(meta)) {
			err = nfp_bpf_map_mark_used(env, meta, reg,
						    NFP_MAP_USE_READ);
			if (err)
				return err;
		}
		if (is_mbpf_store(meta)) {
			pr_vlog(env, "map writes not supported\n");
			return -EOPNOTSUPP;
		}
		if (is_mbpf_xadd(meta)) {
			err = nfp_bpf_map_mark_used(env, meta, reg,
						    NFP_MAP_USE_ATOMIC_CNT);
			if (err)
				return err;
		}
	}

	if (meta->ptr.type != NOT_INIT && meta->ptr.type != reg->type) {
@@ -324,6 +402,31 @@ nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	return 0;
}

static int
nfp_bpf_check_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		   struct bpf_verifier_env *env)
{
	const struct bpf_reg_state *sreg = cur_regs(env) + meta->insn.src_reg;
	const struct bpf_reg_state *dreg = cur_regs(env) + meta->insn.dst_reg;

	if (dreg->type != PTR_TO_MAP_VALUE) {
		pr_vlog(env, "atomic add not to a map value pointer: %d\n",
			dreg->type);
		return -EOPNOTSUPP;
	}
	if (sreg->type != SCALAR_VALUE ||
	    sreg->var_off.value > 0xffff || sreg->var_off.mask > 0xffff) {
		char tn_buf[48];

		tnum_strn(tn_buf, sizeof(tn_buf), sreg->var_off);
		pr_vlog(env, "atomic add not of a small constant scalar: %s\n",
			tn_buf);
		return -EOPNOTSUPP;
	}

	return nfp_bpf_check_ptr(nfp_prog, meta, env, meta->insn.dst_reg);
}
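
The scalar test above operates on the verifier's tnum: var_off.value holds the known bits and var_off.mask the unknown ones, and both must fit in 16 bits for the source operand to be usable as an add immediate. A standalone sketch of that gate (the struct mirrors the kernel's struct tnum; the SCALAR_VALUE type check is omitted; purely illustrative):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* mirrors the kernel's struct tnum: value = known bits, mask = unknown bits */
struct tnum {
	uint64_t value;
	uint64_t mask;
};

static bool xadd_src_ok(struct tnum t)
{
	/* same bound as nfp_bpf_check_xadd(): both halves must fit in 16 bits */
	return t.value <= 0xffff && t.mask <= 0xffff;
}

int main(void)
{
	struct tnum one     = { .value = 1, .mask = 0 };	/* constant 1 */
	struct tnum unknown = { .value = 0, .mask = ~0ULL };	/* fully unknown */

	printf("constant 1:    %s\n", xadd_src_ok(one) ? "accepted" : "rejected");
	printf("unknown value: %s\n", xadd_src_ok(unknown) ? "accepted" : "rejected");
	return 0;
}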

static int
nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
{
@@ -356,6 +459,8 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
	if (is_mbpf_store(meta))
		return nfp_bpf_check_ptr(nfp_prog, meta, env,
					 meta->insn.dst_reg);
if (is_mbpf_xadd(meta))
return nfp_bpf_check_xadd(nfp_prog, meta, env);

	return 0;
}
......
@@ -48,6 +48,7 @@ const struct cmd_tgt_act cmd_tgt_act[__CMD_TGT_MAP_SIZE] = {
	[CMD_TGT_READ32_SWAP] = { 0x02, 0x5c },
	[CMD_TGT_READ_LE] = { 0x01, 0x40 },
	[CMD_TGT_READ_SWAP_LE] = { 0x03, 0x40 },
	[CMD_TGT_ADD_IMM] = { 0x02, 0x47 },
};

static bool unreg_is_imm(u16 reg)
......
@@ -238,6 +238,7 @@ enum cmd_tgt_map {
	CMD_TGT_READ32_SWAP,
	CMD_TGT_READ_LE,
	CMD_TGT_READ_SWAP_LE,
	CMD_TGT_ADD_IMM,
	__CMD_TGT_MAP_SIZE,
};
@@ -254,6 +255,7 @@ enum cmd_ctx_swap {
	CMD_CTX_NO_SWAP = 3,
};

#define CMD_OVE_DATA GENMASK(5, 3)
#define CMD_OVE_LEN BIT(7)
#define CMD_OV_LEN GENMASK(12, 8)
......