Commit 9816dd35 authored by Jakub Kicinski, committed by Daniel Borkmann

nfp: bpf: perf event output helpers support

Add support for the perf_event_output family of helpers.

The implementation on the NFP will not match the host code exactly.
The state of the host map and rings is unknown to the device, hence
the device can't return errors when rings are not installed.  The device
simply packs the data into a firmware notification message and sends
it over to the host, returning success to the program.

There is no notion of a host CPU on the device when packets are being
processed.  The device will only offload programs which set BPF_F_CURRENT_CPU.
Still, if the map index doesn't match the current CPU, no error will be returned (see
above).

Dropped or lost firmware notification messages will not surface as "lost
events" on the perf ring; they are only visible via device
error counters.

Firmware notification messages may also get reordered with respect
to the packets which caused their generation.
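
For illustration only (program and map names here are made up, not part
of this commit), a minimal XDP program the device could offload would
look roughly like:

#include <linux/bpf.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") my_events = {
	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	.key_size = sizeof(__u32),
	.value_size = sizeof(__u32),
	.max_entries = 64,
};

SEC("xdp")
int sample_prog(struct xdp_md *ctx)
{
	__u64 meta = 42;

	/* BPF_F_CURRENT_CPU is required for the program to be offloaded */
	bpf_perf_event_output(ctx, &my_events, BPF_F_CURRENT_CPU,
			      &meta, sizeof(meta));
	return XDP_PASS;
}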
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Quentin Monnet <quentin.monnet@netronome.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parent f4e3ec0d
/*
 * Copyright (C) 2017-2018 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
@@ -102,6 +102,15 @@ nfp_bpf_cmsg_map_req_alloc(struct nfp_app_bpf *bpf, unsigned int n)
	return nfp_bpf_cmsg_alloc(bpf, size);
}

static u8 nfp_bpf_cmsg_get_type(struct sk_buff *skb)
{
	struct cmsg_hdr *hdr;

	hdr = (struct cmsg_hdr *)skb->data;

	return hdr->type;
}
static unsigned int nfp_bpf_cmsg_get_tag(struct sk_buff *skb)
{
	struct cmsg_hdr *hdr;
@@ -431,6 +440,11 @@ void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
		goto err_free;
	}

	if (nfp_bpf_cmsg_get_type(skb) == CMSG_TYPE_BPF_EVENT) {
		nfp_bpf_event_output(bpf, skb);
		return;
	}

	nfp_ctrl_lock(bpf->app->ctrl);

	tag = nfp_bpf_cmsg_get_tag(skb);
...
/*
 * Copyright (C) 2017-2018 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
@@ -37,6 +37,8 @@
#include <linux/bitops.h>
#include <linux/types.h>
/* Kernel's enum bpf_reg_type is not uABI so people may change it breaking
* our FW ABI. In that case we will do translation in the driver.
*/
#define NFP_BPF_SCALAR_VALUE 1
#define NFP_BPF_MAP_VALUE 4
#define NFP_BPF_STACK 6
#define NFP_BPF_PACKET_DATA 8

enum bpf_cap_tlv_type {
	NFP_BPF_CAP_TYPE_FUNC = 1,
	NFP_BPF_CAP_TYPE_ADJUST_HEAD = 2,
@@ -81,6 +89,7 @@ enum nfp_bpf_cmsg_type {
	CMSG_TYPE_MAP_DELETE = 5,
	CMSG_TYPE_MAP_GETNEXT = 6,
	CMSG_TYPE_MAP_GETFIRST = 7,
	CMSG_TYPE_BPF_EVENT = 8,
	__CMSG_TYPE_MAP_MAX,
};
@@ -155,4 +164,13 @@ struct cmsg_reply_map_op {
	__be32 resv;
	struct cmsg_key_value_pair elem[0];
};

struct cmsg_bpf_event {
	struct cmsg_hdr hdr;
	__be32 cpu_id;
	__be64 map_ptr;
	__be32 data_size;
	__be32 pkt_size;
	u8 data[0];
};
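
The variable-length tail carries the packet bytes first, with the event
data following at the next 4-byte boundary (this is how
nfp_bpf_event_output() consumes it below). A sketch of the offset math,
using a hypothetical helper name:

static const void *cmsg_bpf_event_metadata(const struct cmsg_bpf_event *cbe)
{
	/* hypothetical: event data follows the packet bytes, 4-byte aligned */
	return &cbe->data[round_up(be32_to_cpu(cbe->pkt_size), 4)];
}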

#endif
/*
 * Copyright (C) 2016-2018 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
@@ -1456,6 +1456,31 @@ nfp_get_prandom_u32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
	return 0;
}
static int
nfp_perf_event_output(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	swreg ptr_type;
	u32 ret_tgt;

	ptr_type = ur_load_imm_any(nfp_prog, meta->arg1.type, imm_a(nfp_prog));

	ret_tgt = nfp_prog_current_offset(nfp_prog) + 3;

	/* Branch to the helper; the target is filled in at relocation time */
	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + meta->func_id,
		     2, RELO_BR_HELPER);

	/* Load ptr type into A1 */
	wrp_mov(nfp_prog, reg_a(1), ptr_type);

	/* Load the return address into B0 */
	wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL);

	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
		return -EINVAL;

	return 0;
}
/* --- Callbacks --- */
static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
@@ -2411,6 +2436,8 @@ static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
		return map_call_stack_common(nfp_prog, meta);
	case BPF_FUNC_get_prandom_u32:
		return nfp_get_prandom_u32(nfp_prog, meta);
	case BPF_FUNC_perf_event_output:
		return nfp_perf_event_output(nfp_prog, meta);
	default:
		WARN_ONCE(1, "verifier allowed unsupported function\n");
		return -EOPNOTSUPP;
@@ -3353,6 +3380,9 @@ void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)
	case BPF_FUNC_map_delete_elem:
		val = nfp_prog->bpf->helpers.map_delete;
		break;
	case BPF_FUNC_perf_event_output:
		val = nfp_prog->bpf->helpers.perf_event_output;
		break;
	default:
		pr_err("relocation of unknown helper %d\n",
		       val);
...
@@ -298,6 +298,9 @@ nfp_bpf_parse_cap_func(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
	case BPF_FUNC_map_delete_elem:
		bpf->helpers.map_delete = readl(&cap->func_addr);
		break;
	case BPF_FUNC_perf_event_output:
		bpf->helpers.perf_event_output = readl(&cap->func_addr);
		break;
	}

	return 0;
...
@@ -136,6 +136,7 @@ enum pkt_vec {
 * @helpers.map_lookup: map lookup helper address
 * @helpers.map_update: map update helper address
 * @helpers.map_delete: map delete helper address
 * @helpers.perf_event_output: output perf event to a ring buffer
 *
 * @pseudo_random: FW initialized the pseudo-random machinery (CSRs)
 */
@@ -176,6 +177,7 @@ struct nfp_app_bpf {
		u32 map_lookup;
		u32 map_update;
		u32 map_delete;
		u32 perf_event_output;
	} helpers;

	bool pseudo_random;
@@ -458,5 +460,7 @@ int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
			       void *key, void *next_key);

int nfp_bpf_event_output(struct nfp_app_bpf *bpf, struct sk_buff *skb);

void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb);

#endif
@@ -441,6 +441,53 @@ int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *bpf)
	}
}
static unsigned long
nfp_bpf_perf_event_copy(void *dst, const void *src,
			unsigned long off, unsigned long len)
{
	memcpy(dst, src + off, len);
	return 0;
}

int nfp_bpf_event_output(struct nfp_app_bpf *bpf, struct sk_buff *skb)
{
	struct cmsg_bpf_event *cbe = (void *)skb->data;
	u32 pkt_size, data_size;
	struct bpf_map *map;

	if (skb->len < sizeof(struct cmsg_bpf_event))
		goto err_drop;

	pkt_size = be32_to_cpu(cbe->pkt_size);
	data_size = be32_to_cpu(cbe->data_size);
	map = (void *)(unsigned long)be64_to_cpu(cbe->map_ptr);

	/* Sanity-check the lengths and the ABI version before touching
	 * the variable-length tail.
	 */
	if (skb->len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
		goto err_drop;
	if (cbe->hdr.ver != CMSG_MAP_ABI_VERSION)
		goto err_drop;

	/* Only deliver events for map pointers this driver knows about */
	rcu_read_lock();
	if (!rhashtable_lookup_fast(&bpf->maps_neutral, &map,
				    nfp_bpf_maps_neutral_params)) {
		rcu_read_unlock();
		pr_warn("perf event: dest map pointer %px not recognized, dropping event\n",
			map);
		goto err_drop;
	}

	/* Packet bytes sit at the start of the tail, event data follows
	 * at the next 4-byte boundary.
	 */
	bpf_event_output(map, be32_to_cpu(cbe->cpu_id),
			 &cbe->data[round_up(pkt_size, 4)], data_size,
			 cbe->data, pkt_size, nfp_bpf_perf_event_copy);
	rcu_read_unlock();

	dev_consume_skb_any(skb);
	return 0;
err_drop:
	dev_kfree_skb_any(skb);
	return -EINVAL;
}
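
For context, the host-side consumer is unchanged by offload. Below is a
hedged sketch using libbpf's perf_buffer API (signatures from current
libbpf, not part of this commit). Note that, per the commit message, the
lost-event callback only sees host-side drops; messages the device drops
are visible only in device error counters:

#include <bpf/libbpf.h>
#include <stdio.h>

static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
	printf("event: cpu=%d size=%u\n", cpu, size);
}

static void on_lost(void *ctx, int cpu, __u64 cnt)
{
	/* host-side ring overruns only; device-side drops never get here */
	fprintf(stderr, "lost %llu events on cpu %d\n",
		(unsigned long long)cnt, cpu);
}

int consume_events(int map_fd) /* fd of the perf event array map */
{
	struct perf_buffer *pb;

	pb = perf_buffer__new(map_fd, 8 /* pages per ring */,
			      on_sample, on_lost, NULL, NULL);
	if (!pb)
		return -1;
	while (perf_buffer__poll(pb, 100 /* timeout, ms */) >= 0)
		;
	perf_buffer__free(pb);
	return 0;
}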
static int
nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog,
		 struct netlink_ext_ack *extack)
...
/*
 * Copyright (C) 2016-2018 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
@@ -36,6 +36,8 @@
#include <linux/kernel.h>
#include <linux/pkt_cls.h>

#include "../nfp_app.h"
#include "../nfp_main.h"

#include "fw.h"
#include "main.h"
@@ -216,6 +218,71 @@ nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env,
		pr_vlog(env, "bpf_get_prandom_u32(): FW doesn't support random number generation\n");
		return -EOPNOTSUPP;
	case BPF_FUNC_perf_event_output:
		BUILD_BUG_ON(NFP_BPF_SCALAR_VALUE != SCALAR_VALUE ||
			     NFP_BPF_MAP_VALUE != PTR_TO_MAP_VALUE ||
			     NFP_BPF_STACK != PTR_TO_STACK ||
			     NFP_BPF_PACKET_DATA != PTR_TO_PACKET);

		if (!bpf->helpers.perf_event_output) {
			pr_vlog(env, "event_output: not supported by FW\n");
			return -EOPNOTSUPP;
		}

		/* Force current CPU to make sure we can report the event
		 * wherever we get the control message from FW.
		 */
		if (reg3->var_off.mask & BPF_F_INDEX_MASK ||
		    (reg3->var_off.value & BPF_F_INDEX_MASK) !=
		    BPF_F_CURRENT_CPU) {
			char tn_buf[48];

			tnum_strn(tn_buf, sizeof(tn_buf), reg3->var_off);
			pr_vlog(env, "event_output: must use BPF_F_CURRENT_CPU, var_off: %s\n",
				tn_buf);
			return -EOPNOTSUPP;
		}

		/* Save space in meta, we don't care about arguments other
		 * than 4th meta, shove it into arg1.
		 */
		reg1 = cur_regs(env) + BPF_REG_4;
		if (reg1->type != SCALAR_VALUE /* NULL ptr */ &&
		    reg1->type != PTR_TO_STACK &&
		    reg1->type != PTR_TO_MAP_VALUE &&
		    reg1->type != PTR_TO_PACKET) {
			pr_vlog(env, "event_output: unsupported ptr type: %d\n",
				reg1->type);
			return -EOPNOTSUPP;
		}

		if (reg1->type == PTR_TO_STACK &&
		    !nfp_bpf_stack_arg_ok("event_output", env, reg1, NULL))
			return -EOPNOTSUPP;

		/* Warn user that on offload NFP may return success even if map
		 * is not going to accept the event, since the event output is
		 * fully async and device won't know the state of the map.
		 * There is also FW limitation on the event length.
		 *
		 * Lost events will not show up on the perf ring, driver
		 * won't see them at all.  Events may also get reordered.
		 */
		dev_warn_once(&nfp_prog->bpf->app->pf->pdev->dev,
			      "bpf: note: return codes and behavior of bpf_event_output() helper differs for offloaded programs!\n");
		pr_vlog(env, "warning: return codes and behavior of event_output helper differ for offload!\n");

		if (!meta->func_id)
			break;

		if (reg1->type != meta->arg1.type) {
			pr_vlog(env, "event_output: ptr type changed: %d %d\n",
				meta->arg1.type, reg1->type);
			return -EINVAL;
		}
		break;
	default:
		pr_vlog(env, "unsupported function id: %d\n", func_id);
		return -EOPNOTSUPP;
...