Commit 9ce7a956 authored by Jakub Kicinski, committed by David S. Miller

nfp: bpf: refactor offload logic

We currently create a fake cls_bpf offload object when we want
to offload XDP.  Simplify and clarify the code by moving the
TC/XDP specific logic out of common offload code.  This is easy
now that we don't support legacy TC actions.  We only need the
bpf program and state of the skip_sw flag.

Temporarily set @code to NULL in nfp_net_bpf_offload(), compilers
seem to have trouble recognizing it's always initialized.  Next
patches will eliminate that variable.
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Quentin Monnet <quentin.monnet@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 5559eedb
...@@ -54,28 +54,25 @@ static int ...@@ -54,28 +54,25 @@ static int
nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn, nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
struct bpf_prog *prog) struct bpf_prog *prog)
{ {
struct tc_cls_bpf_offload cmd = { bool running, xdp_running;
.prog = prog,
};
int ret; int ret;
if (!nfp_net_ebpf_capable(nn)) if (!nfp_net_ebpf_capable(nn))
return -EINVAL; return -EINVAL;
if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) { running = nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF;
if (!nn->dp.bpf_offload_xdp) xdp_running = running && nn->dp.bpf_offload_xdp;
return prog ? -EBUSY : 0;
cmd.command = prog ? TC_CLSBPF_REPLACE : TC_CLSBPF_DESTROY; if (!prog && !xdp_running)
} else { return 0;
if (!prog) if (prog && running && !xdp_running)
return 0; return -EBUSY;
cmd.command = TC_CLSBPF_ADD;
}
ret = nfp_net_bpf_offload(nn, &cmd); ret = nfp_net_bpf_offload(nn, prog, running, true);
/* Stop offload if replace not possible */ /* Stop offload if replace not possible */
if (ret && cmd.command == TC_CLSBPF_REPLACE) if (ret && prog)
nfp_bpf_xdp_offload(app, nn, NULL); nfp_bpf_xdp_offload(app, nn, NULL);
nn->dp.bpf_offload_xdp = prog && !ret; nn->dp.bpf_offload_xdp = prog && !ret;
return ret; return ret;
} }
...@@ -96,27 +93,33 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type, ...@@ -96,27 +93,33 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
{ {
struct tc_cls_bpf_offload *cls_bpf = type_data; struct tc_cls_bpf_offload *cls_bpf = type_data;
struct nfp_net *nn = cb_priv; struct nfp_net *nn = cb_priv;
bool skip_sw;
if (type != TC_SETUP_CLSBPF ||
!tc_can_offload(nn->dp.netdev) ||
!nfp_net_ebpf_capable(nn) ||
cls_bpf->common.protocol != htons(ETH_P_ALL) ||
cls_bpf->common.chain_index)
return -EOPNOTSUPP;
if (nn->dp.bpf_offload_xdp)
return -EBUSY;
if (!tc_can_offload(nn->dp.netdev)) /* Only support TC direct action */
if (!cls_bpf->exts_integrated ||
tcf_exts_has_actions(cls_bpf->exts)) {
nn_err(nn, "only direct action with no legacy actions supported\n");
return -EOPNOTSUPP; return -EOPNOTSUPP;
}
switch (type) { skip_sw = !!(cls_bpf->gen_flags & TCA_CLS_FLAGS_SKIP_SW);
case TC_SETUP_CLSBPF:
if (!nfp_net_ebpf_capable(nn) || switch (cls_bpf->command) {
cls_bpf->common.protocol != htons(ETH_P_ALL) || case TC_CLSBPF_REPLACE:
cls_bpf->common.chain_index) return nfp_net_bpf_offload(nn, cls_bpf->prog, true, !skip_sw);
return -EOPNOTSUPP; case TC_CLSBPF_ADD:
if (nn->dp.bpf_offload_xdp) return nfp_net_bpf_offload(nn, cls_bpf->prog, false, !skip_sw);
return -EBUSY; case TC_CLSBPF_DESTROY:
return nfp_net_bpf_offload(nn, NULL, true, !skip_sw);
/* Only support TC direct action */
if (!cls_bpf->exts_integrated ||
tcf_exts_has_actions(cls_bpf->exts)) {
nn_err(nn, "only direct action with no legacy actions supported\n");
return -EOPNOTSUPP;
}
return nfp_net_bpf_offload(nn, cls_bpf);
default: default:
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
......
/* Header declarations (post-patch side of the diff): nfp_net_bpf_offload now
 * takes the bpf_prog plus explicit old_prog/sw_fallback flags instead of a
 * struct tc_cls_bpf_offload, so the tc_cls_bpf_offload forward declaration
 * is dropped.
 */
int nfp_prog_verify(struct nfp_prog *nfp_prog, struct bpf_prog *prog);

struct nfp_net;

int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
			bool old_prog, bool sw_fallback);

#endif
...@@ -52,8 +52,7 @@ ...@@ -52,8 +52,7 @@
#include "../nfp_net.h" #include "../nfp_net.h"
static int static int
nfp_net_bpf_offload_prepare(struct nfp_net *nn, nfp_net_bpf_offload_prepare(struct nfp_net *nn, struct bpf_prog *prog,
struct tc_cls_bpf_offload *cls_bpf,
struct nfp_bpf_result *res, struct nfp_bpf_result *res,
void **code, dma_addr_t *dma_addr, u16 max_instr) void **code, dma_addr_t *dma_addr, u16 max_instr)
{ {
...@@ -73,9 +72,9 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn, ...@@ -73,9 +72,9 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn,
done_off = nn_readw(nn, NFP_NET_CFG_BPF_DONE); done_off = nn_readw(nn, NFP_NET_CFG_BPF_DONE);
stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64; stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
if (cls_bpf->prog->aux->stack_depth > stack_size) { if (prog->aux->stack_depth > stack_size) {
nn_info(nn, "stack too large: program %dB > FW stack %dB\n", nn_info(nn, "stack too large: program %dB > FW stack %dB\n",
cls_bpf->prog->aux->stack_depth, stack_size); prog->aux->stack_depth, stack_size);
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
...@@ -83,8 +82,7 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn, ...@@ -83,8 +82,7 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn,
if (!*code) if (!*code)
return -ENOMEM; return -ENOMEM;
ret = nfp_bpf_jit(cls_bpf->prog, *code, start_off, done_off, ret = nfp_bpf_jit(prog, *code, start_off, done_off, max_instr, res);
max_instr, res);
if (ret) if (ret)
goto out; goto out;
...@@ -96,13 +94,13 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn, ...@@ -96,13 +94,13 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn,
} }
static void static void
nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags, nfp_net_bpf_load_and_start(struct nfp_net *nn, bool sw_fallback,
void *code, dma_addr_t dma_addr, void *code, dma_addr_t dma_addr,
unsigned int code_sz, unsigned int n_instr) unsigned int code_sz, unsigned int n_instr)
{ {
int err; int err;
nn->dp.bpf_offload_skip_sw = !!(tc_flags & TCA_CLS_FLAGS_SKIP_SW); nn->dp.bpf_offload_skip_sw = !sw_fallback;
nn_writew(nn, NFP_NET_CFG_BPF_SIZE, n_instr); nn_writew(nn, NFP_NET_CFG_BPF_SIZE, n_instr);
nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr); nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr);
...@@ -134,7 +132,8 @@ static int nfp_net_bpf_stop(struct nfp_net *nn) ...@@ -134,7 +132,8 @@ static int nfp_net_bpf_stop(struct nfp_net *nn)
return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN); return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
} }
/* Load, replace, or remove a BPF program offload on the NFP.
 *
 * Reconstructed post-patch code from the fused two-column diff view.
 * @prog:        program to load, or NULL to only remove the current one
 * @old_prog:    true if a program of our type is currently offloaded
 * @sw_fallback: true when the kernel can run the program in software,
 *               allowing a brief window with no HW program during replace
 *
 * Returns 0 on success or a negative errno.
 */
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
			bool old_prog, bool sw_fallback)
{
	struct nfp_bpf_result res;
	dma_addr_t dma_addr;
	u16 max_instr;	/* declaration elided in the diff view; used below */
	void *code;
	int err;

	/* There is nothing stopping us from implementing seamless
	 * replace but the simple method of loading I adopted in
	 * the firmware does not handle atomic replace (i.e. we have to
	 * stop the BPF offload and re-enable it). Leaking-in a few
	 * frames which didn't have BPF applied in the hardware should
	 * be fine if software fallback is available, though.
	 */
	if (prog && old_prog && nn->dp.bpf_offload_skip_sw)
		return -EBUSY;

	/* Something else is loaded, different program type? */
	if (!old_prog && nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
		return -EBUSY;

	max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);

	/* NULL-init so the compiler sees @code initialized on all paths
	 * (see commit message); later patches remove the variable.
	 */
	code = NULL;
	if (prog) {
		err = nfp_net_bpf_offload_prepare(nn, prog, &res, &code,
						  &dma_addr, max_instr);
		if (err)
			return err;
	}

	if (old_prog)
		nfp_net_bpf_stop(nn);

	if (prog)
		nfp_net_bpf_load_and_start(nn, sw_fallback, code,
					   dma_addr, max_instr * sizeof(u64),
					   res.n_instr);

	return 0;
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment