Commit cae1927c authored by Jakub Kicinski, committed by Daniel Borkmann

bpf: offload: allow netdev to disappear while verifier is running

To allow verifier instruction callbacks without any extra locking,
the NETDEV_UNREGISTER notification would wait on a waitqueue for the
verifier to finish.  This design decision was made when the rtnl lock
provided all the locking.  Use the read/write lock instead and remove
the waitqueue.
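
In sketch form, the new scheme looks like this (both halves appear in
the kernel/bpf/offload.c hunks below; bpf_devs_lock is the rwsem that
file already uses, and the unlock tail of the destroy path is implied
by context collapsed out of the diff):

/* Reader side -- the verifier's per-instruction hook runs under the
 * read lock, so it never blocks device unregistration:
 */
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx)
{
	struct bpf_dev_offload *offload;
	int ret = -ENODEV;

	down_read(&bpf_devs_lock);
	offload = env->prog->aux->offload;
	if (offload->netdev)	/* cleared under the write lock on unregister */
		ret = offload->dev_ops->insn_hook(env, insn_idx, prev_insn_idx);
	up_read(&bpf_devs_lock);

	return ret;
}

/* Writer side -- destroy/unregister takes the lock exclusively: */
void bpf_prog_offload_destroy(struct bpf_prog *prog)
{
	rtnl_lock();
	down_write(&bpf_devs_lock);
	__bpf_prog_offload_destroy(prog);	/* clears offload->netdev */
	up_write(&bpf_devs_lock);
	rtnl_unlock();
}

If the netdev disappears mid-verification, the hook simply starts
returning -ENODEV instead of making NETDEV_UNREGISTER wait.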

The verifier will now call into the offload code, so dev_ops are moved
into the offload structure.  Since all verifier calls are guarded by
bpf_prog_is_dev_bound(), we no longer need static inline implementations
to please builds with CONFIG_NET=n.
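
Concretely, the per-instruction check in do_check() (taken from the
kernel/bpf/verifier.c hunk below) becomes:

	if (bpf_prog_is_dev_bound(env->prog->aux)) {
		err = bpf_prog_offload_verify_insn(env, insn_idx,
						   prev_insn_idx);
		if (err)
			return err;
	}

bpf_prog_is_dev_bound() is a static inline that returns false when
offload support is not built in, so the guarded call is compiled out
and no -EOPNOTSUPP stub is required.
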
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Quentin Monnet <quentin.monnet@netronome.com>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parent 9a18eedb
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -238,7 +238,7 @@ struct nfp_bpf_vnic {
 
 int nfp_bpf_jit(struct nfp_prog *prog);
 
-extern const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops;
+extern const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops;
 
 struct netdev_bpf;
 struct nfp_app;
--- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
@@ -260,6 +260,6 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
 	return 0;
 }
 
-const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops = {
+const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops = {
 	.insn_hook = nfp_verify_insn,
 };
--- a/drivers/net/netdevsim/bpf.c
+++ b/drivers/net/netdevsim/bpf.c
@@ -66,7 +66,7 @@ nsim_bpf_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn)
 	return 0;
 }
 
-static const struct bpf_ext_analyzer_ops nsim_bpf_analyzer_ops = {
+static const struct bpf_prog_offload_ops nsim_bpf_analyzer_ops = {
 	.insn_hook = nsim_bpf_verify_insn,
 };
 
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -17,6 +17,7 @@
 #include <linux/numa.h>
 #include <linux/wait.h>
 
+struct bpf_verifier_env;
 struct perf_event;
 struct bpf_prog;
 struct bpf_map;
@@ -184,14 +185,18 @@ struct bpf_verifier_ops {
 				  struct bpf_prog *prog, u32 *target_size);
 };
 
+struct bpf_prog_offload_ops {
+	int (*insn_hook)(struct bpf_verifier_env *env,
+			 int insn_idx, int prev_insn_idx);
+};
+
 struct bpf_dev_offload {
 	struct bpf_prog		*prog;
 	struct net_device	*netdev;
 	void			*dev_priv;
 	struct list_head	offloads;
 	bool			dev_state;
-	bool			verifier_running;
-	wait_queue_head_t	verifier_done;
+	const struct bpf_prog_offload_ops *dev_ops;
 };
 
 struct bpf_prog_aux {
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -166,12 +166,6 @@ static inline bool bpf_verifier_log_full(const struct bpf_verifer_log *log)
 	return log->len_used >= log->len_total - 1;
 }
 
-struct bpf_verifier_env;
-struct bpf_ext_analyzer_ops {
-	int (*insn_hook)(struct bpf_verifier_env *env,
-			 int insn_idx, int prev_insn_idx);
-};
-
 #define BPF_MAX_SUBPROGS 256
 
 /* single container for all structs
@@ -185,7 +179,6 @@ struct bpf_verifier_env {
 	bool strict_alignment;		/* perform strict pointer alignment checks */
 	struct bpf_verifier_state *cur_state; /* current verifier state */
 	struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
-	const struct bpf_ext_analyzer_ops *dev_ops; /* device analyzer ops */
 	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
 	u32 used_map_cnt;		/* number of used maps */
 	u32 id_gen;			/* used to generate unique reg IDs */
@@ -206,13 +199,8 @@ static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
 	return cur->frame[cur->curframe]->regs;
 }
 
-#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
 int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env);
-#else
-static inline int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
-{
-	return -EOPNOTSUPP;
-}
-#endif
+int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
+				 int insn_idx, int prev_insn_idx);
 
 #endif /* _LINUX_BPF_VERIFIER_H */
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -804,7 +804,7 @@ enum bpf_netdev_command {
 	BPF_OFFLOAD_DESTROY,
 };
 
-struct bpf_ext_analyzer_ops;
+struct bpf_prog_offload_ops;
 struct netlink_ext_ack;
 
 struct netdev_bpf {
@@ -826,7 +826,7 @@ struct netdev_bpf {
 		/* BPF_OFFLOAD_VERIFIER_PREP */
 		struct {
 			struct bpf_prog *prog;
-			const struct bpf_ext_analyzer_ops *ops;  /* callee set */
+			const struct bpf_prog_offload_ops *ops;  /* callee set */
 		} verifier;
 		/* BPF_OFFLOAD_TRANSLATE, BPF_OFFLOAD_DESTROY */
 		struct {
--- a/kernel/bpf/offload.c
+++ b/kernel/bpf/offload.c
@@ -44,7 +44,6 @@ int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
 		return -ENOMEM;
 
 	offload->prog = prog;
-	init_waitqueue_head(&offload->verifier_done);
 
 	offload->netdev = dev_get_by_index(current->nsproxy->net_ns,
 					   attr->prog_ifindex);
@@ -97,15 +96,28 @@ int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
 	if (err)
 		goto exit_unlock;
 
-	env->dev_ops = data.verifier.ops;
-
+	env->prog->aux->offload->dev_ops = data.verifier.ops;
 	env->prog->aux->offload->dev_state = true;
-	env->prog->aux->offload->verifier_running = true;
 exit_unlock:
 	rtnl_unlock();
 	return err;
 }
 
+int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
+				 int insn_idx, int prev_insn_idx)
+{
+	struct bpf_dev_offload *offload;
+	int ret = -ENODEV;
+
+	down_read(&bpf_devs_lock);
+	offload = env->prog->aux->offload;
+	if (offload->netdev)
+		ret = offload->dev_ops->insn_hook(env, insn_idx, prev_insn_idx);
+	up_read(&bpf_devs_lock);
+
+	return ret;
+}
+
 static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
 {
 	struct bpf_dev_offload *offload = prog->aux->offload;
@@ -117,9 +129,6 @@ static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
 
 	data.offload.prog = prog;
 
-	if (offload->verifier_running)
-		wait_event(offload->verifier_done, !offload->verifier_running);
-
 	if (offload->dev_state)
 		WARN_ON(__bpf_offload_ndo(prog, BPF_OFFLOAD_DESTROY, &data));
 
@@ -132,9 +141,6 @@ void bpf_prog_offload_destroy(struct bpf_prog *prog)
 {
 	struct bpf_dev_offload *offload = prog->aux->offload;
 
-	offload->verifier_running = false;
-	wake_up(&offload->verifier_done);
-
 	rtnl_lock();
 	down_write(&bpf_devs_lock);
 	__bpf_prog_offload_destroy(prog);
@@ -146,15 +152,11 @@ void bpf_prog_offload_destroy(struct bpf_prog *prog)
 
 static int bpf_prog_offload_translate(struct bpf_prog *prog)
 {
-	struct bpf_dev_offload *offload = prog->aux->offload;
 	struct netdev_bpf data = {};
 	int ret;
 
 	data.offload.prog = prog;
 
-	offload->verifier_running = false;
-	wake_up(&offload->verifier_done);
-
 	rtnl_lock();
 	ret = __bpf_offload_ndo(prog, BPF_OFFLOAD_TRANSLATE, &data);
 	rtnl_unlock();
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -4438,15 +4438,6 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
 	return 0;
 }
 
-static int ext_analyzer_insn_hook(struct bpf_verifier_env *env,
-				  int insn_idx, int prev_insn_idx)
-{
-	if (env->dev_ops && env->dev_ops->insn_hook)
-		return env->dev_ops->insn_hook(env, insn_idx, prev_insn_idx);
-
-	return 0;
-}
-
 static int do_check(struct bpf_verifier_env *env)
 {
 	struct bpf_verifier_state *state;
@@ -4531,9 +4522,12 @@ static int do_check(struct bpf_verifier_env *env)
 			print_bpf_insn(&cbs, env, insn, env->allow_ptr_leaks);
 		}
 
-		err = ext_analyzer_insn_hook(env, insn_idx, prev_insn_idx);
-		if (err)
-			return err;
+		if (bpf_prog_is_dev_bound(env->prog->aux)) {
+			err = bpf_prog_offload_verify_insn(env, insn_idx,
+							   prev_insn_idx);
+			if (err)
+				return err;
+		}
 
 		regs = cur_regs(env);
 		env->insn_aux_data[insn_idx].seen = true;
@@ -5463,7 +5457,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
 	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
 		env->strict_alignment = true;
 
-	if (env->prog->aux->offload) {
+	if (bpf_prog_is_dev_bound(env->prog->aux)) {
 		ret = bpf_prog_offload_verifier_prep(env);
 		if (ret)
 			goto err_unlock;