Commit f45d5b6c authored by Toke Hoiland-Jorgensen, committed by Alexei Starovoitov

bpf: generalise tail call map compatibility check

The check for tail call map compatibility ensures that tail calls only
happen between maps of the same type. To ensure backwards compatibility for
XDP frags we need a similar type of check for cpumap and devmap
programs, so move the state from bpf_array_aux into bpf_map, add
xdp_has_frags to the check, and apply the same check to cpumap and devmap.
Acked-by: John Fastabend <john.fastabend@gmail.com>
Co-developed-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Toke Hoiland-Jorgensen <toke@redhat.com>
Link: https://lore.kernel.org/r/f19fd97c0328a39927f3ad03e1ca6b43fd53cdfd.1642758637.git.lorenzo@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 082c4bfb
...@@ -194,6 +194,17 @@ struct bpf_map { ...@@ -194,6 +194,17 @@ struct bpf_map {
struct work_struct work; struct work_struct work;
struct mutex freeze_mutex; struct mutex freeze_mutex;
atomic64_t writecnt; atomic64_t writecnt;
/* 'Ownership' of program-containing map is claimed by the first program
* that is going to use this map or by the first program which FD is
* stored in the map to make sure that all callers and callees have the
* same prog type, JITed flag and xdp_has_frags flag.
*/
struct {
spinlock_t lock;
enum bpf_prog_type type;
bool jited;
bool xdp_has_frags;
} owner;
}; };
static inline bool map_value_has_spin_lock(const struct bpf_map *map) static inline bool map_value_has_spin_lock(const struct bpf_map *map)
...@@ -994,16 +1005,6 @@ struct bpf_prog_aux { ...@@ -994,16 +1005,6 @@ struct bpf_prog_aux {
}; };
struct bpf_array_aux { struct bpf_array_aux {
/* 'Ownership' of prog array is claimed by the first program that
* is going to use this map or by the first program which FD is
* stored in the map to make sure that all callers and callees have
* the same prog type and JITed flag.
*/
struct {
spinlock_t lock;
enum bpf_prog_type type;
bool jited;
} owner;
/* Programs with direct jumps into programs part of this array. */ /* Programs with direct jumps into programs part of this array. */
struct list_head poke_progs; struct list_head poke_progs;
struct bpf_map *map; struct bpf_map *map;
...@@ -1178,7 +1179,14 @@ struct bpf_event_entry { ...@@ -1178,7 +1179,14 @@ struct bpf_event_entry {
struct rcu_head rcu; struct rcu_head rcu;
}; };
bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp); static inline bool map_type_contains_progs(struct bpf_map *map)
{
return map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
map->map_type == BPF_MAP_TYPE_DEVMAP ||
map->map_type == BPF_MAP_TYPE_CPUMAP;
}
bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp); int bpf_prog_calc_tag(struct bpf_prog *fp);
const struct bpf_func_proto *bpf_get_trace_printk_proto(void); const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
......
...@@ -837,13 +837,12 @@ static int fd_array_map_delete_elem(struct bpf_map *map, void *key) ...@@ -837,13 +837,12 @@ static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
static void *prog_fd_array_get_ptr(struct bpf_map *map, static void *prog_fd_array_get_ptr(struct bpf_map *map,
struct file *map_file, int fd) struct file *map_file, int fd)
{ {
struct bpf_array *array = container_of(map, struct bpf_array, map);
struct bpf_prog *prog = bpf_prog_get(fd); struct bpf_prog *prog = bpf_prog_get(fd);
if (IS_ERR(prog)) if (IS_ERR(prog))
return prog; return prog;
if (!bpf_prog_array_compatible(array, prog)) { if (!bpf_prog_map_compatible(map, prog)) {
bpf_prog_put(prog); bpf_prog_put(prog);
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
} }
...@@ -1071,7 +1070,6 @@ static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr) ...@@ -1071,7 +1070,6 @@ static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
INIT_WORK(&aux->work, prog_array_map_clear_deferred); INIT_WORK(&aux->work, prog_array_map_clear_deferred);
INIT_LIST_HEAD(&aux->poke_progs); INIT_LIST_HEAD(&aux->poke_progs);
mutex_init(&aux->poke_mutex); mutex_init(&aux->poke_mutex);
spin_lock_init(&aux->owner.lock);
map = array_map_alloc(attr); map = array_map_alloc(attr);
if (IS_ERR(map)) { if (IS_ERR(map)) {
......
...@@ -1829,28 +1829,30 @@ static unsigned int __bpf_prog_ret0_warn(const void *ctx, ...@@ -1829,28 +1829,30 @@ static unsigned int __bpf_prog_ret0_warn(const void *ctx,
} }
#endif #endif
bool bpf_prog_array_compatible(struct bpf_array *array, bool bpf_prog_map_compatible(struct bpf_map *map,
const struct bpf_prog *fp) const struct bpf_prog *fp)
{ {
bool ret; bool ret;
if (fp->kprobe_override) if (fp->kprobe_override)
return false; return false;
spin_lock(&array->aux->owner.lock); spin_lock(&map->owner.lock);
if (!map->owner.type) {
if (!array->aux->owner.type) {
/* There's no owner yet where we could check for /* There's no owner yet where we could check for
* compatibility. * compatibility.
*/ */
array->aux->owner.type = fp->type; map->owner.type = fp->type;
array->aux->owner.jited = fp->jited; map->owner.jited = fp->jited;
map->owner.xdp_has_frags = fp->aux->xdp_has_frags;
ret = true; ret = true;
} else { } else {
ret = array->aux->owner.type == fp->type && ret = map->owner.type == fp->type &&
array->aux->owner.jited == fp->jited; map->owner.jited == fp->jited &&
map->owner.xdp_has_frags == fp->aux->xdp_has_frags;
} }
spin_unlock(&array->aux->owner.lock); spin_unlock(&map->owner.lock);
return ret; return ret;
} }
...@@ -1862,13 +1864,11 @@ static int bpf_check_tail_call(const struct bpf_prog *fp) ...@@ -1862,13 +1864,11 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
mutex_lock(&aux->used_maps_mutex); mutex_lock(&aux->used_maps_mutex);
for (i = 0; i < aux->used_map_cnt; i++) { for (i = 0; i < aux->used_map_cnt; i++) {
struct bpf_map *map = aux->used_maps[i]; struct bpf_map *map = aux->used_maps[i];
struct bpf_array *array;
if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) if (!map_type_contains_progs(map))
continue; continue;
array = container_of(map, struct bpf_array, map); if (!bpf_prog_map_compatible(map, fp)) {
if (!bpf_prog_array_compatible(array, fp)) {
ret = -EINVAL; ret = -EINVAL;
goto out; goto out;
} }
......
...@@ -397,7 +397,8 @@ static int cpu_map_kthread_run(void *data) ...@@ -397,7 +397,8 @@ static int cpu_map_kthread_run(void *data)
return 0; return 0;
} }
static int __cpu_map_load_bpf_program(struct bpf_cpu_map_entry *rcpu, int fd) static int __cpu_map_load_bpf_program(struct bpf_cpu_map_entry *rcpu,
struct bpf_map *map, int fd)
{ {
struct bpf_prog *prog; struct bpf_prog *prog;
...@@ -405,7 +406,8 @@ static int __cpu_map_load_bpf_program(struct bpf_cpu_map_entry *rcpu, int fd) ...@@ -405,7 +406,8 @@ static int __cpu_map_load_bpf_program(struct bpf_cpu_map_entry *rcpu, int fd)
if (IS_ERR(prog)) if (IS_ERR(prog))
return PTR_ERR(prog); return PTR_ERR(prog);
if (prog->expected_attach_type != BPF_XDP_CPUMAP) { if (prog->expected_attach_type != BPF_XDP_CPUMAP ||
!bpf_prog_map_compatible(map, prog)) {
bpf_prog_put(prog); bpf_prog_put(prog);
return -EINVAL; return -EINVAL;
} }
...@@ -457,7 +459,7 @@ __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value, ...@@ -457,7 +459,7 @@ __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
rcpu->map_id = map->id; rcpu->map_id = map->id;
rcpu->value.qsize = value->qsize; rcpu->value.qsize = value->qsize;
if (fd > 0 && __cpu_map_load_bpf_program(rcpu, fd)) if (fd > 0 && __cpu_map_load_bpf_program(rcpu, map, fd))
goto free_ptr_ring; goto free_ptr_ring;
/* Setup kthread */ /* Setup kthread */
......
...@@ -858,7 +858,8 @@ static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net, ...@@ -858,7 +858,8 @@ static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
BPF_PROG_TYPE_XDP, false); BPF_PROG_TYPE_XDP, false);
if (IS_ERR(prog)) if (IS_ERR(prog))
goto err_put_dev; goto err_put_dev;
if (prog->expected_attach_type != BPF_XDP_DEVMAP) if (prog->expected_attach_type != BPF_XDP_DEVMAP ||
!bpf_prog_map_compatible(&dtab->map, prog))
goto err_put_prog; goto err_put_prog;
} }
......
...@@ -556,16 +556,14 @@ static unsigned long bpf_map_memory_footprint(const struct bpf_map *map) ...@@ -556,16 +556,14 @@ static unsigned long bpf_map_memory_footprint(const struct bpf_map *map)
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp) static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{ {
const struct bpf_map *map = filp->private_data; struct bpf_map *map = filp->private_data;
const struct bpf_array *array;
u32 type = 0, jited = 0; u32 type = 0, jited = 0;
if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) { if (map_type_contains_progs(map)) {
array = container_of(map, struct bpf_array, map); spin_lock(&map->owner.lock);
spin_lock(&array->aux->owner.lock); type = map->owner.type;
type = array->aux->owner.type; jited = map->owner.jited;
jited = array->aux->owner.jited; spin_unlock(&map->owner.lock);
spin_unlock(&array->aux->owner.lock);
} }
seq_printf(m, seq_printf(m,
...@@ -874,6 +872,7 @@ static int map_create(union bpf_attr *attr) ...@@ -874,6 +872,7 @@ static int map_create(union bpf_attr *attr)
atomic64_set(&map->refcnt, 1); atomic64_set(&map->refcnt, 1);
atomic64_set(&map->usercnt, 1); atomic64_set(&map->usercnt, 1);
mutex_init(&map->freeze_mutex); mutex_init(&map->freeze_mutex);
spin_lock_init(&map->owner.lock);
map->spin_lock_off = -EINVAL; map->spin_lock_off = -EINVAL;
map->timer_off = -EINVAL; map->timer_off = -EINVAL;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment