Commit 984fe94f authored by YiFei Zhu, committed by Alexei Starovoitov

bpf: Mutex protect used_maps array and count

To support modifying the used_maps array, we use a mutex to protect
the use of the counter and the array. The mutex is initialized right
after the prog aux is allocated, and destroyed right before prog
aux is freed. This way we guarantee it's initialized for both cBPF
and eBPF.
Signed-off-by: YiFei Zhu <zhuyifei@google.com>
Signed-off-by: Stanislav Fomichev <sdf@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Andrii Nakryiko <andriin@fb.com>
Cc: YiFei Zhu <zhuyifei1999@gmail.com>
Link: https://lore.kernel.org/bpf/20200915234543.3220146-2-sdf@google.com
parent d317b0a8
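
The locking rule the commit message describes is that any code reading or rewriting prog->aux->used_maps / used_map_cnt after the program has been loaded must do so under used_maps_mutex, since the point of this change is to allow used_maps to be modified after load. As a rough illustration of the read-side pattern the hunks below follow (a sketch only, not part of the commit; the helper name prog_uses_map() is invented for this example):

    /* Sketch: walk used_maps only while holding used_maps_mutex, since
     * the array and used_map_cnt may otherwise change underneath us.
     * Assumes the usual kernel context (linux/bpf.h, linux/mutex.h).
     */
    static bool prog_uses_map(const struct bpf_prog *prog,
                              const struct bpf_map *map)
    {
            bool found = false;
            u32 i;

            mutex_lock(&prog->aux->used_maps_mutex);
            for (i = 0; i < prog->aux->used_map_cnt; i++) {
                    if (prog->aux->used_maps[i] == map) {
                            found = true;
                            break;
                    }
            }
            mutex_unlock(&prog->aux->used_maps_mutex);

            return found;
    }

Every hunk in the diff below applies this same pattern: take the mutex before walking used_maps, and convert early returns into goto out / explicit unlock paths so the lock is always released.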
drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -111,7 +111,9 @@ static int
 nfp_map_ptrs_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
 		    struct bpf_prog *prog)
 {
-	int i, cnt, err;
+	int i, cnt, err = 0;
+
+	mutex_lock(&prog->aux->used_maps_mutex);
 
 	/* Quickly count the maps we will have to remember */
 	cnt = 0;
@@ -119,13 +121,15 @@ nfp_map_ptrs_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
 		if (bpf_map_offload_neutral(prog->aux->used_maps[i]))
 			cnt++;
 	if (!cnt)
-		return 0;
+		goto out;
 
 	nfp_prog->map_records = kmalloc_array(cnt,
 					      sizeof(nfp_prog->map_records[0]),
 					      GFP_KERNEL);
-	if (!nfp_prog->map_records)
-		return -ENOMEM;
+	if (!nfp_prog->map_records) {
+		err = -ENOMEM;
+		goto out;
+	}
 
 	for (i = 0; i < prog->aux->used_map_cnt; i++)
 		if (bpf_map_offload_neutral(prog->aux->used_maps[i])) {
@@ -133,12 +137,14 @@ nfp_map_ptrs_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
 						 prog->aux->used_maps[i]);
 			if (err) {
 				nfp_map_ptrs_forget(bpf, nfp_prog);
-				return err;
+				goto out;
 			}
 		}
 	WARN_ON(cnt != nfp_prog->map_records_cnt);
 
-	return 0;
+out:
+	mutex_unlock(&prog->aux->used_maps_mutex);
+	return err;
 }
 
 static int
include/linux/bpf.h
@@ -751,6 +751,7 @@ struct bpf_prog_aux {
 	struct bpf_ksym ksym;
 	const struct bpf_prog_ops *ops;
 	struct bpf_map **used_maps;
+	struct mutex used_maps_mutex; /* mutex for used_maps and used_map_cnt */
 	struct bpf_prog *prog;
 	struct user_struct *user;
 	u64 load_time; /* ns since boottime */
kernel/bpf/core.c
@@ -98,6 +98,7 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
 	fp->jit_requested = ebpf_jit_enabled();
 
 	INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
+	mutex_init(&fp->aux->used_maps_mutex);
 
 	return fp;
 }
@@ -253,6 +254,7 @@ struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
 void __bpf_prog_free(struct bpf_prog *fp)
 {
 	if (fp->aux) {
+		mutex_destroy(&fp->aux->used_maps_mutex);
 		free_percpu(fp->aux->stats);
 		kfree(fp->aux->poke_tab);
 		kfree(fp->aux);
@@ -1747,8 +1749,9 @@ bool bpf_prog_array_compatible(struct bpf_array *array,
 static int bpf_check_tail_call(const struct bpf_prog *fp)
 {
 	struct bpf_prog_aux *aux = fp->aux;
-	int i;
+	int i, ret = 0;
 
+	mutex_lock(&aux->used_maps_mutex);
 	for (i = 0; i < aux->used_map_cnt; i++) {
 		struct bpf_map *map = aux->used_maps[i];
 		struct bpf_array *array;
@@ -1757,11 +1760,15 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
 			continue;
 
 		array = container_of(map, struct bpf_array, map);
-		if (!bpf_prog_array_compatible(array, fp))
-			return -EINVAL;
+		if (!bpf_prog_array_compatible(array, fp)) {
+			ret = -EINVAL;
+			goto out;
+		}
 	}
 
-	return 0;
+out:
+	mutex_unlock(&aux->used_maps_mutex);
+	return ret;
 }
 
 static void bpf_prog_select_func(struct bpf_prog *fp)
kernel/bpf/syscall.c
@@ -3162,21 +3162,25 @@ static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
 	const struct bpf_map *map;
 	int i;
 
+	mutex_lock(&prog->aux->used_maps_mutex);
 	for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) {
 		map = prog->aux->used_maps[i];
 		if (map == (void *)addr) {
 			*type = BPF_PSEUDO_MAP_FD;
-			return map;
+			goto out;
 		}
 		if (!map->ops->map_direct_value_meta)
 			continue;
 		if (!map->ops->map_direct_value_meta(map, addr, off)) {
 			*type = BPF_PSEUDO_MAP_VALUE;
-			return map;
+			goto out;
 		}
 	}
+	map = NULL;
 
-	return NULL;
+out:
+	mutex_unlock(&prog->aux->used_maps_mutex);
+	return map;
 }
 
 static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog,
@@ -3294,6 +3298,7 @@ static int bpf_prog_get_info_by_fd(struct file *file,
 	memcpy(info.tag, prog->tag, sizeof(prog->tag));
 	memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));
 
+	mutex_lock(&prog->aux->used_maps_mutex);
 	ulen = info.nr_map_ids;
 	info.nr_map_ids = prog->aux->used_map_cnt;
 	ulen = min_t(u32, info.nr_map_ids, ulen);
@@ -3303,9 +3308,12 @@ static int bpf_prog_get_info_by_fd(struct file *file,
 
 		for (i = 0; i < ulen; i++)
 			if (put_user(prog->aux->used_maps[i]->id,
-				     &user_map_ids[i]))
+				     &user_map_ids[i])) {
+				mutex_unlock(&prog->aux->used_maps_mutex);
 				return -EFAULT;
+			}
 	}
+	mutex_unlock(&prog->aux->used_maps_mutex);
 
 	err = set_info_rec_size(&info);
 	if (err)
net/core/dev.c
@@ -5441,17 +5441,22 @@ static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
 	if (new) {
 		u32 i;
 
+		mutex_lock(&new->aux->used_maps_mutex);
+
 		/* generic XDP does not work with DEVMAPs that can
 		 * have a bpf_prog installed on an entry
 		 */
 		for (i = 0; i < new->aux->used_map_cnt; i++) {
-			if (dev_map_can_have_prog(new->aux->used_maps[i]))
-				return -EINVAL;
-			if (cpu_map_prog_allowed(new->aux->used_maps[i]))
-				return -EINVAL;
+			if (dev_map_can_have_prog(new->aux->used_maps[i]) ||
+			    cpu_map_prog_allowed(new->aux->used_maps[i])) {
+				mutex_unlock(&new->aux->used_maps_mutex);
+				return -EINVAL;
+			}
 		}
+
+		mutex_unlock(&new->aux->used_maps_mutex);
 	}
 
 	switch (xdp->command) {
 	case XDP_SETUP_PROG:
 		rcu_assign_pointer(dev->xdp_prog, new);