Commit f73e601a authored by Kumar Kartikeya Dwivedi, committed by Alexei Starovoitov

bpf: Populate field_offs for inner_map_meta

Far too much code simply assumes that both btf_record and btf_field_offs
are set to valid pointers together, or both are unset. They go together
hand in hand, as btf_record describes the special fields and
btf_field_offs is a compact representation for runtime copying/zeroing.

It is very difficult to make this clear in the code when the only
exception to this universal invariant is inner_map_meta which is used
as reg->map_ptr in the verifier. This is simply a bug waiting to happen,
as in verifier context we cannot easily distinguish if PTR_TO_MAP_VALUE
is coming from an inner map, and if we ever end up using field_offs for
any reason in the future, we will silently ignore the special fields for
inner map case (as NULL is not an error but unset field_offs).

Hence, simply copy field_offs from inner map together with btf_record.

While at it, refactor code to unwind properly on errors with gotos.
Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Link: https://lore.kernel.org/r/20221118015614.2013203-5-memxor@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent d4899572
...@@ -12,6 +12,7 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd) ...@@ -12,6 +12,7 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
struct bpf_map *inner_map, *inner_map_meta; struct bpf_map *inner_map, *inner_map_meta;
u32 inner_map_meta_size; u32 inner_map_meta_size;
struct fd f; struct fd f;
int ret;
f = fdget(inner_map_ufd); f = fdget(inner_map_ufd);
inner_map = __bpf_map_get(f); inner_map = __bpf_map_get(f);
...@@ -20,18 +21,18 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd) ...@@ -20,18 +21,18 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
/* Does not support >1 level map-in-map */ /* Does not support >1 level map-in-map */
if (inner_map->inner_map_meta) { if (inner_map->inner_map_meta) {
fdput(f); ret = -EINVAL;
return ERR_PTR(-EINVAL); goto put;
} }
if (!inner_map->ops->map_meta_equal) { if (!inner_map->ops->map_meta_equal) {
fdput(f); ret = -ENOTSUPP;
return ERR_PTR(-ENOTSUPP); goto put;
} }
if (btf_record_has_field(inner_map->record, BPF_SPIN_LOCK)) { if (btf_record_has_field(inner_map->record, BPF_SPIN_LOCK)) {
fdput(f); ret = -ENOTSUPP;
return ERR_PTR(-ENOTSUPP); goto put;
} }
inner_map_meta_size = sizeof(*inner_map_meta); inner_map_meta_size = sizeof(*inner_map_meta);
...@@ -41,8 +42,8 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd) ...@@ -41,8 +42,8 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
inner_map_meta = kzalloc(inner_map_meta_size, GFP_USER); inner_map_meta = kzalloc(inner_map_meta_size, GFP_USER);
if (!inner_map_meta) { if (!inner_map_meta) {
fdput(f); ret = -ENOMEM;
return ERR_PTR(-ENOMEM); goto put;
} }
inner_map_meta->map_type = inner_map->map_type; inner_map_meta->map_type = inner_map->map_type;
...@@ -50,16 +51,27 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd) ...@@ -50,16 +51,27 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
inner_map_meta->value_size = inner_map->value_size; inner_map_meta->value_size = inner_map->value_size;
inner_map_meta->map_flags = inner_map->map_flags; inner_map_meta->map_flags = inner_map->map_flags;
inner_map_meta->max_entries = inner_map->max_entries; inner_map_meta->max_entries = inner_map->max_entries;
inner_map_meta->record = btf_record_dup(inner_map->record); inner_map_meta->record = btf_record_dup(inner_map->record);
if (IS_ERR(inner_map_meta->record)) { if (IS_ERR(inner_map_meta->record)) {
struct bpf_map *err_ptr = ERR_CAST(inner_map_meta->record);
/* btf_record_dup returns NULL or valid pointer in case of /* btf_record_dup returns NULL or valid pointer in case of
* invalid/empty/valid, but ERR_PTR in case of errors. During * invalid/empty/valid, but ERR_PTR in case of errors. During
* equality NULL or IS_ERR is equivalent. * equality NULL or IS_ERR is equivalent.
*/ */
kfree(inner_map_meta); ret = PTR_ERR(inner_map_meta->record);
fdput(f); goto free;
return err_ptr; }
if (inner_map_meta->record) {
struct btf_field_offs *field_offs;
/* If btf_record is !IS_ERR_OR_NULL, then field_offs is always
* valid.
*/
field_offs = kmemdup(inner_map->field_offs, sizeof(*inner_map->field_offs), GFP_KERNEL | __GFP_NOWARN);
if (!field_offs) {
ret = -ENOMEM;
goto free_rec;
}
inner_map_meta->field_offs = field_offs;
} }
if (inner_map->btf) { if (inner_map->btf) {
btf_get(inner_map->btf); btf_get(inner_map->btf);
...@@ -76,10 +88,18 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd) ...@@ -76,10 +88,18 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
fdput(f); fdput(f);
return inner_map_meta; return inner_map_meta;
free_rec:
btf_record_free(inner_map_meta->record);
free:
kfree(inner_map_meta);
put:
fdput(f);
return ERR_PTR(ret);
} }
void bpf_map_meta_free(struct bpf_map *map_meta) void bpf_map_meta_free(struct bpf_map *map_meta)
{ {
kfree(map_meta->field_offs);
bpf_map_free_record(map_meta); bpf_map_free_record(map_meta);
btf_put(map_meta->btf); btf_put(map_meta->btf);
kfree(map_meta); kfree(map_meta);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment