Commit 87ac0d60 authored by Andrii Nakryiko, committed by Alexei Starovoitov

bpf: fix potential 32-bit overflow when accessing ARRAY map element

If a BPF array map is bigger than 4GB, the element pointer calculation can
overflow because both the index and elem_size are u32. Fix this everywhere
by forcing 64-bit multiplication. Extract this formula into a separate
small helper and use it consistently in various places.
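
To illustrate the bug outside the kernel, here is a minimal user-space C
sketch (hypothetical, not part of the patch; the variable names are made up
to mirror the u32 fields involved):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t elem_size = 8;          /* bytes per element */
		uint32_t index = 0x20000000;     /* byte offset is exactly 4GB */

		/* Both operands are u32, so the multiply happens in 32 bits
		 * and wraps to 0 before being widened for the assignment. */
		uint64_t bad = elem_size * index;

		/* Casting one operand to u64 forces a 64-bit multiply,
		 * which is the fix applied below. */
		uint64_t good = (uint64_t)elem_size * index;

		printf("bad=%llu good=%llu\n",
		       (unsigned long long)bad, (unsigned long long)good);
		return 0;
	}

On a typical 64-bit system this prints bad=0 good=4294967296.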

The speculation-preventing formula that uses the index_mask trick is left
as is, but explicit u64 casts are added in both places.
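
For context, a rough sketch of the masked lookup this refers to (field
names mirror struct bpf_array; the mask setup is summarized from the
surrounding code, not shown in this diff):

	/* index_mask is max_entries rounded up to a power of two, minus 1,
	 * set up at map allocation time; masking keeps even a mispredicted,
	 * speculative load inside the allocated region. The overflow fix
	 * only needs the (u64) cast on the multiply. */
	static void *elem_ptr_masked(char *base, uint32_t elem_size,
				     uint32_t index, uint32_t index_mask)
	{
		return base + (uint64_t)elem_size * (index & index_mask);
	}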

Fixes: c85d6913 ("bpf: move memory size checks to bpf_map_charge_init()")
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20220715053146.1291891-2-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent e5e23424
@@ -156,6 +156,11 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 	return &array->map;
 }
 
+static void *array_map_elem_ptr(struct bpf_array* array, u32 index)
+{
+	return array->value + (u64)array->elem_size * index;
+}
+
 /* Called from syscall or from eBPF program */
 static void *array_map_lookup_elem(struct bpf_map *map, void *key)
 {
@@ -165,7 +170,7 @@ static void *array_map_lookup_elem(struct bpf_map *map, void *key)
 	if (unlikely(index >= array->map.max_entries))
 		return NULL;
 
-	return array->value + array->elem_size * (index & array->index_mask);
+	return array->value + (u64)array->elem_size * (index & array->index_mask);
 }
 
 static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
@@ -339,7 +344,7 @@ static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
 		       value, map->value_size);
 	} else {
 		val = array->value +
-			array->elem_size * (index & array->index_mask);
+			(u64)array->elem_size * (index & array->index_mask);
 		if (map_flags & BPF_F_LOCK)
 			copy_map_value_locked(map, val, value, false);
 		else
@@ -408,8 +413,7 @@ static void array_map_free_timers(struct bpf_map *map)
 		return;
 
 	for (i = 0; i < array->map.max_entries; i++)
-		bpf_timer_cancel_and_free(array->value + array->elem_size * i +
-					  map->timer_off);
+		bpf_timer_cancel_and_free(array_map_elem_ptr(array, i) + map->timer_off);
 }
 
 /* Called when map->refcnt goes to zero, either from workqueue or from syscall */
@@ -420,7 +424,7 @@ static void array_map_free(struct bpf_map *map)
 	if (map_value_has_kptrs(map)) {
 		for (i = 0; i < array->map.max_entries; i++)
-			bpf_map_free_kptrs(map, array->value + array->elem_size * i);
+			bpf_map_free_kptrs(map, array_map_elem_ptr(array, i));
 		bpf_map_free_kptr_off_tab(map);
 	}
@@ -556,7 +560,7 @@ static void *bpf_array_map_seq_start(struct seq_file *seq, loff_t *pos)
 	index = info->index & array->index_mask;
 	if (info->percpu_value_buf)
 		return array->pptrs[index];
-	return array->value + array->elem_size * index;
+	return array_map_elem_ptr(array, index);
 }
 
 static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
@@ -575,7 +579,7 @@ static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 	index = info->index & array->index_mask;
 	if (info->percpu_value_buf)
 		return array->pptrs[index];
-	return array->value + array->elem_size * index;
+	return array_map_elem_ptr(array, index);
 }
 
 static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
@@ -690,7 +694,7 @@ static int bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_
 		if (is_percpu)
 			val = this_cpu_ptr(array->pptrs[i]);
 		else
-			val = array->value + array->elem_size * i;
+			val = array_map_elem_ptr(array, i);
 		num_elems++;
 		key = i;
 		ret = callback_fn((u64)(long)map, (u64)(long)&key,