Commit fba1a1c6 authored by Alexei Starovoitov, committed by Daniel Borkmann

bpf: Convert hash map to bpf_mem_alloc.

Convert bpf hash map to use bpf memory allocator.
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20220902211058.60789-3-alexei.starovoitov@gmail.com
parent 7c8199e2
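
For orientation, the bpf_mem_alloc interface this commit switches to (introduced by the parent commit 7c8199e2) is a four-call lifecycle. A minimal sketch of how the calls pair up, assuming the signatures at this point in the series; elem_size is a stand-in for the map's element size:

	#include <linux/bpf_mem_alloc.h>

	struct bpf_mem_alloc ma;
	void *obj;
	int err;

	/* Build prefilled per-cpu caches for objects of one fixed size. */
	err = bpf_mem_alloc_init(&ma, elem_size);
	if (err)
		return err;

	/* Allocate from any context: no GFP flags or NUMA node argument;
	 * the object is popped off the current CPU's free list. */
	obj = bpf_mem_cache_alloc(&ma);

	/* Return the object to the per-cpu cache; the caller must keep
	 * migration disabled across this call. */
	bpf_mem_cache_free(&ma, obj);

	/* Drain and free the caches when the map is destroyed. */
	bpf_mem_alloc_destroy(&ma);

Each hunk below wires one step of this lifecycle into the hash map.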
@@ -14,6 +14,7 @@
 #include "percpu_freelist.h"
 #include "bpf_lru_list.h"
 #include "map_in_map.h"
+#include <linux/bpf_mem_alloc.h>

 #define HTAB_CREATE_FLAG_MASK					\
 	(BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE |	\
@@ -92,6 +93,7 @@ struct bucket {

 struct bpf_htab {
 	struct bpf_map map;
+	struct bpf_mem_alloc ma;
 	struct bucket *buckets;
 	void *elems;
 	union {
@@ -576,6 +578,10 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 			if (err)
 				goto free_prealloc;
 		}
+	} else {
+		err = bpf_mem_alloc_init(&htab->ma, htab->elem_size);
+		if (err)
+			goto free_map_locked;
 	}

 	return &htab->map;
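
The allocator is initialized only on the non-preallocated path; preallocated maps keep their existing element pool. A sketch of the surrounding branch in htab_map_alloc(), with prealloc_init() being the pre-existing prealloc path (shown for orientation, not part of this diff):

	if (prealloc) {
		err = prealloc_init(htab);
		if (err)
			goto free_map_locked;
		/* ... extra_elems setup; goto free_prealloc on error ... */
	} else {
		/* BPF_F_NO_PREALLOC: elements now come from the
		 * bpf_mem_alloc caches rather than kmalloc at update time. */
		err = bpf_mem_alloc_init(&htab->ma, htab->elem_size);
		if (err)
			goto free_map_locked;
	}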
@@ -586,6 +592,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
 		free_percpu(htab->map_locked[i]);
 	bpf_map_area_free(htab->buckets);
+	bpf_mem_alloc_destroy(&htab->ma);
 free_htab:
 	lockdep_unregister_key(&htab->lockdep_key);
 	bpf_map_area_free(htab);
@@ -862,7 +869,7 @@ static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
 	if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
 		free_percpu(htab_elem_get_ptr(l, htab->map.key_size));
 	check_and_free_fields(htab, l);
-	kfree(l);
+	bpf_mem_cache_free(&htab->ma, l);
 }

 static void htab_elem_free_rcu(struct rcu_head *head)
@@ -986,9 +993,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
 			l_new = ERR_PTR(-E2BIG);
 			goto dec_count;
 		}
-		l_new = bpf_map_kmalloc_node(&htab->map, htab->elem_size,
-					     GFP_NOWAIT | __GFP_NOWARN,
-					     htab->map.numa_node);
+		l_new = bpf_mem_cache_alloc(&htab->ma);
 		if (!l_new) {
 			l_new = ERR_PTR(-ENOMEM);
 			goto dec_count;
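
Note what disappears with bpf_map_kmalloc_node(): the GFP_NOWAIT | __GFP_NOWARN flags and the explicit NUMA node. bpf_mem_cache_alloc() takes neither, because it serves objects from per-cpu free lists that the allocator refills asynchronously; per the parent commit's design, this is what lets element allocation work from any context, including ones where plain kmalloc() is not safe.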
@@ -1007,7 +1012,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
 		pptr = bpf_map_alloc_percpu(&htab->map, size, 8,
 					    GFP_NOWAIT | __GFP_NOWARN);
 		if (!pptr) {
-			kfree(l_new);
+			bpf_mem_cache_free(&htab->ma, l_new);
 			l_new = ERR_PTR(-ENOMEM);
 			goto dec_count;
 		}
@@ -1429,6 +1434,10 @@ static void delete_all_elements(struct bpf_htab *htab)
 {
 	int i;

+	/* It's called from a worker thread, so disable migration here,
+	 * since bpf_mem_cache_free() relies on that.
+	 */
+	migrate_disable();
 	for (i = 0; i < htab->n_buckets; i++) {
 		struct hlist_nulls_head *head = select_bucket(htab, i);
 		struct hlist_nulls_node *n;
@@ -1439,6 +1448,7 @@ static void delete_all_elements(struct bpf_htab *htab)
 			htab_elem_free(htab, l);
 		}
 	}
+	migrate_enable();
 }

 static void htab_free_malloced_timers(struct bpf_htab *htab)
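
bpf_mem_cache_free() manipulates per-cpu free lists, so a caller in plain process context, like the map-free worker that runs delete_all_elements(), has to pin itself to one CPU around the frees. BPF programs themselves already execute with migration disabled, so only paths like this need the explicit bracket:

	migrate_disable();
	/* ... walk every bucket, unlink each element ... */
	htab_elem_free(htab, l);	/* ends in bpf_mem_cache_free() */
	migrate_enable();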
@@ -1502,6 +1512,7 @@ static void htab_map_free(struct bpf_map *map)
 	bpf_map_free_kptr_off_tab(map);
 	free_percpu(htab->extra_elems);
 	bpf_map_area_free(htab->buckets);
+	bpf_mem_alloc_destroy(&htab->ma);
 	for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
 		free_percpu(htab->map_locked[i]);
 	lockdep_unregister_key(&htab->lockdep_key);
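
Teardown mirrors setup: in htab_map_free() the allocator is destroyed only after delete_all_elements() has returned every element to its caches, and the error unwinding in htab_map_alloc() (the free_map_locked path above) calls bpf_mem_alloc_destroy() symmetrically before the lockdep key is unregistered.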
...