Commit 9328e0d1 authored by Jakub Kicinski, committed by Daniel Borkmann

bpf: hashtab: move checks out of alloc function

Use the new callback to perform allocation checks for hash maps.
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Quentin Monnet <quentin.monnet@netronome.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parent daffc5a2
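
For context: the map_alloc_check callback referenced in the commit message was added to struct bpf_map_ops by the parent commit (daffc5a2), and this patch wires the hash-table maps up to it. The standalone C sketch below illustrates the general pattern, splitting a cheap attribute check that returns a plain errno from the allocation that only runs once the check passes. It is a simplified, hypothetical model: demo_attr, demo_map_ops, demo_create_map and friends stand in for bpf_attr, bpf_map_ops and the kernel's map-creation path; they are not the actual kernel API.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for bpf_attr / bpf_map / bpf_map_ops. */
struct demo_attr {
	unsigned int key_size;
	unsigned int value_size;
	unsigned int max_entries;
};

struct demo_map {
	struct demo_attr attr;
	/* buckets, locks, element pool, ... would live here */
};

struct demo_map_ops {
	/* cheap attribute validation, no allocations, returns 0 or -errno */
	int (*map_alloc_check)(const struct demo_attr *attr);
	/* actual allocation; may assume attributes were already validated */
	struct demo_map *(*map_alloc)(const struct demo_attr *attr);
};

/* Roughly what htab_map_alloc_check() does after this patch: reject bad
 * attributes early and return a plain errno instead of an ERR_PTR value. */
static int demo_htab_alloc_check(const struct demo_attr *attr)
{
	if (attr->max_entries == 0 || attr->key_size == 0 ||
	    attr->value_size == 0)
		return -EINVAL;
	return 0;
}

static struct demo_map *demo_htab_alloc(const struct demo_attr *attr)
{
	struct demo_map *map = calloc(1, sizeof(*map));

	if (!map)
		return NULL;
	map->attr = *attr;
	return map;
}

static const struct demo_map_ops demo_htab_ops = {
	.map_alloc_check = demo_htab_alloc_check,
	.map_alloc = demo_htab_alloc,
};

/* Generic creation path: run the optional check callback first, so no
 * memory is committed when the attributes are bogus. */
static struct demo_map *demo_create_map(const struct demo_map_ops *ops,
					const struct demo_attr *attr, int *err)
{
	struct demo_map *map;

	*err = 0;
	if (ops->map_alloc_check) {
		*err = ops->map_alloc_check(attr);
		if (*err)
			return NULL;
	}

	map = ops->map_alloc(attr);
	if (!map)
		*err = -ENOMEM;
	return map;
}

int main(void)
{
	struct demo_attr bad  = { .key_size = 0, .value_size = 4, .max_entries = 8 };
	struct demo_attr good = { .key_size = 4, .value_size = 4, .max_entries = 8 };
	struct demo_map *map;
	int err;

	if (!demo_create_map(&demo_htab_ops, &bad, &err))
		printf("bad attrs rejected before allocation, err=%d\n", err);

	map = demo_create_map(&demo_htab_ops, &good, &err);
	if (map) {
		printf("map created, max_entries=%u\n", map->attr.max_entries);
		free(map);
	}
	return 0;
}

The point the sketch exercises is the same one the patch relies on: invalid attributes are rejected before any memory is committed, and the check path reports plain negative errnos, which is why the ERR_PTR() returns disappear from the hash-table checks in the diff below.
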
kernel/bpf/hashtab.c

@@ -227,7 +227,7 @@ static int alloc_extra_elems(struct bpf_htab *htab)
 }
 
 /* Called from syscall */
-static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
+static int htab_map_alloc_check(union bpf_attr *attr)
 {
 	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
@@ -241,9 +241,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
 	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
 	int numa_node = bpf_map_attr_numa_node(attr);
-	struct bpf_htab *htab;
-	int err, i;
-	u64 cost;
 
 	BUILD_BUG_ON(offsetof(struct htab_elem, htab) !=
 		     offsetof(struct htab_elem, hash_node.pprev));
@@ -254,33 +251,33 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 		/* LRU implementation is much complicated than other
 		 * maps. Hence, limit to CAP_SYS_ADMIN for now.
 		 */
-		return ERR_PTR(-EPERM);
+		return -EPERM;
 
 	if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK)
 		/* reserved bits should not be used */
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	if (!lru && percpu_lru)
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	if (lru && !prealloc)
-		return ERR_PTR(-ENOTSUPP);
+		return -ENOTSUPP;
 
 	if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru))
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	/* check sanity of attributes.
 	 * value_size == 0 may be allowed in the future to use map as a set
 	 */
 	if (attr->max_entries == 0 || attr->key_size == 0 ||
 	    attr->value_size == 0)
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 
 	if (attr->key_size > MAX_BPF_STACK)
 		/* eBPF programs initialize keys on stack, so they cannot be
 		 * larger than max stack size
 		 */
-		return ERR_PTR(-E2BIG);
+		return -E2BIG;
 
 	if (attr->value_size >= KMALLOC_MAX_SIZE -
 	    MAX_BPF_STACK - sizeof(struct htab_elem))
@@ -289,7 +286,28 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 		 * sure that the elem_size doesn't overflow and it's
 		 * kmalloc-able later in htab_map_update_elem()
 		 */
-		return ERR_PTR(-E2BIG);
+		return -E2BIG;
+
+	return 0;
+}
+
+static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
+{
+	bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+		       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
+	bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
+		    attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
+	/* percpu_lru means each cpu has its own LRU list.
+	 * it is different from BPF_MAP_TYPE_PERCPU_HASH where
+	 * the map's value itself is percpu. percpu_lru has
+	 * nothing to do with the map's value.
+	 */
+	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
+	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
+	int numa_node = bpf_map_attr_numa_node(attr);
+	struct bpf_htab *htab;
+	int err, i;
+	u64 cost;
+
 	htab = kzalloc(sizeof(*htab), GFP_USER);
 	if (!htab)
@@ -1142,6 +1160,7 @@ static void htab_map_free(struct bpf_map *map)
 }
 
 const struct bpf_map_ops htab_map_ops = {
+	.map_alloc_check = htab_map_alloc_check,
 	.map_alloc = htab_map_alloc,
 	.map_free = htab_map_free,
 	.map_get_next_key = htab_map_get_next_key,
@@ -1152,6 +1171,7 @@ const struct bpf_map_ops htab_map_ops = {
 };
 
 const struct bpf_map_ops htab_lru_map_ops = {
+	.map_alloc_check = htab_map_alloc_check,
 	.map_alloc = htab_map_alloc,
 	.map_free = htab_map_free,
 	.map_get_next_key = htab_map_get_next_key,
@@ -1235,6 +1255,7 @@ int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
 }
 
 const struct bpf_map_ops htab_percpu_map_ops = {
+	.map_alloc_check = htab_map_alloc_check,
 	.map_alloc = htab_map_alloc,
 	.map_free = htab_map_free,
 	.map_get_next_key = htab_map_get_next_key,
@@ -1244,6 +1265,7 @@ const struct bpf_map_ops htab_percpu_map_ops = {
 };
 
 const struct bpf_map_ops htab_lru_percpu_map_ops = {
+	.map_alloc_check = htab_map_alloc_check,
 	.map_alloc = htab_map_alloc,
 	.map_free = htab_map_free,
 	.map_get_next_key = htab_map_get_next_key,
@@ -1252,11 +1274,11 @@ const struct bpf_map_ops htab_lru_percpu_map_ops = {
 	.map_delete_elem = htab_lru_map_delete_elem,
 };
 
-static struct bpf_map *fd_htab_map_alloc(union bpf_attr *attr)
+static int fd_htab_map_alloc_check(union bpf_attr *attr)
 {
 	if (attr->value_size != sizeof(u32))
-		return ERR_PTR(-EINVAL);
-	return htab_map_alloc(attr);
+		return -EINVAL;
+	return htab_map_alloc_check(attr);
 }
 
 static void fd_htab_map_free(struct bpf_map *map)
@@ -1327,7 +1349,7 @@ static struct bpf_map *htab_of_map_alloc(union bpf_attr *attr)
 	if (IS_ERR(inner_map_meta))
 		return inner_map_meta;
 
-	map = fd_htab_map_alloc(attr);
+	map = htab_map_alloc(attr);
 	if (IS_ERR(map)) {
 		bpf_map_meta_free(inner_map_meta);
 		return map;
@@ -1371,6 +1393,7 @@ static void htab_of_map_free(struct bpf_map *map)
 }
 
 const struct bpf_map_ops htab_of_maps_map_ops = {
+	.map_alloc_check = fd_htab_map_alloc_check,
 	.map_alloc = htab_of_map_alloc,
 	.map_free = htab_of_map_free,
 	.map_get_next_key = htab_map_get_next_key,