Commit 73d2c619 authored by Yafang Shao, committed by Alexei Starovoitov

bpf, net: sock_map memory usage

sockmap and sockhash don't share a common allocation scheme: sockmap preallocates a flat array of sock pointers, while sockhash preallocates buckets and allocates elements on update. Introduce separate helpers to calculate their memory usage.

The result is as follows:

- before
28: sockmap  name count_map  flags 0x0
        key 4B  value 4B  max_entries 65536  memlock 524288B
29: sockhash  name count_map  flags 0x0
        key 4B  value 4B  max_entries 65536  memlock 524288B

- after
28: sockmap  name count_map  flags 0x0
        key 4B  value 4B  max_entries 65536  memlock 524608B
29: sockhash  name count_map  flags 0x0  <<<< no updated elements
        key 4B  value 4B  max_entries 65536  memlock 1048896B
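
As a rough sanity check on the numbers above (the struct sizes here are assumptions about this particular build, not stated in the commit): with 8-byte pointers, the sockmap figure works out to sizeof(struct bpf_stab) + 65536 * sizeof(struct sock *), roughly 320 + 524288 = 524608 bytes. The sockhash figure, with no elements updated, works out to sizeof(struct bpf_shtab) + 65536 buckets * roughly 16 bytes per struct bpf_shtab_bucket, i.e. about 320 + 1048576 = 1048896 bytes.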
Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
Link: https://lore.kernel.org/r/20230305124615.12358-16-laoar.shao@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 7490b7f1
@@ -797,6 +797,14 @@ static void sock_map_fini_seq_private(void *priv_data)
 	bpf_map_put_with_uref(info->map);
 }
 
+static u64 sock_map_mem_usage(const struct bpf_map *map)
+{
+	u64 usage = sizeof(struct bpf_stab);
+
+	usage += (u64)map->max_entries * sizeof(struct sock *);
+	return usage;
+}
+
 static const struct bpf_iter_seq_info sock_map_iter_seq_info = {
 	.seq_ops = &sock_map_seq_ops,
 	.init_seq_private = sock_map_init_seq_private,
@@ -816,6 +824,7 @@ const struct bpf_map_ops sock_map_ops = {
 	.map_lookup_elem = sock_map_lookup,
 	.map_release_uref = sock_map_release_progs,
 	.map_check_btf = map_check_no_btf,
+	.map_mem_usage = sock_map_mem_usage,
 	.map_btf_id = &sock_map_btf_ids[0],
 	.iter_seq_info = &sock_map_iter_seq_info,
 };
@@ -1397,6 +1406,16 @@ static void sock_hash_fini_seq_private(void *priv_data)
 	bpf_map_put_with_uref(info->map);
 }
 
+static u64 sock_hash_mem_usage(const struct bpf_map *map)
+{
+	struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
+	u64 usage = sizeof(*htab);
+
+	usage += htab->buckets_num * sizeof(struct bpf_shtab_bucket);
+	usage += atomic_read(&htab->count) * (u64)htab->elem_size;
+	return usage;
+}
+
 static const struct bpf_iter_seq_info sock_hash_iter_seq_info = {
 	.seq_ops = &sock_hash_seq_ops,
 	.init_seq_private = sock_hash_init_seq_private,
@@ -1416,6 +1435,7 @@ const struct bpf_map_ops sock_hash_ops = {
 	.map_lookup_elem_sys_only = sock_hash_lookup_sys,
 	.map_release_uref = sock_hash_release_progs,
 	.map_check_btf = map_check_no_btf,
+	.map_mem_usage = sock_hash_mem_usage,
 	.map_btf_id = &sock_hash_map_btf_ids[0],
 	.iter_seq_info = &sock_hash_iter_seq_info,
 };
...
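
For a quick way to observe the new accounting outside of bpftool, the sketch below (not part of this commit) creates an empty sockmap with libbpf and prints the memlock line the kernel exposes in the map's fdinfo, which is where bpftool's memlock figure comes from. It assumes libbpf 0.7+ for bpf_map_create(), a kernel carrying this patch, and sufficient privileges (CAP_BPF/CAP_NET_ADMIN).

/* Minimal user-space sketch: create an empty sockmap and dump the
 * "memlock:" value that the kernel now derives from sock_map_mem_usage().
 * Build with: gcc -o memlock_demo memlock_demo.c -lbpf
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <bpf/bpf.h>

int main(void)
{
	char path[64], line[128];
	FILE *f;
	int fd;

	/* key 4B, value 4B, max_entries 65536, as in the commit message. */
	fd = bpf_map_create(BPF_MAP_TYPE_SOCKMAP, "count_map",
			    sizeof(int), sizeof(int), 65536, NULL);
	if (fd < 0) {
		perror("bpf_map_create");
		return 1;
	}

	/* The map's fdinfo carries a "memlock:" line filled via map_mem_usage. */
	snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd);
	f = fopen(path, "r");
	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "memlock:", 8))
			fputs(line, stdout);
	fclose(f);
	close(fd);
	return 0;
}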