Commit 9629363c authored by Yafang Shao, committed by Alexei Starovoitov

bpf: offload map memory usage

A new helper is introduced to calculate offload map memory usage. Memory
dynamically allocated in netdev dev_ops, e.g. in nsim_map_update_elem, is
not counted yet; let's put that aside for now.
Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
Link: https://lore.kernel.org/r/20230305124615.12358-18-laoar.shao@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent b4fd0d67
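For orientation, the new .map_mem_usage callback is a per-map-type hook that a generic accounting helper elsewhere in this series is expected to dispatch through. The standalone C mock below sketches that dispatch pattern only; the struct layouts are heavily simplified and the dispatcher name map_memory_usage() is an assumption made for illustration, not code from this commit.

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-ins for the kernel structures involved. */
struct bpf_map;

struct bpf_map_ops {
	/* per-map-type memory accounting callback, as added by this series */
	uint64_t (*map_mem_usage)(const struct bpf_map *map);
};

struct bpf_map {
	const struct bpf_map_ops *ops;
};

/* Mock of struct bpf_offloaded_map: the host-side wrapper is the only
 * allocation this commit accounts for. */
struct bpf_offloaded_map {
	struct bpf_map map;
	void *netdev;		/* placeholder for struct net_device * */
	const void *dev_ops;	/* placeholder for struct bpf_map_dev_ops * */
};

/* Mirrors bpf_map_offload_map_mem_usage(): memory dynamically allocated in
 * netdev dev_ops (e.g. by nsim_map_update_elem) is deliberately not counted. */
static uint64_t offload_map_mem_usage(const struct bpf_map *map)
{
	(void)map;	/* size is constant for offloaded maps */
	return sizeof(struct bpf_offloaded_map);
}

/* Generic dispatcher: roughly how the rest of the series consumes the op. */
static uint64_t map_memory_usage(const struct bpf_map *map)
{
	return map->ops->map_mem_usage(map);
}

static const struct bpf_map_ops offload_ops = {
	.map_mem_usage = offload_map_mem_usage,
};

int main(void)
{
	struct bpf_offloaded_map offmap = { .map = { .ops = &offload_ops } };

	printf("offloaded map mem usage: %llu bytes\n",
	       (unsigned long long)map_memory_usage(&offmap.map));
	return 0;
}

Running the mock prints the size of the mock struct bpf_offloaded_map, mirroring how the real helper accounts only for the host-side wrapper and ignores memory allocated behind the device's dev_ops.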
@@ -2624,6 +2624,7 @@ static inline bool bpf_map_is_offloaded(struct bpf_map *map)
 struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
 void bpf_map_offload_map_free(struct bpf_map *map);
+u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map);
 int bpf_prog_test_run_syscall(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr);

@@ -2695,6 +2696,11 @@ static inline void bpf_map_offload_map_free(struct bpf_map *map)
 {
 }

+static inline u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map)
+{
+	return 0;
+}
+
 static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog,
					     const union bpf_attr *kattr,
					     union bpf_attr __user *uattr)
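The second header hunk adds a static inline stub that simply returns 0, so callers of bpf_map_offload_map_mem_usage() keep building without ifdefs on configurations where map offload is compiled out (presumably under the same CONFIG_NET/CONFIG_BPF_SYSCALL guard that the surrounding stubs already live in).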
@@ -563,6 +563,12 @@ void bpf_map_offload_map_free(struct bpf_map *map)
 	bpf_map_area_free(offmap);
 }

+u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map)
+{
+	/* The memory dynamically allocated in netdev dev_ops is not counted */
+	return sizeof(struct bpf_offloaded_map);
+}
+
 int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value)
 {
 	struct bpf_offloaded_map *offmap = map_to_offmap(map);
@@ -105,6 +105,7 @@ const struct bpf_map_ops bpf_map_offload_ops = {
 	.map_alloc = bpf_map_offload_map_alloc,
 	.map_free = bpf_map_offload_map_free,
 	.map_check_btf = map_check_no_btf,
+	.map_mem_usage = bpf_map_offload_map_mem_usage,
 };

 static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
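With .map_mem_usage registered in bpf_map_offload_ops, offloaded maps report a fixed sizeof(struct bpf_offloaded_map) through the generic accounting path this series introduces. Assuming the rest of the series routes that value into the map's existing fdinfo "memlock" field (as its other patches suggest), it can be checked from userspace with something like: grep memlock /proc/<pid>/fdinfo/<map-fd>. The field name and that plumbing belong to the wider series, not to this commit.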