Commit 86c7ecad authored by Andrii Nakryiko

Merge branch 'libbpf 1.0: deprecate bpf_map__def() API'

Christy Lee says:

====================

bpf_map__def() is rarely used and non-extensible. bpf_map_def fields
can be accessed with appropriate map getters and setters instead.
Deprecate bpf_map__def() API and replace use cases with getters and
setters.

Changelog:
----------
v1 -> v2:
https://lore.kernel.org/all/20220105230057.853163-1-christylee@fb.com/

* Fixed commit messages to match commit titles
* Fixed indentation
* Removed bpf_map__def() usage that was missed in v1
====================
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
parents 0991f6a3 063fa26a
...@@ -209,7 +209,7 @@ static struct datarec *alloc_record_per_cpu(void) ...@@ -209,7 +209,7 @@ static struct datarec *alloc_record_per_cpu(void)
static struct record *alloc_record_per_rxq(void) static struct record *alloc_record_per_rxq(void)
{ {
unsigned int nr_rxqs = bpf_map__def(rx_queue_index_map)->max_entries; unsigned int nr_rxqs = bpf_map__max_entries(rx_queue_index_map);
struct record *array; struct record *array;
array = calloc(nr_rxqs, sizeof(struct record)); array = calloc(nr_rxqs, sizeof(struct record));
...@@ -222,7 +222,7 @@ static struct record *alloc_record_per_rxq(void) ...@@ -222,7 +222,7 @@ static struct record *alloc_record_per_rxq(void)
static struct stats_record *alloc_stats_record(void) static struct stats_record *alloc_stats_record(void)
{ {
unsigned int nr_rxqs = bpf_map__def(rx_queue_index_map)->max_entries; unsigned int nr_rxqs = bpf_map__max_entries(rx_queue_index_map);
struct stats_record *rec; struct stats_record *rec;
int i; int i;
...@@ -241,7 +241,7 @@ static struct stats_record *alloc_stats_record(void) ...@@ -241,7 +241,7 @@ static struct stats_record *alloc_stats_record(void)
static void free_stats_record(struct stats_record *r) static void free_stats_record(struct stats_record *r)
{ {
unsigned int nr_rxqs = bpf_map__def(rx_queue_index_map)->max_entries; unsigned int nr_rxqs = bpf_map__max_entries(rx_queue_index_map);
int i; int i;
for (i = 0; i < nr_rxqs; i++) for (i = 0; i < nr_rxqs; i++)
...@@ -289,7 +289,7 @@ static void stats_collect(struct stats_record *rec) ...@@ -289,7 +289,7 @@ static void stats_collect(struct stats_record *rec)
map_collect_percpu(fd, 0, &rec->stats); map_collect_percpu(fd, 0, &rec->stats);
fd = bpf_map__fd(rx_queue_index_map); fd = bpf_map__fd(rx_queue_index_map);
max_rxqs = bpf_map__def(rx_queue_index_map)->max_entries; max_rxqs = bpf_map__max_entries(rx_queue_index_map);
for (i = 0; i < max_rxqs; i++) for (i = 0; i < max_rxqs; i++)
map_collect_percpu(fd, i, &rec->rxq[i]); map_collect_percpu(fd, i, &rec->rxq[i]);
} }
...@@ -335,7 +335,7 @@ static void stats_print(struct stats_record *stats_rec, ...@@ -335,7 +335,7 @@ static void stats_print(struct stats_record *stats_rec,
struct stats_record *stats_prev, struct stats_record *stats_prev,
int action, __u32 cfg_opt) int action, __u32 cfg_opt)
{ {
unsigned int nr_rxqs = bpf_map__def(rx_queue_index_map)->max_entries; unsigned int nr_rxqs = bpf_map__max_entries(rx_queue_index_map);
unsigned int nr_cpus = bpf_num_possible_cpus(); unsigned int nr_cpus = bpf_num_possible_cpus();
double pps = 0, err = 0; double pps = 0, err = 0;
struct record *rec, *prev; struct record *rec, *prev;
......
...@@ -227,7 +227,7 @@ static int codegen_datasecs(struct bpf_object *obj, const char *obj_name) ...@@ -227,7 +227,7 @@ static int codegen_datasecs(struct bpf_object *obj, const char *obj_name)
/* only generate definitions for memory-mapped internal maps */ /* only generate definitions for memory-mapped internal maps */
if (!bpf_map__is_internal(map)) if (!bpf_map__is_internal(map))
continue; continue;
if (!(bpf_map__def(map)->map_flags & BPF_F_MMAPABLE)) if (!(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
continue; continue;
if (!get_map_ident(map, map_ident, sizeof(map_ident))) if (!get_map_ident(map, map_ident, sizeof(map_ident)))
...@@ -468,7 +468,7 @@ static void codegen_destroy(struct bpf_object *obj, const char *obj_name) ...@@ -468,7 +468,7 @@ static void codegen_destroy(struct bpf_object *obj, const char *obj_name)
if (!get_map_ident(map, ident, sizeof(ident))) if (!get_map_ident(map, ident, sizeof(ident)))
continue; continue;
if (bpf_map__is_internal(map) && if (bpf_map__is_internal(map) &&
(bpf_map__def(map)->map_flags & BPF_F_MMAPABLE)) (bpf_map__map_flags(map) & BPF_F_MMAPABLE))
printf("\tmunmap(skel->%1$s, %2$zd);\n", printf("\tmunmap(skel->%1$s, %2$zd);\n",
ident, bpf_map_mmap_sz(map)); ident, bpf_map_mmap_sz(map));
codegen("\ codegen("\
...@@ -536,7 +536,7 @@ static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *h ...@@ -536,7 +536,7 @@ static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *h
continue; continue;
if (!bpf_map__is_internal(map) || if (!bpf_map__is_internal(map) ||
!(bpf_map__def(map)->map_flags & BPF_F_MMAPABLE)) !(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
continue; continue;
codegen("\ codegen("\
...@@ -600,10 +600,10 @@ static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *h ...@@ -600,10 +600,10 @@ static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *h
continue; continue;
if (!bpf_map__is_internal(map) || if (!bpf_map__is_internal(map) ||
!(bpf_map__def(map)->map_flags & BPF_F_MMAPABLE)) !(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
continue; continue;
if (bpf_map__def(map)->map_flags & BPF_F_RDONLY_PROG) if (bpf_map__map_flags(map) & BPF_F_RDONLY_PROG)
mmap_flags = "PROT_READ"; mmap_flags = "PROT_READ";
else else
mmap_flags = "PROT_READ | PROT_WRITE"; mmap_flags = "PROT_READ | PROT_WRITE";
...@@ -961,7 +961,7 @@ static int do_skeleton(int argc, char **argv) ...@@ -961,7 +961,7 @@ static int do_skeleton(int argc, char **argv)
i, bpf_map__name(map), i, ident); i, bpf_map__name(map), i, ident);
/* memory-mapped internal maps */ /* memory-mapped internal maps */
if (bpf_map__is_internal(map) && if (bpf_map__is_internal(map) &&
(bpf_map__def(map)->map_flags & BPF_F_MMAPABLE)) { (bpf_map__map_flags(map) & BPF_F_MMAPABLE)) {
printf("\ts->maps[%zu].mmaped = (void **)&obj->%s;\n", printf("\ts->maps[%zu].mmaped = (void **)&obj->%s;\n",
i, ident); i, ident);
} }
......
...@@ -480,7 +480,6 @@ static int do_unregister(int argc, char **argv) ...@@ -480,7 +480,6 @@ static int do_unregister(int argc, char **argv)
static int do_register(int argc, char **argv) static int do_register(int argc, char **argv)
{ {
LIBBPF_OPTS(bpf_object_open_opts, open_opts); LIBBPF_OPTS(bpf_object_open_opts, open_opts);
const struct bpf_map_def *def;
struct bpf_map_info info = {}; struct bpf_map_info info = {};
__u32 info_len = sizeof(info); __u32 info_len = sizeof(info);
int nr_errs = 0, nr_maps = 0; int nr_errs = 0, nr_maps = 0;
...@@ -510,8 +509,7 @@ static int do_register(int argc, char **argv) ...@@ -510,8 +509,7 @@ static int do_register(int argc, char **argv)
} }
bpf_object__for_each_map(map, obj) { bpf_object__for_each_map(map, obj) {
def = bpf_map__def(map); if (bpf_map__type(map) != BPF_MAP_TYPE_STRUCT_OPS)
if (def->type != BPF_MAP_TYPE_STRUCT_OPS)
continue; continue;
link = bpf_map__attach_struct_ops(map); link = bpf_map__attach_struct_ops(map);
......
...@@ -706,7 +706,8 @@ bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *map); ...@@ -706,7 +706,8 @@ bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *map);
LIBBPF_API int bpf_map__fd(const struct bpf_map *map); LIBBPF_API int bpf_map__fd(const struct bpf_map *map);
LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd); LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd);
/* get map definition */ /* get map definition */
LIBBPF_API const struct bpf_map_def *bpf_map__def(const struct bpf_map *map); LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 8, "use appropriate getters or setters instead")
const struct bpf_map_def *bpf_map__def(const struct bpf_map *map);
/* get map name */ /* get map name */
LIBBPF_API const char *bpf_map__name(const struct bpf_map *map); LIBBPF_API const char *bpf_map__name(const struct bpf_map *map);
/* get/set map type */ /* get/set map type */
......
...@@ -1002,24 +1002,22 @@ __bpf_map__config_value(struct bpf_map *map, ...@@ -1002,24 +1002,22 @@ __bpf_map__config_value(struct bpf_map *map,
{ {
struct bpf_map_op *op; struct bpf_map_op *op;
const char *map_name = bpf_map__name(map); const char *map_name = bpf_map__name(map);
const struct bpf_map_def *def = bpf_map__def(map);
if (IS_ERR(def)) { if (!map) {
pr_debug("Unable to get map definition from '%s'\n", pr_debug("Map '%s' is invalid\n", map_name);
map_name);
return -BPF_LOADER_ERRNO__INTERNAL; return -BPF_LOADER_ERRNO__INTERNAL;
} }
if (def->type != BPF_MAP_TYPE_ARRAY) { if (bpf_map__type(map) != BPF_MAP_TYPE_ARRAY) {
pr_debug("Map %s type is not BPF_MAP_TYPE_ARRAY\n", pr_debug("Map %s type is not BPF_MAP_TYPE_ARRAY\n",
map_name); map_name);
return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE; return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
} }
if (def->key_size < sizeof(unsigned int)) { if (bpf_map__key_size(map) < sizeof(unsigned int)) {
pr_debug("Map %s has incorrect key size\n", map_name); pr_debug("Map %s has incorrect key size\n", map_name);
return -BPF_LOADER_ERRNO__OBJCONF_MAP_KEYSIZE; return -BPF_LOADER_ERRNO__OBJCONF_MAP_KEYSIZE;
} }
switch (def->value_size) { switch (bpf_map__value_size(map)) {
case 1: case 1:
case 2: case 2:
case 4: case 4:
...@@ -1061,7 +1059,6 @@ __bpf_map__config_event(struct bpf_map *map, ...@@ -1061,7 +1059,6 @@ __bpf_map__config_event(struct bpf_map *map,
struct parse_events_term *term, struct parse_events_term *term,
struct evlist *evlist) struct evlist *evlist)
{ {
const struct bpf_map_def *def;
struct bpf_map_op *op; struct bpf_map_op *op;
const char *map_name = bpf_map__name(map); const char *map_name = bpf_map__name(map);
struct evsel *evsel = evlist__find_evsel_by_str(evlist, term->val.str); struct evsel *evsel = evlist__find_evsel_by_str(evlist, term->val.str);
...@@ -1072,18 +1069,16 @@ __bpf_map__config_event(struct bpf_map *map, ...@@ -1072,18 +1069,16 @@ __bpf_map__config_event(struct bpf_map *map,
return -BPF_LOADER_ERRNO__OBJCONF_MAP_NOEVT; return -BPF_LOADER_ERRNO__OBJCONF_MAP_NOEVT;
} }
def = bpf_map__def(map); if (!map) {
if (IS_ERR(def)) { pr_debug("Map '%s' is invalid\n", map_name);
pr_debug("Unable to get map definition from '%s'\n", return PTR_ERR(map);
map_name);
return PTR_ERR(def);
} }
/* /*
* No need to check key_size and value_size: * No need to check key_size and value_size:
* kernel has already checked them. * kernel has already checked them.
*/ */
if (def->type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) { if (bpf_map__type(map) != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
pr_debug("Map %s type is not BPF_MAP_TYPE_PERF_EVENT_ARRAY\n", pr_debug("Map %s type is not BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
map_name); map_name);
return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE; return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
...@@ -1132,7 +1127,6 @@ config_map_indices_range_check(struct parse_events_term *term, ...@@ -1132,7 +1127,6 @@ config_map_indices_range_check(struct parse_events_term *term,
const char *map_name) const char *map_name)
{ {
struct parse_events_array *array = &term->array; struct parse_events_array *array = &term->array;
const struct bpf_map_def *def;
unsigned int i; unsigned int i;
if (!array->nr_ranges) if (!array->nr_ranges)
...@@ -1143,10 +1137,8 @@ config_map_indices_range_check(struct parse_events_term *term, ...@@ -1143,10 +1137,8 @@ config_map_indices_range_check(struct parse_events_term *term,
return -BPF_LOADER_ERRNO__INTERNAL; return -BPF_LOADER_ERRNO__INTERNAL;
} }
def = bpf_map__def(map); if (!map) {
if (IS_ERR(def)) { pr_debug("Map '%s' is invalid\n", map_name);
pr_debug("ERROR: Unable to get map definition from '%s'\n",
map_name);
return -BPF_LOADER_ERRNO__INTERNAL; return -BPF_LOADER_ERRNO__INTERNAL;
} }
...@@ -1155,7 +1147,7 @@ config_map_indices_range_check(struct parse_events_term *term, ...@@ -1155,7 +1147,7 @@ config_map_indices_range_check(struct parse_events_term *term,
size_t length = array->ranges[i].length; size_t length = array->ranges[i].length;
unsigned int idx = start + length - 1; unsigned int idx = start + length - 1;
if (idx >= def->max_entries) { if (idx >= bpf_map__max_entries(map)) {
pr_debug("ERROR: index %d too large\n", idx); pr_debug("ERROR: index %d too large\n", idx);
return -BPF_LOADER_ERRNO__OBJCONF_MAP_IDX2BIG; return -BPF_LOADER_ERRNO__OBJCONF_MAP_IDX2BIG;
} }
...@@ -1248,21 +1240,21 @@ int bpf__config_obj(struct bpf_object *obj, ...@@ -1248,21 +1240,21 @@ int bpf__config_obj(struct bpf_object *obj,
} }
typedef int (*map_config_func_t)(const char *name, int map_fd, typedef int (*map_config_func_t)(const char *name, int map_fd,
const struct bpf_map_def *pdef, const struct bpf_map *map,
struct bpf_map_op *op, struct bpf_map_op *op,
void *pkey, void *arg); void *pkey, void *arg);
static int static int
foreach_key_array_all(map_config_func_t func, foreach_key_array_all(map_config_func_t func,
void *arg, const char *name, void *arg, const char *name,
int map_fd, const struct bpf_map_def *pdef, int map_fd, const struct bpf_map *map,
struct bpf_map_op *op) struct bpf_map_op *op)
{ {
unsigned int i; unsigned int i;
int err; int err;
for (i = 0; i < pdef->max_entries; i++) { for (i = 0; i < bpf_map__max_entries(map); i++) {
err = func(name, map_fd, pdef, op, &i, arg); err = func(name, map_fd, map, op, &i, arg);
if (err) { if (err) {
pr_debug("ERROR: failed to insert value to %s[%u]\n", pr_debug("ERROR: failed to insert value to %s[%u]\n",
name, i); name, i);
...@@ -1275,7 +1267,7 @@ foreach_key_array_all(map_config_func_t func, ...@@ -1275,7 +1267,7 @@ foreach_key_array_all(map_config_func_t func,
static int static int
foreach_key_array_ranges(map_config_func_t func, void *arg, foreach_key_array_ranges(map_config_func_t func, void *arg,
const char *name, int map_fd, const char *name, int map_fd,
const struct bpf_map_def *pdef, const struct bpf_map *map,
struct bpf_map_op *op) struct bpf_map_op *op)
{ {
unsigned int i, j; unsigned int i, j;
...@@ -1288,7 +1280,7 @@ foreach_key_array_ranges(map_config_func_t func, void *arg, ...@@ -1288,7 +1280,7 @@ foreach_key_array_ranges(map_config_func_t func, void *arg,
for (j = 0; j < length; j++) { for (j = 0; j < length; j++) {
unsigned int idx = start + j; unsigned int idx = start + j;
err = func(name, map_fd, pdef, op, &idx, arg); err = func(name, map_fd, map, op, &idx, arg);
if (err) { if (err) {
pr_debug("ERROR: failed to insert value to %s[%u]\n", pr_debug("ERROR: failed to insert value to %s[%u]\n",
name, idx); name, idx);
...@@ -1304,9 +1296,8 @@ bpf_map_config_foreach_key(struct bpf_map *map, ...@@ -1304,9 +1296,8 @@ bpf_map_config_foreach_key(struct bpf_map *map,
map_config_func_t func, map_config_func_t func,
void *arg) void *arg)
{ {
int err, map_fd; int err, map_fd, type;
struct bpf_map_op *op; struct bpf_map_op *op;
const struct bpf_map_def *def;
const char *name = bpf_map__name(map); const char *name = bpf_map__name(map);
struct bpf_map_priv *priv = bpf_map__priv(map); struct bpf_map_priv *priv = bpf_map__priv(map);
...@@ -1319,9 +1310,8 @@ bpf_map_config_foreach_key(struct bpf_map *map, ...@@ -1319,9 +1310,8 @@ bpf_map_config_foreach_key(struct bpf_map *map,
return 0; return 0;
} }
def = bpf_map__def(map); if (!map) {
if (IS_ERR(def)) { pr_debug("Map '%s' is invalid\n", name);
pr_debug("ERROR: failed to get definition from map %s\n", name);
return -BPF_LOADER_ERRNO__INTERNAL; return -BPF_LOADER_ERRNO__INTERNAL;
} }
map_fd = bpf_map__fd(map); map_fd = bpf_map__fd(map);
...@@ -1330,19 +1320,19 @@ bpf_map_config_foreach_key(struct bpf_map *map, ...@@ -1330,19 +1320,19 @@ bpf_map_config_foreach_key(struct bpf_map *map,
return map_fd; return map_fd;
} }
type = bpf_map__type(map);
list_for_each_entry(op, &priv->ops_list, list) { list_for_each_entry(op, &priv->ops_list, list) {
switch (def->type) { switch (type) {
case BPF_MAP_TYPE_ARRAY: case BPF_MAP_TYPE_ARRAY:
case BPF_MAP_TYPE_PERF_EVENT_ARRAY: case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
switch (op->key_type) { switch (op->key_type) {
case BPF_MAP_KEY_ALL: case BPF_MAP_KEY_ALL:
err = foreach_key_array_all(func, arg, name, err = foreach_key_array_all(func, arg, name,
map_fd, def, op); map_fd, map, op);
break; break;
case BPF_MAP_KEY_RANGES: case BPF_MAP_KEY_RANGES:
err = foreach_key_array_ranges(func, arg, name, err = foreach_key_array_ranges(func, arg, name,
map_fd, def, map_fd, map, op);
op);
break; break;
default: default:
pr_debug("ERROR: keytype for map '%s' invalid\n", pr_debug("ERROR: keytype for map '%s' invalid\n",
...@@ -1451,7 +1441,7 @@ apply_config_evsel_for_key(const char *name, int map_fd, void *pkey, ...@@ -1451,7 +1441,7 @@ apply_config_evsel_for_key(const char *name, int map_fd, void *pkey,
static int static int
apply_obj_config_map_for_key(const char *name, int map_fd, apply_obj_config_map_for_key(const char *name, int map_fd,
const struct bpf_map_def *pdef, const struct bpf_map *map,
struct bpf_map_op *op, struct bpf_map_op *op,
void *pkey, void *arg __maybe_unused) void *pkey, void *arg __maybe_unused)
{ {
...@@ -1460,7 +1450,7 @@ apply_obj_config_map_for_key(const char *name, int map_fd, ...@@ -1460,7 +1450,7 @@ apply_obj_config_map_for_key(const char *name, int map_fd,
switch (op->op_type) { switch (op->op_type) {
case BPF_MAP_OP_SET_VALUE: case BPF_MAP_OP_SET_VALUE:
err = apply_config_value_for_key(map_fd, pkey, err = apply_config_value_for_key(map_fd, pkey,
pdef->value_size, bpf_map__value_size(map),
op->v.value); op->v.value);
break; break;
case BPF_MAP_OP_SET_EVSEL: case BPF_MAP_OP_SET_EVSEL:
......
...@@ -9,25 +9,25 @@ ...@@ -9,25 +9,25 @@
#include <stdlib.h> #include <stdlib.h>
#include <unistd.h> #include <unistd.h>
static bool bpf_map_def__is_per_cpu(const struct bpf_map_def *def) static bool bpf_map__is_per_cpu(enum bpf_map_type type)
{ {
return def->type == BPF_MAP_TYPE_PERCPU_HASH || return type == BPF_MAP_TYPE_PERCPU_HASH ||
def->type == BPF_MAP_TYPE_PERCPU_ARRAY || type == BPF_MAP_TYPE_PERCPU_ARRAY ||
def->type == BPF_MAP_TYPE_LRU_PERCPU_HASH || type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
def->type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE; type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE;
} }
static void *bpf_map_def__alloc_value(const struct bpf_map_def *def) static void *bpf_map__alloc_value(const struct bpf_map *map)
{ {
if (bpf_map_def__is_per_cpu(def)) if (bpf_map__is_per_cpu(bpf_map__type(map)))
return malloc(round_up(def->value_size, 8) * sysconf(_SC_NPROCESSORS_CONF)); return malloc(round_up(bpf_map__value_size(map), 8) *
sysconf(_SC_NPROCESSORS_CONF));
return malloc(def->value_size); return malloc(bpf_map__value_size(map));
} }
int bpf_map__fprintf(struct bpf_map *map, FILE *fp) int bpf_map__fprintf(struct bpf_map *map, FILE *fp)
{ {
const struct bpf_map_def *def = bpf_map__def(map);
void *prev_key = NULL, *key, *value; void *prev_key = NULL, *key, *value;
int fd = bpf_map__fd(map), err; int fd = bpf_map__fd(map), err;
int printed = 0; int printed = 0;
...@@ -35,15 +35,15 @@ int bpf_map__fprintf(struct bpf_map *map, FILE *fp) ...@@ -35,15 +35,15 @@ int bpf_map__fprintf(struct bpf_map *map, FILE *fp)
if (fd < 0) if (fd < 0)
return fd; return fd;
if (IS_ERR(def)) if (!map)
return PTR_ERR(def); return PTR_ERR(map);
err = -ENOMEM; err = -ENOMEM;
key = malloc(def->key_size); key = malloc(bpf_map__key_size(map));
if (key == NULL) if (key == NULL)
goto out; goto out;
value = bpf_map_def__alloc_value(def); value = bpf_map__alloc_value(map);
if (value == NULL) if (value == NULL)
goto out_free_key; goto out_free_key;
......
...@@ -457,7 +457,7 @@ static int init_prog_array(struct bpf_object *obj, struct bpf_map *prog_array) ...@@ -457,7 +457,7 @@ static int init_prog_array(struct bpf_object *obj, struct bpf_map *prog_array)
if (map_fd < 0) if (map_fd < 0)
return -1; return -1;
for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
snprintf(prog_name, sizeof(prog_name), "flow_dissector_%d", i); snprintf(prog_name, sizeof(prog_name), "flow_dissector_%d", i);
prog = bpf_object__find_program_by_name(obj, prog_name); prog = bpf_object__find_program_by_name(obj, prog_name);
......
...@@ -121,7 +121,7 @@ static void test_global_data_rdonly(struct bpf_object *obj, __u32 duration) ...@@ -121,7 +121,7 @@ static void test_global_data_rdonly(struct bpf_object *obj, __u32 duration)
if (CHECK_FAIL(map_fd < 0)) if (CHECK_FAIL(map_fd < 0))
return; return;
buff = malloc(bpf_map__def(map)->value_size); buff = malloc(bpf_map__value_size(map));
if (buff) if (buff)
err = bpf_map_update_elem(map_fd, &zero, buff, 0); err = bpf_map_update_elem(map_fd, &zero, buff, 0);
free(buff); free(buff);
......
...@@ -20,7 +20,7 @@ void test_global_data_init(void) ...@@ -20,7 +20,7 @@ void test_global_data_init(void)
if (CHECK_FAIL(!map || !bpf_map__is_internal(map))) if (CHECK_FAIL(!map || !bpf_map__is_internal(map)))
goto out; goto out;
sz = bpf_map__def(map)->value_size; sz = bpf_map__value_size(map);
newval = malloc(sz); newval = malloc(sz);
if (CHECK_FAIL(!newval)) if (CHECK_FAIL(!newval))
goto out; goto out;
......
...@@ -1413,14 +1413,12 @@ static void test_reuseport_mixed_groups(int family, int sotype, int sock_map, ...@@ -1413,14 +1413,12 @@ static void test_reuseport_mixed_groups(int family, int sotype, int sock_map,
static void test_ops_cleanup(const struct bpf_map *map) static void test_ops_cleanup(const struct bpf_map *map)
{ {
const struct bpf_map_def *def;
int err, mapfd; int err, mapfd;
u32 key; u32 key;
def = bpf_map__def(map);
mapfd = bpf_map__fd(map); mapfd = bpf_map__fd(map);
for (key = 0; key < def->max_entries; key++) { for (key = 0; key < bpf_map__max_entries(map); key++) {
err = bpf_map_delete_elem(mapfd, &key); err = bpf_map_delete_elem(mapfd, &key);
if (err && errno != EINVAL && errno != ENOENT) if (err && errno != EINVAL && errno != ENOENT)
FAIL_ERRNO("map_delete: expected EINVAL/ENOENT"); FAIL_ERRNO("map_delete: expected EINVAL/ENOENT");
...@@ -1443,13 +1441,13 @@ static const char *family_str(sa_family_t family) ...@@ -1443,13 +1441,13 @@ static const char *family_str(sa_family_t family)
static const char *map_type_str(const struct bpf_map *map) static const char *map_type_str(const struct bpf_map *map)
{ {
const struct bpf_map_def *def; int type;
def = bpf_map__def(map); if (!map)
if (IS_ERR(def))
return "invalid"; return "invalid";
type = bpf_map__type(map);
switch (def->type) { switch (type) {
case BPF_MAP_TYPE_SOCKMAP: case BPF_MAP_TYPE_SOCKMAP:
return "sockmap"; return "sockmap";
case BPF_MAP_TYPE_SOCKHASH: case BPF_MAP_TYPE_SOCKHASH:
......
...@@ -37,7 +37,7 @@ static void test_tailcall_1(void) ...@@ -37,7 +37,7 @@ static void test_tailcall_1(void)
if (CHECK_FAIL(map_fd < 0)) if (CHECK_FAIL(map_fd < 0))
goto out; goto out;
for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
snprintf(prog_name, sizeof(prog_name), "classifier_%d", i); snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
prog = bpf_object__find_program_by_name(obj, prog_name); prog = bpf_object__find_program_by_name(obj, prog_name);
...@@ -53,7 +53,7 @@ static void test_tailcall_1(void) ...@@ -53,7 +53,7 @@ static void test_tailcall_1(void)
goto out; goto out;
} }
for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
&duration, &retval, NULL); &duration, &retval, NULL);
CHECK(err || retval != i, "tailcall", CHECK(err || retval != i, "tailcall",
...@@ -69,7 +69,7 @@ static void test_tailcall_1(void) ...@@ -69,7 +69,7 @@ static void test_tailcall_1(void)
CHECK(err || retval != 3, "tailcall", "err %d errno %d retval %d\n", CHECK(err || retval != 3, "tailcall", "err %d errno %d retval %d\n",
err, errno, retval); err, errno, retval);
for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
snprintf(prog_name, sizeof(prog_name), "classifier_%d", i); snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
prog = bpf_object__find_program_by_name(obj, prog_name); prog = bpf_object__find_program_by_name(obj, prog_name);
...@@ -90,8 +90,8 @@ static void test_tailcall_1(void) ...@@ -90,8 +90,8 @@ static void test_tailcall_1(void)
CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n", CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n",
err, errno, retval); err, errno, retval);
for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
j = bpf_map__def(prog_array)->max_entries - 1 - i; j = bpf_map__max_entries(prog_array) - 1 - i;
snprintf(prog_name, sizeof(prog_name), "classifier_%d", j); snprintf(prog_name, sizeof(prog_name), "classifier_%d", j);
prog = bpf_object__find_program_by_name(obj, prog_name); prog = bpf_object__find_program_by_name(obj, prog_name);
...@@ -107,8 +107,8 @@ static void test_tailcall_1(void) ...@@ -107,8 +107,8 @@ static void test_tailcall_1(void)
goto out; goto out;
} }
for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
j = bpf_map__def(prog_array)->max_entries - 1 - i; j = bpf_map__max_entries(prog_array) - 1 - i;
err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0, err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
&duration, &retval, NULL); &duration, &retval, NULL);
...@@ -125,7 +125,7 @@ static void test_tailcall_1(void) ...@@ -125,7 +125,7 @@ static void test_tailcall_1(void)
CHECK(err || retval != 3, "tailcall", "err %d errno %d retval %d\n", CHECK(err || retval != 3, "tailcall", "err %d errno %d retval %d\n",
err, errno, retval); err, errno, retval);
for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
err = bpf_map_delete_elem(map_fd, &i); err = bpf_map_delete_elem(map_fd, &i);
if (CHECK_FAIL(err >= 0 || errno != ENOENT)) if (CHECK_FAIL(err >= 0 || errno != ENOENT))
goto out; goto out;
...@@ -175,7 +175,7 @@ static void test_tailcall_2(void) ...@@ -175,7 +175,7 @@ static void test_tailcall_2(void)
if (CHECK_FAIL(map_fd < 0)) if (CHECK_FAIL(map_fd < 0))
goto out; goto out;
for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
snprintf(prog_name, sizeof(prog_name), "classifier_%d", i); snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
prog = bpf_object__find_program_by_name(obj, prog_name); prog = bpf_object__find_program_by_name(obj, prog_name);
...@@ -353,7 +353,7 @@ static void test_tailcall_4(void) ...@@ -353,7 +353,7 @@ static void test_tailcall_4(void)
if (CHECK_FAIL(map_fd < 0)) if (CHECK_FAIL(map_fd < 0))
return; return;
for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
snprintf(prog_name, sizeof(prog_name), "classifier_%d", i); snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
prog = bpf_object__find_program_by_name(obj, prog_name); prog = bpf_object__find_program_by_name(obj, prog_name);
...@@ -369,7 +369,7 @@ static void test_tailcall_4(void) ...@@ -369,7 +369,7 @@ static void test_tailcall_4(void)
goto out; goto out;
} }
for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY); err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
if (CHECK_FAIL(err)) if (CHECK_FAIL(err))
goto out; goto out;
...@@ -380,7 +380,7 @@ static void test_tailcall_4(void) ...@@ -380,7 +380,7 @@ static void test_tailcall_4(void)
"err %d errno %d retval %d\n", err, errno, retval); "err %d errno %d retval %d\n", err, errno, retval);
} }
for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY); err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
if (CHECK_FAIL(err)) if (CHECK_FAIL(err))
goto out; goto out;
...@@ -441,7 +441,7 @@ static void test_tailcall_5(void) ...@@ -441,7 +441,7 @@ static void test_tailcall_5(void)
if (CHECK_FAIL(map_fd < 0)) if (CHECK_FAIL(map_fd < 0))
return; return;
for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
snprintf(prog_name, sizeof(prog_name), "classifier_%d", i); snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
prog = bpf_object__find_program_by_name(obj, prog_name); prog = bpf_object__find_program_by_name(obj, prog_name);
...@@ -457,7 +457,7 @@ static void test_tailcall_5(void) ...@@ -457,7 +457,7 @@ static void test_tailcall_5(void)
goto out; goto out;
} }
for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY); err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
if (CHECK_FAIL(err)) if (CHECK_FAIL(err))
goto out; goto out;
...@@ -468,7 +468,7 @@ static void test_tailcall_5(void) ...@@ -468,7 +468,7 @@ static void test_tailcall_5(void)
"err %d errno %d retval %d\n", err, errno, retval); "err %d errno %d retval %d\n", err, errno, retval);
} }
for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY); err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
if (CHECK_FAIL(err)) if (CHECK_FAIL(err))
goto out; goto out;
...@@ -520,7 +520,7 @@ static void test_tailcall_bpf2bpf_1(void) ...@@ -520,7 +520,7 @@ static void test_tailcall_bpf2bpf_1(void)
goto out; goto out;
/* nop -> jmp */ /* nop -> jmp */
for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
snprintf(prog_name, sizeof(prog_name), "classifier_%d", i); snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
prog = bpf_object__find_program_by_name(obj, prog_name); prog = bpf_object__find_program_by_name(obj, prog_name);
...@@ -681,7 +681,7 @@ static void test_tailcall_bpf2bpf_3(void) ...@@ -681,7 +681,7 @@ static void test_tailcall_bpf2bpf_3(void)
if (CHECK_FAIL(map_fd < 0)) if (CHECK_FAIL(map_fd < 0))
goto out; goto out;
for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
snprintf(prog_name, sizeof(prog_name), "classifier_%d", i); snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
prog = bpf_object__find_program_by_name(obj, prog_name); prog = bpf_object__find_program_by_name(obj, prog_name);
...@@ -778,7 +778,7 @@ static void test_tailcall_bpf2bpf_4(bool noise) ...@@ -778,7 +778,7 @@ static void test_tailcall_bpf2bpf_4(bool noise)
if (CHECK_FAIL(map_fd < 0)) if (CHECK_FAIL(map_fd < 0))
goto out; goto out;
for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) { for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
snprintf(prog_name, sizeof(prog_name), "classifier_%d", i); snprintf(prog_name, sizeof(prog_name), "classifier_%d", i);
prog = bpf_object__find_program_by_name(obj, prog_name); prog = bpf_object__find_program_by_name(obj, prog_name);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment