Commit 516fca5a authored by Andrii Nakryiko

Merge branch 'libbpf-type-suffixes-and-autocreate-flag-for-struct_ops-maps'

Eduard Zingerman says:

====================
libbpf: type suffixes and autocreate flag for struct_ops maps

Tweak struct_ops related APIs to allow the following features:
- specify version suffixes for struct_ops map types;
- share same BPF program between several map definitions with
  different local BTF types, assuming only maps with same
  kernel BTF type would be selected for load;
- toggle autocreate flag for struct_ops maps;
- automatically toggle autoload for struct_ops programs referenced
  from struct_ops maps, depending on autocreate status of the
  corresponding map;
- use SEC("?.struct_ops") and SEC("?.struct_ops.link")
  to define struct_ops maps with autocreate == false after object open.

This would allow loading programs like below:

    SEC("struct_ops/foo") int BPF_PROG(foo) { ... }
    SEC("struct_ops/bar") int BPF_PROG(bar) { ... }

    struct bpf_testmod_ops___v1 {
        int (*foo)(void);
    };

    struct bpf_testmod_ops___v2 {
        int (*foo)(void);
        int (*bar)(void);
    };

    /* Assume kernel type name to be 'test_ops' */
    SEC(".struct_ops.link")
    struct test_ops___v1 map_v1 = {
        /* Program 'foo' shared by maps with
         * different local BTF type
         */
        .foo = (void *)foo
    };

    SEC(".struct_ops.link")
    struct test_ops___v2 map_v2 = {
        .foo = (void *)foo,
        .bar = (void *)bar
    };

Assuming the following tweaks are done before loading:

    /* to load v1 */
    bpf_map__set_autocreate(skel->maps.map_v1, true);
    bpf_map__set_autocreate(skel->maps.map_v2, false);

    /* to load v2 */
    bpf_map__set_autocreate(skel->maps.map_v1, false);
    bpf_map__set_autocreate(skel->maps.map_v2, true);

Patch #8 ties autocreate and autoload flags for struct_ops maps and
programs.

Changelog:
- v3 [3] -> v4:
  - changes for multiple styling suggestions from Andrii;
  - patch #5: libbpf log capture now happens for LIBBPF_INFO and
    LIBBPF_WARN messages and does not depend on verbosity flags
    (Andrii);
  - patch #6: fixed runtime crash caused by conflict with newly added
    test case struct_ops_multi_pages;
  - patch #7: fixed free of possibly uninitialized pointer (Daniel)
  - patch #8: simpler algorithm to detect which programs to autoload
    (Andrii);
  - patch #9: added assertions for autoload flag after object load
    (Andrii);
  - patch #12: DATASEC name rewrite in libbpf is now done inplace, no
    new strings added to BTF (Andrii);
  - patch #14: allow any printable characters in DATASEC names when
    kernel validates BTF (Andrii)
- v2 [2] -> v3:
  - moved patch #8 logic to be fully done on load
    (requested by Andrii in offlist discussion);
  - in patch #9 added test case for shadow vars and
    autocreate/autoload interaction.
- v1 [1] -> v2:
  - fixed memory leak in patch #1 (Kui-Feng);
  - improved error messages in patch #2 (Martin, Andrii);
  - in bad_struct_ops selftest from patch #6 added .test_2
    map member setup (David);
  - added utility functions to capture libbpf log from selftests (David)
  - in selftests replaced usage of ...__open_and_load by separate
    calls to ..._open() and ..._load() (Andrii);
  - removed serial_... in selftest definitions (Andrii);
  - improved comments in selftest struct_ops_autocreate
    from patch #7 (David);
  - removed autoload toggling logic incompatible with shadow variables
    from bpf_map__set_autocreate(), instead struct_ops programs
    autoload property is computed at struct_ops maps load phase,
    see patch #8 (Kui-Feng, Martin, Andrii);
  - added support for SEC("?.struct_ops") and SEC("?.struct_ops.link")
    (Andrii).

[1] https://lore.kernel.org/bpf/20240227204556.17524-1-eddyz87@gmail.com/
[2] https://lore.kernel.org/bpf/20240302011920.15302-1-eddyz87@gmail.com/
[3] https://lore.kernel.org/bpf/20240304225156.24765-1-eddyz87@gmail.com/
====================

Link: https://lore.kernel.org/r/20240306104529.6453-1-eddyz87@gmail.com
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
parents 0f79bb89 5208930a
......@@ -809,9 +809,23 @@ static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
return __btf_name_valid(btf, offset);
}
/* Allow any printable character in DATASEC names */
static bool btf_name_valid_section(const struct btf *btf, u32 offset)
{
	/* offset must be valid */
	const char *src = btf_str_by_offset(btf, offset);
	const char *src_limit;

	/* BUG FIX: reject an empty name up front.  Without this check the
	 * src++ below steps past the terminating NUL and the loop reads
	 * beyond the name's string-table entry (out-of-bounds read).
	 */
	if (!*src)
		return false;

	/* set a limit on identifier length */
	src_limit = src + KSYM_NAME_LEN;

	/* NOTE(review): the first character is deliberately skipped here —
	 * presumably validated by the caller (e.g. the '?' prefix check);
	 * confirm against the call site.
	 */
	src++;
	while (*src && src < src_limit) {
		if (!isprint(*src))
			return false;
		src++;
	}

	/* name must terminate within KSYM_NAME_LEN */
	return !*src;
}
static const char *__btf_name_by_offset(const struct btf *btf, u32 offset)
......
......@@ -147,6 +147,25 @@ static int probe_kern_btf_datasec(int token_fd)
strs, sizeof(strs), token_fd));
}
/* Probe whether the running kernel accepts a BTF DATASEC whose name
 * starts with '?' (as produced by SEC("?.data")).  Backs the
 * FEAT_BTF_QMARK_DATASEC feature flag.
 */
static int probe_kern_btf_qmark_datasec(int token_fd)
{
	/* string table: offset 1 = "x", offset 3 = "?.data" */
	static const char strs[] = "\0x\0?.data";
	/* static int a; */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
		/* VAR x */ /* [2] */
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
		BTF_VAR_STATIC,
		/* DATASEC ?.data */ /* [3] */
		BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
		BTF_VAR_SECINFO_ENC(2, 0, 4),
	};

	/* probe_fd() consumes the fd (if any) from the raw BTF load and
	 * reports whether the kernel accepted this BTF blob
	 */
	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs), token_fd));
}
static int probe_kern_btf_float(int token_fd)
{
static const char strs[] = "\0float";
......@@ -534,6 +553,9 @@ static struct kern_feature_desc {
[FEAT_ARG_CTX_TAG] = {
"kernel-side __arg_ctx tag", probe_kern_arg_ctx_tag,
},
[FEAT_BTF_QMARK_DATASEC] = {
"BTF DATASEC names starting from '?'", probe_kern_btf_qmark_datasec,
},
};
bool feat_supported(struct kern_feature_cache *cache, enum kern_feature_id feat_id)
......
This diff is collapsed.
......@@ -374,6 +374,8 @@ enum kern_feature_id {
FEAT_UPROBE_MULTI_LINK,
/* Kernel supports arg:ctx tag (__arg_ctx) for global subprogs natively */
FEAT_ARG_CTX_TAG,
/* Kernel supports '?' at the front of datasec names */
FEAT_BTF_QMARK_DATASEC,
__FEAT_CNT,
};
......
......@@ -564,6 +564,8 @@ static int bpf_dummy_reg(void *kdata)
{
struct bpf_testmod_ops *ops = kdata;
if (ops->test_1)
ops->test_1();
/* Some test cases (ex. struct_ops_maybe_null) may not have test_2
* initialized, so we need to check for NULL.
*/
......@@ -609,6 +611,29 @@ struct bpf_struct_ops bpf_bpf_testmod_ops = {
.owner = THIS_MODULE,
};
/* reg() callback for the bpf_testmod_ops2 struct_ops type; invokes the
 * map-provided test_1 program so selftests can observe that it ran.
 */
static int bpf_dummy_reg2(void *kdata)
{
	struct bpf_testmod_ops2 *ops = kdata;

	/* Mirror bpf_dummy_reg(): guard against a map that leaves test_1
	 * unset instead of dereferencing a NULL callback.
	 */
	if (ops->test_1)
		ops->test_1();
	return 0;
}
/* Default (CFI stub) implementations for bpf_testmod_ops2 members;
 * referenced below via .cfi_stubs.
 */
static struct bpf_testmod_ops2 __bpf_testmod_ops2 = {
	.test_1 = bpf_testmod_test_1,
};

/* Second struct_ops type provided by the test module; registered from
 * bpf_testmod_init() and exercised by the struct_ops selftests.
 */
struct bpf_struct_ops bpf_testmod_ops2 = {
	.verifier_ops = &bpf_testmod_verifier_ops,
	.init = bpf_testmod_ops_init,
	.init_member = bpf_testmod_ops_init_member,
	.reg = bpf_dummy_reg2,
	.unreg = bpf_dummy_unreg,
	.cfi_stubs = &__bpf_testmod_ops2,
	.name = "bpf_testmod_ops2",
	.owner = THIS_MODULE,
};
extern int bpf_fentry_test1(int a);
static int bpf_testmod_init(void)
......@@ -620,6 +645,7 @@ static int bpf_testmod_init(void)
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_testmod_kfunc_set);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set);
ret = ret ?: register_bpf_struct_ops(&bpf_bpf_testmod_ops, bpf_testmod_ops);
ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops2, bpf_testmod_ops2);
if (ret < 0)
return ret;
if (bpf_fentry_test1(0) < 0)
......
......@@ -89,4 +89,8 @@ struct bpf_testmod_ops {
int (*tramp_40)(int value);
};
/* Second struct_ops type exposed by bpf_testmod; registered alongside
 * bpf_testmod_ops and used by selftests that exercise type-suffix
 * matching and autocreate toggling for struct_ops maps.
 */
struct bpf_testmod_ops2 {
	int (*test_1)(void);
};
#endif /* _BPF_TESTMOD_H */
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "bad_struct_ops.skel.h"
#include "bad_struct_ops2.skel.h"
/* Loading bad_struct_ops must fail: prog test_1 is referenced by two
 * maps with different kernel-side struct_ops types.  Verify both the
 * load failure and the diagnostic libbpf emits.
 */
static void invalid_prog_reuse(void)
{
	char *captured_log = NULL;
	struct bad_struct_ops *skel;
	int load_err;

	skel = bad_struct_ops__open();
	if (!ASSERT_OK_PTR(skel, "bad_struct_ops__open"))
		return;

	if (start_libbpf_log_capture() != 0)
		goto cleanup;

	load_err = bad_struct_ops__load(skel);
	captured_log = stop_libbpf_log_capture();
	ASSERT_ERR(load_err, "bad_struct_ops__load should fail");
	ASSERT_HAS_SUBSTR(captured_log,
			  "struct_ops init_kern testmod_2 func ptr test_1: invalid reuse of prog test_1",
			  "expected init_kern message");
cleanup:
	free(captured_log);
	bad_struct_ops__destroy(skel);
}
/* bad_struct_ops2 contains a struct_ops program with no owning map;
 * such a program is opened with autoload enabled and must make the
 * subsequent object load fail.
 */
static void unused_program(void)
{
	char *captured_log = NULL;
	struct bad_struct_ops2 *skel;
	int load_err;

	skel = bad_struct_ops2__open();
	if (!ASSERT_OK_PTR(skel, "bad_struct_ops2__open"))
		return;

	/* struct_ops programs not referenced from any maps are open
	 * with autoload set to true.
	 */
	ASSERT_TRUE(bpf_program__autoload(skel->progs.foo), "foo autoload == true");

	if (start_libbpf_log_capture() != 0)
		goto cleanup;

	load_err = bad_struct_ops2__load(skel);
	ASSERT_ERR(load_err, "bad_struct_ops2__load should fail");
	captured_log = stop_libbpf_log_capture();
	ASSERT_HAS_SUBSTR(captured_log, "prog 'foo': failed to load",
			  "message about 'foo' failing to load");
cleanup:
	free(captured_log);
	bad_struct_ops2__destroy(skel);
}
/* Entry point for the bad_struct_ops selftest; each failure scenario
 * runs as a separately selectable subtest.
 */
void test_bad_struct_ops(void)
{
	/* one prog shared by maps of different kernel types must not load */
	if (test__start_subtest("invalid_prog_reuse"))
		invalid_prog_reuse();
	/* a struct_ops prog with no owning map must fail the object load */
	if (test__start_subtest("unused_program"))
		unused_program();
}
......@@ -3535,6 +3535,32 @@ static struct btf_raw_test raw_tests[] = {
.value_type_id = 1,
.max_entries = 1,
},
{
.descr = "datasec: name '?.foo bar:buz' is ok",
.raw_types = {
/* int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
/* VAR x */ /* [2] */
BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
BTF_VAR_STATIC,
/* DATASEC ?.data */ /* [3] */
BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
BTF_VAR_SECINFO_ENC(2, 0, 4),
BTF_END_RAW,
},
BTF_STR_SEC("\0x\0?.foo bar:buz"),
},
{
.descr = "type name '?foo' is not ok",
.raw_types = {
/* union ?foo; */
BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_FWD, 1, 0), 0), /* [1] */
BTF_END_RAW,
},
BTF_STR_SEC("\0?foo"),
.err_str = "Invalid name",
.btf_load_err = true,
},
{
.descr = "float test #1, well-formed",
......@@ -4363,6 +4389,9 @@ static void do_test_raw(unsigned int test_num)
if (err || btf_fd < 0)
goto done;
if (!test->map_type)
goto done;
opts.btf_fd = btf_fd;
opts.btf_key_type_id = test->key_type_id;
opts.btf_value_type_id = test->value_type_id;
......
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "struct_ops_autocreate.skel.h"
#include "struct_ops_autocreate2.skel.h"
/* Load struct_ops_autocreate with every map enabled.  The testmod_2 map
 * BTF type (struct bpf_testmod_ops___v2) doesn't match the actual
 * struct bpf_testmod_ops in the kernel, so the full object must fail to
 * load with -ENOTSUP and an init_kern diagnostic.
 */
static void cant_load_full_object(void)
{
	char *captured_log = NULL;
	struct struct_ops_autocreate *skel;
	int load_err;

	skel = struct_ops_autocreate__open();
	if (!ASSERT_OK_PTR(skel, "struct_ops_autocreate__open"))
		return;

	if (start_libbpf_log_capture() != 0)
		goto cleanup;

	load_err = struct_ops_autocreate__load(skel);
	captured_log = stop_libbpf_log_capture();
	if (!ASSERT_ERR(load_err, "struct_ops_autocreate__load"))
		goto cleanup;

	ASSERT_HAS_SUBSTR(captured_log, "libbpf: struct_ops init_kern", "init_kern message");
	ASSERT_EQ(load_err, -ENOTSUP, "errno should be ENOTSUP");

cleanup:
	free(captured_log);
	struct_ops_autocreate__destroy(skel);
}
/* Attach @map via a struct_ops link and verify its test_1 program ran
 * by checking test_1_result.  Returns the ASSERT_EQ result (non-zero on
 * success) or -1 if the attach itself failed.
 */
static int check_test_1_link(struct struct_ops_autocreate *skel, struct bpf_map *map)
{
	struct bpf_link *link;
	int err;

	/* BUG FIX: attach the map supplied by the caller, not always
	 * testmod_1.  optional_maps() calls this with optional_map2 while
	 * testmod_1 has autocreate disabled, so hard-coding testmod_1
	 * would attach a map that was never created.
	 */
	link = bpf_map__attach_struct_ops(map);
	if (!ASSERT_OK_PTR(link, "bpf_map__attach_struct_ops"))
		return -1;

	/* test_1() runs from the testmod's reg() callback in bpf_testmod.c */
	err = ASSERT_EQ(skel->bss->test_1_result, 42, "test_1_result");
	bpf_link__destroy(link);
	return err;
}
/* Disable autocreate for the incompatible testmod_2 map and verify that
 * a partial load succeeds and that test_2's autoload flag is cleared as
 * a side effect of its only map being disabled.
 */
static void can_load_partial_object(void)
{
	struct struct_ops_autocreate *skel;
	int err;

	skel = struct_ops_autocreate__open();
	if (!ASSERT_OK_PTR(skel, "struct_ops_autocreate__open"))
		return;

	err = bpf_map__set_autocreate(skel->maps.testmod_2, false);
	if (!ASSERT_OK(err, "bpf_map__set_autocreate"))
		goto cleanup;

	/* before load every struct_ops prog referenced from a map autoloads */
	ASSERT_TRUE(bpf_program__autoload(skel->progs.test_1), "test_1 default autoload");
	ASSERT_TRUE(bpf_program__autoload(skel->progs.test_2), "test_2 default autoload");

	err = struct_ops_autocreate__load(skel);
	/* BUG FIX: negate the condition — the original
	 * 'if (ASSERT_OK(...)) goto cleanup;' skipped all post-load checks
	 * exactly when the load succeeded.
	 */
	if (!ASSERT_OK(err, "struct_ops_autocreate__load"))
		goto cleanup;

	/* test_2 is only referenced from the disabled testmod_2 map */
	ASSERT_TRUE(bpf_program__autoload(skel->progs.test_1), "test_1 actual autoload");
	ASSERT_FALSE(bpf_program__autoload(skel->progs.test_2), "test_2 actual autoload");

	check_test_1_link(skel, skel->maps.testmod_1);

cleanup:
	struct_ops_autocreate__destroy(skel);
}
/* Maps declared with SEC("?.struct_ops") / SEC("?.struct_ops.link")
 * default to autocreate == false, regular ones to true.  Flip the flags
 * so only optional_map2 is created, then load and attach it.
 */
static void optional_maps(void)
{
	struct struct_ops_autocreate *skel;
	int err;

	skel = struct_ops_autocreate__open();
	if (!ASSERT_OK_PTR(skel, "struct_ops_autocreate__open"))
		return;

	ASSERT_TRUE(bpf_map__autocreate(skel->maps.testmod_1), "testmod_1 autocreate");
	ASSERT_TRUE(bpf_map__autocreate(skel->maps.testmod_2), "testmod_2 autocreate");
	ASSERT_FALSE(bpf_map__autocreate(skel->maps.optional_map), "optional_map autocreate");
	ASSERT_FALSE(bpf_map__autocreate(skel->maps.optional_map2), "optional_map2 autocreate");

	err = bpf_map__set_autocreate(skel->maps.testmod_1, false);
	err |= bpf_map__set_autocreate(skel->maps.testmod_2, false);
	err |= bpf_map__set_autocreate(skel->maps.optional_map2, true);
	if (!ASSERT_OK(err, "bpf_map__set_autocreate"))
		goto cleanup;

	err = struct_ops_autocreate__load(skel);
	/* BUG FIX: negate the condition — the attach check below must run
	 * when the load succeeds, not be skipped by it.
	 */
	if (!ASSERT_OK(err, "struct_ops_autocreate__load"))
		goto cleanup;

	check_test_1_link(skel, skel->maps.optional_map2);

cleanup:
	struct_ops_autocreate__destroy(skel);
}
/* Swap testmod_1->test_1 program from 'bar' to 'foo' using shadow vars.
 * Loading testmod_1 should then enable autoload for 'foo' only.
 */
static void autoload_and_shadow_vars(void)
{
	struct struct_ops_autocreate2 *skel = NULL;
	struct bpf_link *link = NULL;
	int err;

	skel = struct_ops_autocreate2__open();
	if (!ASSERT_OK_PTR(skel, "struct_ops_autocreate2__open"))
		return;

	/* both progs use a '?' section prefix, so autoload starts false */
	ASSERT_FALSE(bpf_program__autoload(skel->progs.foo), "foo default autoload");
	ASSERT_FALSE(bpf_program__autoload(skel->progs.bar), "bar default autoload");

	/* loading map testmod_1 would switch foo's autoload to true */
	skel->struct_ops.testmod_1->test_1 = skel->progs.foo;

	err = struct_ops_autocreate2__load(skel);
	/* BUG FIX: negate the condition — the original
	 * 'if (ASSERT_OK(...)) goto cleanup;' skipped every post-load
	 * check whenever the load succeeded.
	 */
	if (!ASSERT_OK(err, "struct_ops_autocreate2__load"))
		goto cleanup;

	ASSERT_TRUE(bpf_program__autoload(skel->progs.foo), "foo actual autoload");
	ASSERT_FALSE(bpf_program__autoload(skel->progs.bar), "bar actual autoload");

	link = bpf_map__attach_struct_ops(skel->maps.testmod_1);
	if (!ASSERT_OK_PTR(link, "bpf_map__attach_struct_ops"))
		goto cleanup;

	/* test_1() runs from the testmod's reg() callback in bpf_testmod.c */
	err = ASSERT_EQ(skel->bss->test_1_result, 42, "test_1_result");

cleanup:
	bpf_link__destroy(link);
	struct_ops_autocreate2__destroy(skel);
}
/* Entry point for the struct_ops_autocreate selftest group; each
 * scenario runs as a separately selectable subtest.
 */
void test_struct_ops_autocreate(void)
{
	if (test__start_subtest("cant_load_full_object"))
		cant_load_full_object();
	if (test__start_subtest("can_load_partial_object"))
		can_load_partial_object();
	if (test__start_subtest("autoload_and_shadow_vars"))
		autoload_and_shadow_vars();
	if (test__start_subtest("optional_maps"))
		optional_maps();
}
......@@ -30,11 +30,29 @@ static void check_map_info(struct bpf_map_info *info)
close(fd);
}
/* Attach @map via a struct_ops link and verify the results that the
 * map's programs wrote into the BPF global data.  Returns 0 on success,
 * -1 if the attach failed; value mismatches are reported through the
 * ASSERT macros without changing the return value.
 */
static int attach_ops_and_check(struct struct_ops_module *skel,
				struct bpf_map *map,
				int expected_test_2_result)
{
	struct bpf_link *ops_link;

	ops_link = bpf_map__attach_struct_ops(map);
	ASSERT_OK_PTR(ops_link, "attach_test_mod_1");
	if (!ops_link)
		return -1;

	/* test_{1,2}() would be called from bpf_dummy_reg() in bpf_testmod.c */
	ASSERT_EQ(skel->bss->test_1_result, 0xdeadbeef, "test_1_result");
	ASSERT_EQ(skel->bss->test_2_result, expected_test_2_result, "test_2_result");

	bpf_link__destroy(ops_link);
	return 0;
}
static void test_struct_ops_load(void)
{
struct struct_ops_module *skel;
struct bpf_map_info info = {};
struct bpf_link *link;
int err;
u32 len;
......@@ -59,20 +77,17 @@ static void test_struct_ops_load(void)
if (!ASSERT_OK(err, "bpf_map_get_info_by_fd"))
goto cleanup;
link = bpf_map__attach_struct_ops(skel->maps.testmod_1);
ASSERT_OK_PTR(link, "attach_test_mod_1");
check_map_info(&info);
/* test_3() will be called from bpf_dummy_reg() in bpf_testmod.c
*
* In bpf_testmod.c it will pass 4 and 13 (the value of data) to
* .test_2. So, the value of test_2_result should be 20 (4 + 13 +
* 3).
*/
ASSERT_EQ(skel->bss->test_2_result, 20, "check_shadow_variables");
bpf_link__destroy(link);
check_map_info(&info);
if (!attach_ops_and_check(skel, skel->maps.testmod_1, 20))
goto cleanup;
if (!attach_ops_and_check(skel, skel->maps.testmod_2, 12))
goto cleanup;
cleanup:
struct_ops_module__destroy(skel);
......
// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "../bpf_testmod/bpf_testmod.h"
char _license[] SEC("license") = "GPL";

/* two no-op struct_ops programs */
SEC("struct_ops/test_1")
int BPF_PROG(test_1) { return 0; }

SEC("struct_ops/test_2")
int BPF_PROG(test_2) { return 0; }

/* testmod_1 and testmod_2 have different kernel struct_ops types but
 * both reference prog test_1 — libbpf must reject this reuse at load
 * time (see the invalid_prog_reuse selftest).
 */
SEC(".struct_ops.link")
struct bpf_testmod_ops testmod_1 = {
	.test_1 = (void *)test_1,
	.test_2 = (void *)test_2
};

SEC(".struct_ops.link")
struct bpf_testmod_ops2 testmod_2 = {
	.test_1 = (void *)test_1
};
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
char _license[] SEC("license") = "GPL";

/* This is an unused struct_ops program: it lacks a corresponding
 * struct_ops map, which would provide its attachment information.
 * Without additional configuration, an attempt to load such a BPF
 * object file fails (see the unused_program selftest).
 */
SEC("struct_ops/foo")
void foo(void) {}
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
/* set to 42 when test_1 runs; checked from user space via skel->bss */
int test_1_result = 0;

SEC("struct_ops/test_1")
int BPF_PROG(test_1)
{
	test_1_result = 42;
	return 0;
}

/* only referenced from the (incompatible) testmod_2 map below */
SEC("struct_ops/test_1")
int BPF_PROG(test_2)
{
	return 0;
}

/* Local BTF view of the kernel's struct bpf_testmod_ops; the ___v1/___v2
 * suffixes are ignored when matching against the kernel BTF type.
 */
struct bpf_testmod_ops___v1 {
	int (*test_1)(void);
};

/* Deliberately incompatible view: 'does_not_exist' has no counterpart in
 * the kernel type, so a map of this type cannot be loaded unless its
 * autocreate flag is turned off.
 */
struct bpf_testmod_ops___v2 {
	int (*test_1)(void);
	int (*does_not_exist)(void);
};

SEC(".struct_ops.link")
struct bpf_testmod_ops___v1 testmod_1 = {
	.test_1 = (void *)test_1
};

SEC(".struct_ops.link")
struct bpf_testmod_ops___v2 testmod_2 = {
	.test_1 = (void *)test_1,
	.does_not_exist = (void *)test_2
};

/* '?' section prefix: autocreate defaults to false for these maps */
SEC("?.struct_ops")
struct bpf_testmod_ops___v1 optional_map = {
	.test_1 = (void *)test_1,
};

SEC("?.struct_ops.link")
struct bpf_testmod_ops___v1 optional_map2 = {
	.test_1 = (void *)test_1,
};
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
/* records which program ran: 42 for foo, 24 for bar */
int test_1_result = 0;

/* '?' section prefix: these programs open with autoload == false */
SEC("?struct_ops/test_1")
int BPF_PROG(foo)
{
	test_1_result = 42;
	return 0;
}

SEC("?struct_ops/test_1")
int BPF_PROG(bar)
{
	test_1_result = 24;
	return 0;
}

struct bpf_testmod_ops {
	int (*test_1)(void);
};

/* testmod_1 initially references bar; the autoload_and_shadow_vars test
 * swaps in foo via the skeleton's shadow variables before load.
 */
SEC(".struct_ops.link")
struct bpf_testmod_ops testmod_1 = {
	.test_1 = (void *)bar
};
......@@ -7,12 +7,14 @@
char _license[] SEC("license") = "GPL";
int test_1_result = 0;
int test_2_result = 0;
SEC("struct_ops/test_1")
int BPF_PROG(test_1)
{
	/* BUG FIX: a stale 'return 0xdeadbeef;' preceded the assignment,
	 * making it unreachable.  The runner checks test_1_result (see
	 * attach_ops_and_check), not the return value.
	 */
	test_1_result = 0xdeadbeef;
	return 0;
}
SEC("struct_ops/test_2")
......@@ -35,3 +37,20 @@ struct bpf_testmod_ops testmod_1 = {
.data = 0x1,
};
/* Alternative test_2 implementation: multiplies its arguments instead of
 * summing, so the runner can tell which map's program executed (the
 * testmod_2 attach expects a different test_2_result than testmod_1's).
 */
SEC("struct_ops/test_2")
void BPF_PROG(test_2_v2, int a, int b)
{
	test_2_result = a * b;
}

/* Local view of struct bpf_testmod_ops; the ___v2 suffix is stripped
 * when matching against the kernel BTF type.
 */
struct bpf_testmod_ops___v2 {
	int (*test_1)(void);
	void (*test_2)(int a, int b);
	int (*test_maybe_null)(int dummy, struct task_struct *task);
};

SEC(".struct_ops.link")
struct bpf_testmod_ops___v2 testmod_2 = {
	.test_1 = (void *)test_1,
	.test_2 = (void *)test_2_v2,
};
......@@ -683,11 +683,69 @@ static const struct argp_option opts[] = {
{},
};
/* memstream receiving libbpf INFO/WARN output while capture is active;
 * NULL when no capture is in progress */
static FILE *libbpf_capture_stream;

/* backing buffer for libbpf_capture_stream, managed by open_memstream() */
static struct {
	char *buf;
	size_t buf_sz;
} libbpf_output_capture;
/* Creates a global memstream capturing INFO and WARN level output
 * passed to libbpf_print_fn.
 * Returns 0 on success, negative value on failure.
 * On failure the description is printed using PRINT_FAIL and the
 * current test case is marked as failed.
 */
int start_libbpf_log_capture(void)
{
	FILE *stream;

	if (libbpf_capture_stream != NULL) {
		PRINT_FAIL("%s: libbpf_capture_stream != NULL\n", __func__);
		return -EINVAL;
	}

	stream = open_memstream(&libbpf_output_capture.buf,
				&libbpf_output_capture.buf_sz);
	if (!stream) {
		PRINT_FAIL("%s: open_memstream failed errno=%d\n", __func__, errno);
		return -EINVAL;
	}

	libbpf_capture_stream = stream;
	return 0;
}
/* Destroys the global memstream created by start_libbpf_log_capture().
 * Returns a pointer to the captured data, which the caller must free();
 * the returned buffer is NUL terminated.  Returns NULL when no capture
 * is active.
 */
char *stop_libbpf_log_capture(void)
{
	char *captured;

	if (!libbpf_capture_stream)
		return NULL;

	/* terminate the buffer, then flush and close the stream */
	fputc(0, libbpf_capture_stream);
	fclose(libbpf_capture_stream);
	libbpf_capture_stream = NULL;

	/* 'buf' is only guaranteed valid after fclose(), per the
	 * open_memstream() documentation */
	captured = libbpf_output_capture.buf;
	memset(&libbpf_output_capture, 0, sizeof(libbpf_output_capture));
	return captured;
}
/* libbpf print callback: duplicates INFO/WARN messages into the capture
 * memstream (when active) and forwards everything to stdout, except that
 * DEBUG messages are suppressed below the highest verbosity level.
 */
static int libbpf_print_fn(enum libbpf_print_level level,
			   const char *format, va_list args)
{
	if (libbpf_capture_stream && level != LIBBPF_DEBUG) {
		va_list args2;

		va_copy(args2, args);
		vfprintf(libbpf_capture_stream, format, args2);
		/* BUG FIX: each va_copy() must be paired with va_end() */
		va_end(args2);
	}

	if (env.verbosity < VERBOSE_VERY && level == LIBBPF_DEBUG)
		return 0;

	vfprintf(stdout, format, args);
	return 0;
}
......@@ -1081,6 +1139,7 @@ static void run_one_test(int test_num)
cleanup_cgroup_environment();
stdio_restore();
free(stop_libbpf_log_capture());
dump_test_log(test, state, false, false, NULL);
}
......
......@@ -397,6 +397,9 @@ int test__join_cgroup(const char *path);
system(cmd); \
})
int start_libbpf_log_capture(void);
char *stop_libbpf_log_capture(void);
/* Cast a pointer to __u64, going through unsigned long so the value is
 * zero-extended rather than sign-extended on 32-bit targets.
 */
static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment