Commit a5f6b9d5 authored by Alexei Starovoitov

Merge branch 'Enable struct_ops programs to be sleepable'

David Vernet says:

====================

This is part 4 of https://lore.kernel.org/bpf/20230123232228.646563-1-void@manifault.com/

Part 3: https://lore.kernel.org/all/20230125050359.339273-1-void@manifault.com/
Part 2: https://lore.kernel.org/all/20230124160802.1122124-1-void@manifault.com/

Changelog:
----------
v3 -> v4:
- Fix an accidental typo in the name of dummy_st_ops introduced in v2,
  moving it back to dummy_st_ops from dummy_st_ops_success. Should fix
  the s390x test runs.

v2 -> v3:
- Don't call a KF_SLEEPABLE kfunc from the dummy_st_ops test suite, and
  remove the newly added bpf_kfunc_call_test_sleepable() test kfunc
  (Martin).
- Include vmlinux.h from progs/dummy_st_ops_success.c (previously
  progs/dummy_st_ops.c) rather than manually defining
  struct bpf_dummy_ops_state and struct bpf_dummy_ops.
  (Martin).
- Fix a typo added to prog_tests/dummy_st_ops.c in a previous version:
  s/trace_dummy_st_ops_success__open/trace_dummy_st_ops__open.

v1 -> v2:
- Add support for specifying sleepable struct_ops programs with
  struct_ops.s in libbpf (Alexei); a short usage sketch is included
  below, just ahead of the diff.
- Move failure test case into new dummy_st_ops_fail.c prog file.
- Update test_dummy_sleepable() to use struct_ops.s instead of manually
  setting prog flags. Also remove the open_load_skel() helper, which is
  no longer needed.
- Fix verifier tests to expect new sleepable prog failure message.
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents 2514a312 7dd88059
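
For reference, a minimal sketch of the BPF-side usage this series enables: a
struct_ops program declared sleepable via the new "struct_ops.s" section. It
mirrors the dummy_st_ops selftest added below (progs/dummy_st_ops_success.c);
struct bpf_dummy_ops and struct bpf_dummy_ops_state are the kernel-provided
types from vmlinux.h.

    // SPDX-License-Identifier: GPL-2.0
    /* Sketch only: mirrors progs/dummy_st_ops_success.c from this series. */
    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    char _license[] SEC("license") = "GPL";

    /* The ".s" suffix marks this struct_ops program as sleepable. */
    SEC("struct_ops.s/test_sleepable")
    int BPF_PROG(test_sleepable, struct bpf_dummy_ops_state *state)
    {
        return 0;
    }

    SEC(".struct_ops")
    struct bpf_dummy_ops dummy_1 = {
        .test_sleepable = (void *)test_sleepable,
    };

libbpf resolves the ".s" suffix through the new "struct_ops.s+" section
definition (SEC_SLEEPABLE), and the kernel-side .check_member callback decides
which members may be implemented by sleepable programs (for bpf_dummy_ops,
only test_sleepable).
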
@@ -1422,7 +1422,8 @@ struct bpf_struct_ops {
 	const struct bpf_verifier_ops *verifier_ops;
 	int (*init)(struct btf *btf);
 	int (*check_member)(const struct btf_type *t,
-			    const struct btf_member *member);
+			    const struct btf_member *member,
+			    const struct bpf_prog *prog);
 	int (*init_member)(const struct btf_type *t,
 			   const struct btf_member *member,
 			   void *kdata, const void *udata);
@@ -1473,6 +1474,7 @@ struct bpf_dummy_ops {
 	int (*test_1)(struct bpf_dummy_ops_state *cb);
 	int (*test_2)(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2,
 		      char a3, unsigned long a4);
+	int (*test_sleepable)(struct bpf_dummy_ops_state *cb);
 };
 
 int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
......
@@ -16792,7 +16792,7 @@ static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
 	}
 
 	if (st_ops->check_member) {
-		int err = st_ops->check_member(t, member);
+		int err = st_ops->check_member(t, member, prog);
 
 		if (err) {
 			verbose(env, "attach to unsupported member %s of struct %s\n",
@@ -17114,7 +17114,8 @@ static bool can_be_sleepable(struct bpf_prog *prog)
 		}
 	}
 	return prog->type == BPF_PROG_TYPE_LSM ||
-	       prog->type == BPF_PROG_TYPE_KPROBE; /* only for uprobes */
+	       prog->type == BPF_PROG_TYPE_KPROBE /* only for uprobes */ ||
+	       prog->type == BPF_PROG_TYPE_STRUCT_OPS;
 }
 
 static int check_attach_btf_id(struct bpf_verifier_env *env)
@@ -17136,7 +17137,7 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
 	}
 
 	if (prog->aux->sleepable && !can_be_sleepable(prog)) {
-		verbose(env, "Only fentry/fexit/fmod_ret, lsm, iter and uprobe programs can be sleepable\n");
+		verbose(env, "Only fentry/fexit/fmod_ret, lsm, iter, uprobe, and struct_ops programs can be sleepable\n");
 		return -EINVAL;
 	}
......
@@ -154,6 +154,23 @@ static bool bpf_dummy_ops_is_valid_access(int off, int size,
 	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
 }
 
+static int bpf_dummy_ops_check_member(const struct btf_type *t,
+				       const struct btf_member *member,
+				       const struct bpf_prog *prog)
+{
+	u32 moff = __btf_member_bit_offset(t, member) / 8;
+
+	switch (moff) {
+	case offsetof(struct bpf_dummy_ops, test_sleepable):
+		break;
+	default:
+		if (prog->aux->sleepable)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int bpf_dummy_ops_btf_struct_access(struct bpf_verifier_log *log,
 					   const struct bpf_reg_state *reg,
 					   int off, int size, enum bpf_access_type atype,
@@ -208,6 +225,7 @@ static void bpf_dummy_unreg(void *kdata)
 struct bpf_struct_ops bpf_bpf_dummy_ops = {
 	.verifier_ops = &bpf_dummy_verifier_ops,
 	.init = bpf_dummy_init,
+	.check_member = bpf_dummy_ops_check_member,
 	.init_member = bpf_dummy_init_member,
 	.reg = bpf_dummy_reg,
 	.unreg = bpf_dummy_unreg,
......
@@ -248,7 +248,8 @@ static int bpf_tcp_ca_init_member(const struct btf_type *t,
 }
 
 static int bpf_tcp_ca_check_member(const struct btf_type *t,
-				   const struct btf_member *member)
+				   const struct btf_member *member,
+				   const struct bpf_prog *prog)
 {
 	if (is_unsupported(__btf_member_bit_offset(t, member) / 8))
 		return -ENOTSUPP;
......
@@ -8605,6 +8605,7 @@ static const struct bpf_sec_def section_defs[] = {
 	SEC_DEF("cgroup/setsockopt", CGROUP_SOCKOPT, BPF_CGROUP_SETSOCKOPT, SEC_ATTACHABLE),
 	SEC_DEF("cgroup/dev", CGROUP_DEVICE, BPF_CGROUP_DEVICE, SEC_ATTACHABLE_OPT),
 	SEC_DEF("struct_ops+", STRUCT_OPS, 0, SEC_NONE),
+	SEC_DEF("struct_ops.s+", STRUCT_OPS, 0, SEC_SLEEPABLE),
 	SEC_DEF("sk_lookup", SK_LOOKUP, BPF_SK_LOOKUP, SEC_ATTACHABLE),
 };
......
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2021. Huawei Technologies Co., Ltd */
 #include <test_progs.h>
-#include "dummy_st_ops.skel.h"
+#include "dummy_st_ops_success.skel.h"
+#include "dummy_st_ops_fail.skel.h"
 #include "trace_dummy_st_ops.skel.h"
 
 /* Need to keep consistent with definition in include/linux/bpf.h */
@@ -11,17 +12,17 @@ struct bpf_dummy_ops_state {
 
 static void test_dummy_st_ops_attach(void)
 {
-	struct dummy_st_ops *skel;
+	struct dummy_st_ops_success *skel;
 	struct bpf_link *link;
 
-	skel = dummy_st_ops__open_and_load();
+	skel = dummy_st_ops_success__open_and_load();
 	if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load"))
 		return;
 
 	link = bpf_map__attach_struct_ops(skel->maps.dummy_1);
 	ASSERT_EQ(libbpf_get_error(link), -EOPNOTSUPP, "dummy_st_ops_attach");
 
-	dummy_st_ops__destroy(skel);
+	dummy_st_ops_success__destroy(skel);
 }
 
 static void test_dummy_init_ret_value(void)
@@ -31,10 +32,10 @@ static void test_dummy_init_ret_value(void)
 		.ctx_in = args,
 		.ctx_size_in = sizeof(args),
 	);
-	struct dummy_st_ops *skel;
+	struct dummy_st_ops_success *skel;
 	int fd, err;
 
-	skel = dummy_st_ops__open_and_load();
+	skel = dummy_st_ops_success__open_and_load();
 	if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load"))
 		return;
@@ -43,7 +44,7 @@ static void test_dummy_init_ret_value(void)
 	ASSERT_OK(err, "test_run");
 	ASSERT_EQ(attr.retval, 0xf2f3f4f5, "test_ret");
 
-	dummy_st_ops__destroy(skel);
+	dummy_st_ops_success__destroy(skel);
 }
 
 static void test_dummy_init_ptr_arg(void)
@@ -58,10 +59,10 @@ static void test_dummy_init_ptr_arg(void)
 		.ctx_size_in = sizeof(args),
 	);
 	struct trace_dummy_st_ops *trace_skel;
-	struct dummy_st_ops *skel;
+	struct dummy_st_ops_success *skel;
 	int fd, err;
 
-	skel = dummy_st_ops__open_and_load();
+	skel = dummy_st_ops_success__open_and_load();
 	if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load"))
 		return;
@@ -91,7 +92,7 @@ static void test_dummy_init_ptr_arg(void)
 	ASSERT_EQ(trace_skel->bss->val, exp_retval, "fentry_val");
 
 done:
-	dummy_st_ops__destroy(skel);
+	dummy_st_ops_success__destroy(skel);
 	trace_dummy_st_ops__destroy(trace_skel);
 }
@@ -102,12 +103,12 @@ static void test_dummy_multiple_args(void)
 		.ctx_in = args,
 		.ctx_size_in = sizeof(args),
 	);
-	struct dummy_st_ops *skel;
+	struct dummy_st_ops_success *skel;
 	int fd, err;
 	size_t i;
 	char name[8];
 
-	skel = dummy_st_ops__open_and_load();
+	skel = dummy_st_ops_success__open_and_load();
 	if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load"))
 		return;
@@ -119,7 +120,28 @@ static void test_dummy_multiple_args(void)
 		ASSERT_EQ(skel->bss->test_2_args[i], args[i], name);
 	}
 
-	dummy_st_ops__destroy(skel);
+	dummy_st_ops_success__destroy(skel);
 }
 
+static void test_dummy_sleepable(void)
+{
+	__u64 args[1] = {0};
+	LIBBPF_OPTS(bpf_test_run_opts, attr,
+		    .ctx_in = args,
+		    .ctx_size_in = sizeof(args),
+	);
+	struct dummy_st_ops_success *skel;
+	int fd, err;
+
+	skel = dummy_st_ops_success__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "dummy_st_ops_load"))
+		return;
+
+	fd = bpf_program__fd(skel->progs.test_sleepable);
+	err = bpf_prog_test_run_opts(fd, &attr);
+	ASSERT_OK(err, "test_run");
+
+	dummy_st_ops_success__destroy(skel);
+}
+
 void test_dummy_st_ops(void)
@@ -132,4 +154,8 @@ void test_dummy_st_ops(void)
 		test_dummy_init_ptr_arg();
 	if (test__start_subtest("dummy_multiple_args"))
 		test_dummy_multiple_args();
+	if (test__start_subtest("dummy_sleepable"))
+		test_dummy_sleepable();
+
+	RUN_TESTS(dummy_st_ops_fail);
 }
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"

char _license[] SEC("license") = "GPL";

SEC("struct_ops.s/test_2")
__failure __msg("attach to unsupported member test_2 of struct bpf_dummy_ops")
int BPF_PROG(test_unsupported_field_sleepable,
	     struct bpf_dummy_ops_state *state, int a1, unsigned short a2,
	     char a3, unsigned long a4)
{
	/* Tries to mark an unsleepable field in struct bpf_dummy_ops as sleepable. */
	return 0;
}

SEC(".struct_ops")
struct bpf_dummy_ops dummy_1 = {
	.test_1 = NULL,
	.test_2 = (void *)test_unsupported_field_sleepable,
	.test_sleepable = (void *)NULL,
};
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2021. Huawei Technologies Co., Ltd */
-#include <linux/bpf.h>
+#include "vmlinux.h"
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
 
-struct bpf_dummy_ops_state {
-	int val;
-} __attribute__((preserve_access_index));
-
-struct bpf_dummy_ops {
-	int (*test_1)(struct bpf_dummy_ops_state *state);
-	int (*test_2)(struct bpf_dummy_ops_state *state, int a1, unsigned short a2,
-		      char a3, unsigned long a4);
-};
-
 char _license[] SEC("license") = "GPL";
 
 SEC("struct_ops/test_1")
@@ -43,8 +33,15 @@ int BPF_PROG(test_2, struct bpf_dummy_ops_state *state, int a1, unsigned short a
 	return 0;
 }
 
+SEC("struct_ops.s/test_sleepable")
+int BPF_PROG(test_sleepable, struct bpf_dummy_ops_state *state)
+{
+	return 0;
+}
+
 SEC(".struct_ops")
 struct bpf_dummy_ops dummy_1 = {
 	.test_1 = (void *)test_1,
 	.test_2 = (void *)test_2,
+	.test_sleepable = (void *)test_sleepable,
 };
@@ -85,7 +85,7 @@
 	.expected_attach_type = BPF_TRACE_RAW_TP,
 	.kfunc = "sched_switch",
 	.result = REJECT,
-	.errstr = "Only fentry/fexit/fmod_ret, lsm, iter and uprobe programs can be sleepable",
+	.errstr = "Only fentry/fexit/fmod_ret, lsm, iter, uprobe, and struct_ops programs can be sleepable",
 	.flags = BPF_F_SLEEPABLE,
 	.runs = -1,
 },