Commit f6284563 authored by Alexei Starovoitov

Merge branch 'bpf-add-gen_epilogue-to-bpf_verifier_ops'

Martin KaFai Lau says:

====================
bpf: Add gen_epilogue to bpf_verifier_ops

From: Martin KaFai Lau <martin.lau@kernel.org>

This set allows a subsystem to patch instructions before BPF_EXIT.
A new verifier op, .gen_epilogue, is added for this purpose.
One use case is the bpf qdisc: the bpf qdisc subsystem can ensure
that skb->dev has the correct value. The subsystem can either inline
the fix in the epilogue or call another kernel function (e.g. drop)
from the epilogue to handle it. Another use case could be in
bpf_tcp_ca.c, to enforce that snd_cwnd has a valid value
(e.g. a positive value).
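
As an illustration, a minimal sketch of what a subsystem's .gen_epilogue
callback could look like, modeled on the st_ops_gen_epilogue() test
callback added later in this set (the function name and the fixup body
are hypothetical placeholders):

    /* Hypothetical epilogue: reload the ctx pointer that the verifier
     * spilled to the stack at r10 + ctx_stack_off on prog entry, apply a
     * subsystem fixup, then exit. The return value is the number of
     * patched instructions.
     */
    static int example_gen_epilogue(struct bpf_insn *insn_buf,
                                    const struct bpf_prog *prog,
                                    s16 ctx_stack_off)
    {
        struct bpf_insn *insn = insn_buf;

        /* r1 = *(u64 *)(r10 + ctx_stack_off); */
        *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_FP, ctx_stack_off);
        /* subsystem-specific fixups on the ctx would go here */
        *insn++ = BPF_EXIT_INSN();

        return insn - insn_buf;
    }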

v5:
 * Removed the skip_cnt argument from adjust_jmp_off() in patch 2.
   Instead, reuse the delta argument and skip
   the [tgt_idx, tgt_idx + delta) instructions.
 * Added a BPF_JMP32_A macro in patch 3.
 * Removed pro_epilogue_subprog.c in patch 6.
   pro_epilogue_kfunc.c already covers the subprog case.
   Renamed the file pro_epilogue_kfunc.c to pro_epilogue.c.
   Some of the SEC names and function names were changed
   accordingly (mainly shortened by removing the _kfunc suffix).
 * Added comments to explain the tail_call result in patch 7.
 * Fixed the following bpf CI breakages. I ran it in CI
   manually to confirm:
   https://github.com/kernel-patches/bpf/actions/runs/10590714532
 * s390 zext added "w3 = w3". Adjusted the test to
   use all ALU64 and BPF_DW to avoid zext.
   Also changed the "int a" in the "struct st_ops_args" to "u64 a".
 * llvm17 does not accept:
       *(u64 *)(r1 +0) = 0;
   so it is changed to:
       r3 = 0;
       *(u64 *)(r1 +0) = r3;

v4:
 * Fixed a bug in the memcpy in patch 3.
   The size in the memcpy should be
   epilogue_cnt * sizeof(*epilogue_buf).

v3:
 * Moved epilogue_buf[16] to env.
   Patch 1 is added to move the existing insn_buf[16] to env.
 * Fixed a case where the bpf prog has a BPF_JMP that goes back
   to the first instruction of the main prog.
   The jump-back-to-1st-insn case also applies to the prologue.
   Patch 2 is added to handle it.
 * If the bpf main prog has multiple BPF_EXIT, use a BPF_JA
   to jump to the earlier patched epilogue.
   Note that there are (BPF_JMP32 | BPF_JA) vs (BPF_JMP | BPF_JA)
   details in the patch 3 commit message; see the encoding sketch
   after this list.
 * There are subtle changes in patch 3, so I reset the Reviewed-by.
 * Added patch 8 and patch 9 to cover the changes in patch 2 and patch 3.
 * Dropped the kfunc call from pro/epilogue and its selftests.
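
   As background on that detail: the two encodings differ in which field
   carries the displacement. A short sketch, using the existing BPF_JMP_A()
   macro from include/linux/filter.h and the BPF_JMP32_A() macro added in
   patch 3 (the -2 offsets are placeholders only):

       /* BPF_JMP | BPF_JA: 16-bit signed displacement in insn.off ("goto pc+off") */
       struct bpf_insn near_jmp = BPF_JMP_A(-2);

       /* BPF_JMP32 | BPF_JA: 32-bit signed displacement in insn.imm ("gotol pc+imm"),
        * needed when the earlier patched epilogue can be farther than s16 range.
        */
       struct bpf_insn far_jmp = BPF_JMP32_A(-2);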

v2:
 * Removed the RFC tag. Kept the point where .gen_epilogue is
   called in the verifier relative to check_max_stack_depth().
   This is consistent with the other extra stack_depth
   usage such as optimize_bpf_loop().
 * Used the __xlated check provided by the test_loader to
   check the patched instructions after gen_pro/epilogue (Eduard).
 * Added Patch 3 by Eduard (Thanks!).
====================

Link: https://lore.kernel.org/r/20240829210833.388152-1-martin.lau@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents c6d9dafb cada0bdc
@@ -974,6 +974,8 @@ struct bpf_verifier_ops {
struct bpf_insn_access_aux *info);
int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
const struct bpf_prog *prog);
int (*gen_epilogue)(struct bpf_insn *insn, const struct bpf_prog *prog,
s16 ctx_stack_off);
int (*gen_ld_abs)(const struct bpf_insn *orig,
struct bpf_insn *insn_buf);
u32 (*convert_ctx_access)(enum bpf_access_type type,
......
@@ -23,6 +23,8 @@
 * (in the "-8,-16,...,-512" form)
 */
#define TMP_STR_BUF_LEN 320
/* Patch buffer size */
#define INSN_BUF_SIZE 16
/* Liveness marks, used for registers and spilled-regs (in stack slots).
 * Read marks propagate upwards until they find a write mark; they record that
@@ -780,6 +782,8 @@ struct bpf_verifier_env {
 * e.g., in reg_type_str() to generate reg_type string
 */
char tmp_str_buf[TMP_STR_BUF_LEN];
struct bpf_insn insn_buf[INSN_BUF_SIZE];
struct bpf_insn epilogue_buf[INSN_BUF_SIZE];
};
static inline struct bpf_func_info_aux *subprog_aux(struct bpf_verifier_env *env, int subprog)
......
@@ -437,6 +437,16 @@ static inline bool insn_is_cast_user(const struct bpf_insn *insn)
.off = OFF, \
.imm = 0 })
/* Unconditional jumps, gotol pc + imm32 */
#define BPF_JMP32_A(IMM) \
((struct bpf_insn) { \
.code = BPF_JMP32 | BPF_JA, \
.dst_reg = 0, \
.src_reg = 0, \
.off = 0, \
.imm = IMM })
/* Relative call */
#define BPF_CALL_REL(TGT) \
......
@@ -2034,6 +2034,7 @@ bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return NULL;
}
}
EXPORT_SYMBOL_GPL(bpf_base_func_proto);
void bpf_list_head_free(const struct btf_field *field, void *list_head,
struct bpf_spin_lock *spin_lock)
......
@@ -19286,6 +19286,9 @@ static int adjust_jmp_off(struct bpf_prog *prog, u32 tgt_idx, u32 delta)
for (i = 0; i < insn_cnt; i++, insn++) {
u8 code = insn->code;
if (tgt_idx <= i && i < tgt_idx + delta)
continue;
if ((BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32) ||
BPF_OP(code) == BPF_CALL || BPF_OP(code) == BPF_EXIT)
continue;
@@ -19674,14 +19677,39 @@ static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env,
 */
static int convert_ctx_accesses(struct bpf_verifier_env *env)
{
struct bpf_subprog_info *subprogs = env->subprog_info;
const struct bpf_verifier_ops *ops = env->ops;
-int i, cnt, size, ctx_field_size, delta = 0;
+int i, cnt, size, ctx_field_size, delta = 0, epilogue_cnt = 0;
const int insn_cnt = env->prog->len;
-struct bpf_insn insn_buf[16], *insn;
+struct bpf_insn *epilogue_buf = env->epilogue_buf;
struct bpf_insn *insn_buf = env->insn_buf;
struct bpf_insn *insn;
u32 target_size, size_default, off;
struct bpf_prog *new_prog;
enum bpf_access_type type;
bool is_narrower_load;
int epilogue_idx = 0;
if (ops->gen_epilogue) {
epilogue_cnt = ops->gen_epilogue(epilogue_buf, env->prog,
-(subprogs[0].stack_depth + 8));
if (epilogue_cnt >= INSN_BUF_SIZE) {
verbose(env, "bpf verifier is misconfigured\n");
return -EINVAL;
} else if (epilogue_cnt) {
/* Save the ARG_PTR_TO_CTX for the epilogue to use */
cnt = 0;
subprogs[0].stack_depth += 8;
insn_buf[cnt++] = BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_1,
-subprogs[0].stack_depth);
insn_buf[cnt++] = env->prog->insnsi[0];
new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt);
if (!new_prog)
return -ENOMEM;
env->prog = new_prog;
delta += cnt - 1;
}
}
if (ops->gen_prologue || env->seen_direct_write) {
if (!ops->gen_prologue) {
@@ -19690,7 +19718,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
}
cnt = ops->gen_prologue(insn_buf, env->seen_direct_write,
env->prog);
-if (cnt >= ARRAY_SIZE(insn_buf)) {
+if (cnt >= INSN_BUF_SIZE) {
verbose(env, "bpf verifier is misconfigured\n");
return -EINVAL;
} else if (cnt) {
@@ -19703,6 +19731,9 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
}
}
if (delta)
WARN_ON(adjust_jmp_off(env->prog, 0, delta));
if (bpf_prog_is_offloaded(env->prog->aux))
return 0;
@@ -19735,6 +19766,25 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
insn->code = BPF_STX | BPF_PROBE_ATOMIC | BPF_SIZE(insn->code);
env->prog->aux->num_exentries++;
continue;
} else if (insn->code == (BPF_JMP | BPF_EXIT) &&
epilogue_cnt &&
i + delta < subprogs[1].start) {
/* Generate epilogue for the main prog */
if (epilogue_idx) {
/* jump back to the earlier generated epilogue */
insn_buf[0] = BPF_JMP32_A(epilogue_idx - i - delta - 1);
cnt = 1;
} else {
memcpy(insn_buf, epilogue_buf,
epilogue_cnt * sizeof(*epilogue_buf));
cnt = epilogue_cnt;
/* epilogue_idx cannot be 0. It must have at
* least one ctx ptr saving insn before the
* epilogue.
*/
epilogue_idx = i + delta;
}
goto patch_insn_buf;
} else {
continue;
}
@@ -19837,7 +19887,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
target_size = 0;
cnt = convert_ctx_access(type, insn, insn_buf, env->prog,
&target_size);
-if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) ||
+if (cnt == 0 || cnt >= INSN_BUF_SIZE ||
(ctx_field_size && !target_size)) {
verbose(env, "bpf verifier is misconfigured\n");
return -EINVAL;
@@ -19846,7 +19896,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
if (is_narrower_load && size < target_size) {
u8 shift = bpf_ctx_narrow_access_offset(
off, size, size_default) * 8;
-if (shift && cnt + 1 >= ARRAY_SIZE(insn_buf)) {
+if (shift && cnt + 1 >= INSN_BUF_SIZE) {
verbose(env, "bpf verifier narrow ctx load misconfigured\n");
return -EINVAL;
}
@@ -19871,6 +19921,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
insn->dst_reg, insn->dst_reg,
size * 8, 0);
patch_insn_buf:
new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
if (!new_prog)
return -ENOMEM;
@@ -20391,7 +20442,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
const int insn_cnt = prog->len;
const struct bpf_map_ops *ops;
struct bpf_insn_aux_data *aux;
-struct bpf_insn insn_buf[16];
+struct bpf_insn *insn_buf = env->insn_buf;
struct bpf_prog *new_prog;
struct bpf_map *map_ptr;
int i, ret, cnt, delta = 0, cur_subprog = 0;
@@ -20510,7 +20561,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
(BPF_MODE(insn->code) == BPF_ABS ||
BPF_MODE(insn->code) == BPF_IND)) {
cnt = env->ops->gen_ld_abs(insn, insn_buf);
-if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
+if (cnt == 0 || cnt >= INSN_BUF_SIZE) {
verbose(env, "bpf verifier is misconfigured\n");
return -EINVAL;
}
@@ -20803,7 +20854,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
cnt = ops->map_gen_lookup(map_ptr, insn_buf);
if (cnt == -EOPNOTSUPP)
goto patch_map_ops_generic;
-if (cnt <= 0 || cnt >= ARRAY_SIZE(insn_buf)) {
+if (cnt <= 0 || cnt >= INSN_BUF_SIZE) {
verbose(env, "bpf verifier is misconfigured\n");
return -EINVAL;
}
......
@@ -17,6 +17,7 @@
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/un.h>
#include <linux/filter.h>
#include <net/sock.h>
#include <linux/namei.h>
#include "bpf_testmod.h"
@@ -945,6 +946,51 @@ __bpf_kfunc int bpf_kfunc_call_kernel_getpeername(struct addr_args *args)
return err;
}
static DEFINE_MUTEX(st_ops_mutex);
static struct bpf_testmod_st_ops *st_ops;
__bpf_kfunc int bpf_kfunc_st_ops_test_prologue(struct st_ops_args *args)
{
int ret = -1;
mutex_lock(&st_ops_mutex);
if (st_ops && st_ops->test_prologue)
ret = st_ops->test_prologue(args);
mutex_unlock(&st_ops_mutex);
return ret;
}
__bpf_kfunc int bpf_kfunc_st_ops_test_epilogue(struct st_ops_args *args)
{
int ret = -1;
mutex_lock(&st_ops_mutex);
if (st_ops && st_ops->test_epilogue)
ret = st_ops->test_epilogue(args);
mutex_unlock(&st_ops_mutex);
return ret;
}
__bpf_kfunc int bpf_kfunc_st_ops_test_pro_epilogue(struct st_ops_args *args)
{
int ret = -1;
mutex_lock(&st_ops_mutex);
if (st_ops && st_ops->test_pro_epilogue)
ret = st_ops->test_pro_epilogue(args);
mutex_unlock(&st_ops_mutex);
return ret;
}
__bpf_kfunc int bpf_kfunc_st_ops_inc10(struct st_ops_args *args)
{
args->a += 10;
return args->a;
}
BTF_KFUNCS_START(bpf_testmod_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc)
BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
@@ -981,6 +1027,10 @@ BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_sendmsg, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_sock_sendmsg, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getsockname, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getpeername, KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_prologue, KF_TRUSTED_ARGS | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_epilogue, KF_TRUSTED_ARGS | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_pro_epilogue, KF_TRUSTED_ARGS | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_kfunc_st_ops_inc10, KF_TRUSTED_ARGS)
BTF_KFUNCS_END(bpf_testmod_check_kfunc_ids)
static int bpf_testmod_ops_init(struct btf *btf)
@@ -1100,6 +1150,144 @@ struct bpf_struct_ops bpf_testmod_ops2 = {
.owner = THIS_MODULE,
};
static int bpf_test_mod_st_ops__test_prologue(struct st_ops_args *args)
{
return 0;
}
static int bpf_test_mod_st_ops__test_epilogue(struct st_ops_args *args)
{
return 0;
}
static int bpf_test_mod_st_ops__test_pro_epilogue(struct st_ops_args *args)
{
return 0;
}
static int st_ops_gen_prologue(struct bpf_insn *insn_buf, bool direct_write,
const struct bpf_prog *prog)
{
struct bpf_insn *insn = insn_buf;
if (strcmp(prog->aux->attach_func_name, "test_prologue") &&
strcmp(prog->aux->attach_func_name, "test_pro_epilogue"))
return 0;
/* r6 = r1[0]; // r6 will be "struct st_ops *args". r1 is "u64 *ctx".
* r7 = r6->a;
* r7 += 1000;
* r6->a = r7;
*/
*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0);
*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_6, offsetof(struct st_ops_args, a));
*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 1000);
*insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_7, offsetof(struct st_ops_args, a));
*insn++ = prog->insnsi[0];
return insn - insn_buf;
}
static int st_ops_gen_epilogue(struct bpf_insn *insn_buf, const struct bpf_prog *prog,
s16 ctx_stack_off)
{
struct bpf_insn *insn = insn_buf;
if (strcmp(prog->aux->attach_func_name, "test_epilogue") &&
strcmp(prog->aux->attach_func_name, "test_pro_epilogue"))
return 0;
/* r1 = stack[ctx_stack_off]; // r1 will be "u64 *ctx"
* r1 = r1[0]; // r1 will be "struct st_ops *args"
* r6 = r1->a;
* r6 += 10000;
* r1->a = r6;
* r0 = r6;
* r0 *= 2;
* BPF_EXIT;
*/
*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_FP, ctx_stack_off);
*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0);
*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, offsetof(struct st_ops_args, a));
*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 10000);
*insn++ = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, offsetof(struct st_ops_args, a));
*insn++ = BPF_MOV64_REG(BPF_REG_0, BPF_REG_6);
*insn++ = BPF_ALU64_IMM(BPF_MUL, BPF_REG_0, 2);
*insn++ = BPF_EXIT_INSN();
return insn - insn_buf;
}
static int st_ops_btf_struct_access(struct bpf_verifier_log *log,
const struct bpf_reg_state *reg,
int off, int size)
{
if (off < 0 || off + size > sizeof(struct st_ops_args))
return -EACCES;
return 0;
}
static const struct bpf_verifier_ops st_ops_verifier_ops = {
.is_valid_access = bpf_testmod_ops_is_valid_access,
.btf_struct_access = st_ops_btf_struct_access,
.gen_prologue = st_ops_gen_prologue,
.gen_epilogue = st_ops_gen_epilogue,
.get_func_proto = bpf_base_func_proto,
};
static struct bpf_testmod_st_ops st_ops_cfi_stubs = {
.test_prologue = bpf_test_mod_st_ops__test_prologue,
.test_epilogue = bpf_test_mod_st_ops__test_epilogue,
.test_pro_epilogue = bpf_test_mod_st_ops__test_pro_epilogue,
};
static int st_ops_reg(void *kdata, struct bpf_link *link)
{
int err = 0;
mutex_lock(&st_ops_mutex);
if (st_ops) {
pr_err("st_ops has already been registered\n");
err = -EEXIST;
goto unlock;
}
st_ops = kdata;
unlock:
mutex_unlock(&st_ops_mutex);
return err;
}
static void st_ops_unreg(void *kdata, struct bpf_link *link)
{
mutex_lock(&st_ops_mutex);
st_ops = NULL;
mutex_unlock(&st_ops_mutex);
}
static int st_ops_init(struct btf *btf)
{
return 0;
}
static int st_ops_init_member(const struct btf_type *t,
const struct btf_member *member,
void *kdata, const void *udata)
{
return 0;
}
static struct bpf_struct_ops testmod_st_ops = {
.verifier_ops = &st_ops_verifier_ops,
.init = st_ops_init,
.init_member = st_ops_init_member,
.reg = st_ops_reg,
.unreg = st_ops_unreg,
.cfi_stubs = &st_ops_cfi_stubs,
.name = "bpf_testmod_st_ops",
.owner = THIS_MODULE,
};
extern int bpf_fentry_test1(int a);
static int bpf_testmod_init(void)
@@ -1117,8 +1305,10 @@ static int bpf_testmod_init(void)
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_testmod_kfunc_set);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_testmod_kfunc_set);
ret = ret ?: register_bpf_struct_ops(&bpf_bpf_testmod_ops, bpf_testmod_ops);
ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops2, bpf_testmod_ops2);
ret = ret ?: register_bpf_struct_ops(&testmod_st_ops, bpf_testmod_st_ops);
ret = ret ?: register_btf_id_dtor_kfuncs(bpf_testmod_dtors,
ARRAY_SIZE(bpf_testmod_dtors),
THIS_MODULE);
......
@@ -94,4 +94,15 @@ struct bpf_testmod_ops2 {
int (*test_1)(void);
};
struct st_ops_args {
u64 a;
};
struct bpf_testmod_st_ops {
int (*test_prologue)(struct st_ops_args *args);
int (*test_epilogue)(struct st_ops_args *args);
int (*test_pro_epilogue)(struct st_ops_args *args);
struct module *owner;
};
#endif /* _BPF_TESTMOD_H */
@@ -148,4 +148,10 @@ struct sk_buff *bpf_kfunc_nested_acquire_nonzero_offset_test(struct sk_buff_head
struct sk_buff *bpf_kfunc_nested_acquire_zero_offset_test(struct sock_common *ptr) __ksym;
void bpf_kfunc_nested_release_test(struct sk_buff *ptr) __ksym;
struct st_ops_args;
int bpf_kfunc_st_ops_test_prologue(struct st_ops_args *args) __ksym;
int bpf_kfunc_st_ops_test_epilogue(struct st_ops_args *args) __ksym;
int bpf_kfunc_st_ops_test_pro_epilogue(struct st_ops_args *args) __ksym;
int bpf_kfunc_st_ops_inc10(struct st_ops_args *args) __ksym;
#endif /* _BPF_TESTMOD_KFUNC_H */
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include <test_progs.h>
#include "pro_epilogue.skel.h"
#include "epilogue_tailcall.skel.h"
#include "pro_epilogue_goto_start.skel.h"
#include "epilogue_exit.skel.h"
struct st_ops_args {
__u64 a;
};
static void test_tailcall(void)
{
LIBBPF_OPTS(bpf_test_run_opts, topts);
struct epilogue_tailcall *skel;
struct st_ops_args args;
int err, prog_fd;
skel = epilogue_tailcall__open_and_load();
if (!ASSERT_OK_PTR(skel, "epilogue_tailcall__open_and_load"))
return;
topts.ctx_in = &args;
topts.ctx_size_in = sizeof(args);
skel->links.epilogue_tailcall =
bpf_map__attach_struct_ops(skel->maps.epilogue_tailcall);
if (!ASSERT_OK_PTR(skel->links.epilogue_tailcall, "attach_struct_ops"))
goto done;
/* Both test_epilogue_tailcall and test_epilogue_subprog are
 * patched with the epilogue. When syscall_epilogue_tailcall()
 * is run, test_epilogue_tailcall() is triggered.
 * It executes a tail call and control is transferred to
 * test_epilogue_subprog(). Only test_epilogue_subprog()
 * does args->a += 1, so a final args.a value of 10001
 * guarantees that only the epilogue of
 * test_epilogue_subprog() is executed.
 */
memset(&args, 0, sizeof(args));
prog_fd = bpf_program__fd(skel->progs.syscall_epilogue_tailcall);
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "bpf_prog_test_run_opts");
ASSERT_EQ(args.a, 10001, "args.a");
ASSERT_EQ(topts.retval, 10001 * 2, "topts.retval");
done:
epilogue_tailcall__destroy(skel);
}
void test_pro_epilogue(void)
{
RUN_TESTS(pro_epilogue);
RUN_TESTS(pro_epilogue_goto_start);
RUN_TESTS(epilogue_exit);
if (test__start_subtest("tailcall"))
test_tailcall();
}
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
#include "../bpf_testmod/bpf_testmod.h"
#include "../bpf_testmod/bpf_testmod_kfunc.h"
char _license[] SEC("license") = "GPL";
__success
/* save __u64 *ctx to stack */
__xlated("0: *(u64 *)(r10 -8) = r1")
/* main prog */
__xlated("1: r1 = *(u64 *)(r1 +0)")
__xlated("2: r2 = *(u64 *)(r1 +0)")
__xlated("3: r3 = 0")
__xlated("4: r4 = 1")
__xlated("5: if r2 == 0x0 goto pc+10")
__xlated("6: r0 = 0")
__xlated("7: *(u64 *)(r1 +0) = r3")
/* epilogue */
__xlated("8: r1 = *(u64 *)(r10 -8)")
__xlated("9: r1 = *(u64 *)(r1 +0)")
__xlated("10: r6 = *(u64 *)(r1 +0)")
__xlated("11: r6 += 10000")
__xlated("12: *(u64 *)(r1 +0) = r6")
__xlated("13: r0 = r6")
__xlated("14: r0 *= 2")
__xlated("15: exit")
/* 2nd part of the main prog after the first exit */
__xlated("16: *(u64 *)(r1 +0) = r4")
__xlated("17: r0 = 1")
/* Clear r1 to ensure there is no
 * off-by-one error and that the jump goes back to the
 * beginning of the epilogue, which initializes
 * r1 with the ctx ptr.
 */
__xlated("18: r1 = 0")
__xlated("19: gotol pc-12")
SEC("struct_ops/test_epilogue_exit")
__naked int test_epilogue_exit(void)
{
asm volatile (
"r1 = *(u64 *)(r1 +0);"
"r2 = *(u64 *)(r1 +0);"
"r3 = 0;"
"r4 = 1;"
"if r2 == 0 goto +3;"
"r0 = 0;"
"*(u64 *)(r1 + 0) = r3;"
"exit;"
"*(u64 *)(r1 + 0) = r4;"
"r0 = 1;"
"r1 = 0;"
"exit;"
::: __clobber_all);
}
SEC(".struct_ops.link")
struct bpf_testmod_st_ops epilogue_exit = {
.test_epilogue = (void *)test_epilogue_exit,
};
SEC("syscall")
__retval(20000)
int syscall_epilogue_exit0(void *ctx)
{
struct st_ops_args args = { .a = 1 };
return bpf_kfunc_st_ops_test_epilogue(&args);
}
SEC("syscall")
__retval(20002)
int syscall_epilogue_exit1(void *ctx)
{
struct st_ops_args args = {};
return bpf_kfunc_st_ops_test_epilogue(&args);
}
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
#include "../bpf_testmod/bpf_testmod.h"
#include "../bpf_testmod/bpf_testmod_kfunc.h"
char _license[] SEC("license") = "GPL";
static __noinline __used int subprog(struct st_ops_args *args)
{
args->a += 1;
return args->a;
}
SEC("struct_ops/test_epilogue_subprog")
int BPF_PROG(test_epilogue_subprog, struct st_ops_args *args)
{
subprog(args);
return args->a;
}
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, 1);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(__u32));
__array(values, void (void));
} epilogue_map SEC(".maps") = {
.values = {
[0] = (void *)&test_epilogue_subprog,
}
};
SEC("struct_ops/test_epilogue_tailcall")
int test_epilogue_tailcall(unsigned long long *ctx)
{
bpf_tail_call(ctx, &epilogue_map, 0);
return 0;
}
SEC(".struct_ops.link")
struct bpf_testmod_st_ops epilogue_tailcall = {
.test_epilogue = (void *)test_epilogue_tailcall,
};
SEC(".struct_ops.link")
struct bpf_testmod_st_ops epilogue_subprog = {
.test_epilogue = (void *)test_epilogue_subprog,
};
SEC("syscall")
int syscall_epilogue_tailcall(struct st_ops_args *args)
{
return bpf_kfunc_st_ops_test_epilogue(args);
}
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
#include "../bpf_testmod/bpf_testmod.h"
#include "../bpf_testmod/bpf_testmod_kfunc.h"
char _license[] SEC("license") = "GPL";
void __kfunc_btf_root(void)
{
bpf_kfunc_st_ops_inc10(NULL);
}
static __noinline __used int subprog(struct st_ops_args *args)
{
args->a += 1;
return args->a;
}
__success
/* prologue */
__xlated("0: r6 = *(u64 *)(r1 +0)")
__xlated("1: r7 = *(u64 *)(r6 +0)")
__xlated("2: r7 += 1000")
__xlated("3: *(u64 *)(r6 +0) = r7")
/* main prog */
__xlated("4: r1 = *(u64 *)(r1 +0)")
__xlated("5: r6 = r1")
__xlated("6: call kernel-function")
__xlated("7: r1 = r6")
__xlated("8: call pc+1")
__xlated("9: exit")
SEC("struct_ops/test_prologue")
__naked int test_prologue(void)
{
asm volatile (
"r1 = *(u64 *)(r1 +0);"
"r6 = r1;"
"call %[bpf_kfunc_st_ops_inc10];"
"r1 = r6;"
"call subprog;"
"exit;"
:
: __imm(bpf_kfunc_st_ops_inc10)
: __clobber_all);
}
__success
/* save __u64 *ctx to stack */
__xlated("0: *(u64 *)(r10 -8) = r1")
/* main prog */
__xlated("1: r1 = *(u64 *)(r1 +0)")
__xlated("2: r6 = r1")
__xlated("3: call kernel-function")
__xlated("4: r1 = r6")
__xlated("5: call pc+")
/* epilogue */
__xlated("6: r1 = *(u64 *)(r10 -8)")
__xlated("7: r1 = *(u64 *)(r1 +0)")
__xlated("8: r6 = *(u64 *)(r1 +0)")
__xlated("9: r6 += 10000")
__xlated("10: *(u64 *)(r1 +0) = r6")
__xlated("11: r0 = r6")
__xlated("12: r0 *= 2")
__xlated("13: exit")
SEC("struct_ops/test_epilogue")
__naked int test_epilogue(void)
{
asm volatile (
"r1 = *(u64 *)(r1 +0);"
"r6 = r1;"
"call %[bpf_kfunc_st_ops_inc10];"
"r1 = r6;"
"call subprog;"
"exit;"
:
: __imm(bpf_kfunc_st_ops_inc10)
: __clobber_all);
}
__success
/* prologue */
__xlated("0: r6 = *(u64 *)(r1 +0)")
__xlated("1: r7 = *(u64 *)(r6 +0)")
__xlated("2: r7 += 1000")
__xlated("3: *(u64 *)(r6 +0) = r7")
/* save __u64 *ctx to stack */
__xlated("4: *(u64 *)(r10 -8) = r1")
/* main prog */
__xlated("5: r1 = *(u64 *)(r1 +0)")
__xlated("6: r6 = r1")
__xlated("7: call kernel-function")
__xlated("8: r1 = r6")
__xlated("9: call pc+")
/* epilogue */
__xlated("10: r1 = *(u64 *)(r10 -8)")
__xlated("11: r1 = *(u64 *)(r1 +0)")
__xlated("12: r6 = *(u64 *)(r1 +0)")
__xlated("13: r6 += 10000")
__xlated("14: *(u64 *)(r1 +0) = r6")
__xlated("15: r0 = r6")
__xlated("16: r0 *= 2")
__xlated("17: exit")
SEC("struct_ops/test_pro_epilogue")
__naked int test_pro_epilogue(void)
{
asm volatile (
"r1 = *(u64 *)(r1 +0);"
"r6 = r1;"
"call %[bpf_kfunc_st_ops_inc10];"
"r1 = r6;"
"call subprog;"
"exit;"
:
: __imm(bpf_kfunc_st_ops_inc10)
: __clobber_all);
}
SEC("syscall")
__retval(1011) /* PROLOGUE_A [1000] + KFUNC_INC10 + SUBPROG_A [1] */
int syscall_prologue(void *ctx)
{
struct st_ops_args args = {};
return bpf_kfunc_st_ops_test_prologue(&args);
}
SEC("syscall")
__retval(20022) /* (KFUNC_INC10 + SUBPROG_A [1] + EPILOGUE_A [10000]) * 2 */
int syscall_epilogue(void *ctx)
{
struct st_ops_args args = {};
return bpf_kfunc_st_ops_test_epilogue(&args);
}
SEC("syscall")
__retval(22022) /* (PROLOGUE_A [1000] + KFUNC_INC10 + SUBPROG_A [1] + EPILOGUE_A [10000]) * 2 */
int syscall_pro_epilogue(void *ctx)
{
struct st_ops_args args = {};
return bpf_kfunc_st_ops_test_pro_epilogue(&args);
}
SEC(".struct_ops.link")
struct bpf_testmod_st_ops pro_epilogue = {
.test_prologue = (void *)test_prologue,
.test_epilogue = (void *)test_epilogue,
.test_pro_epilogue = (void *)test_pro_epilogue,
};
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
#include "../bpf_testmod/bpf_testmod.h"
#include "../bpf_testmod/bpf_testmod_kfunc.h"
char _license[] SEC("license") = "GPL";
__success
/* prologue */
__xlated("0: r6 = *(u64 *)(r1 +0)")
__xlated("1: r7 = *(u64 *)(r6 +0)")
__xlated("2: r7 += 1000")
__xlated("3: *(u64 *)(r6 +0) = r7")
/* main prog */
__xlated("4: if r1 == 0x0 goto pc+5")
__xlated("5: if r1 == 0x1 goto pc+2")
__xlated("6: r1 = 1")
__xlated("7: goto pc-3")
__xlated("8: r1 = 0")
__xlated("9: goto pc-6")
__xlated("10: r0 = 0")
__xlated("11: exit")
SEC("struct_ops/test_prologue_goto_start")
__naked int test_prologue_goto_start(void)
{
asm volatile (
"if r1 == 0 goto +5;"
"if r1 == 1 goto +2;"
"r1 = 1;"
"goto -3;"
"r1 = 0;"
"goto -6;"
"r0 = 0;"
"exit;"
::: __clobber_all);
}
__success
/* save __u64 *ctx to stack */
__xlated("0: *(u64 *)(r10 -8) = r1")
/* main prog */
__xlated("1: if r1 == 0x0 goto pc+5")
__xlated("2: if r1 == 0x1 goto pc+2")
__xlated("3: r1 = 1")
__xlated("4: goto pc-3")
__xlated("5: r1 = 0")
__xlated("6: goto pc-6")
__xlated("7: r0 = 0")
/* epilogue */
__xlated("8: r1 = *(u64 *)(r10 -8)")
__xlated("9: r1 = *(u64 *)(r1 +0)")
__xlated("10: r6 = *(u64 *)(r1 +0)")
__xlated("11: r6 += 10000")
__xlated("12: *(u64 *)(r1 +0) = r6")
__xlated("13: r0 = r6")
__xlated("14: r0 *= 2")
__xlated("15: exit")
SEC("struct_ops/test_epilogue_goto_start")
__naked int test_epilogue_goto_start(void)
{
asm volatile (
"if r1 == 0 goto +5;"
"if r1 == 1 goto +2;"
"r1 = 1;"
"goto -3;"
"r1 = 0;"
"goto -6;"
"r0 = 0;"
"exit;"
::: __clobber_all);
}
__success
/* prologue */
__xlated("0: r6 = *(u64 *)(r1 +0)")
__xlated("1: r7 = *(u64 *)(r6 +0)")
__xlated("2: r7 += 1000")
__xlated("3: *(u64 *)(r6 +0) = r7")
/* save __u64 *ctx to stack */
__xlated("4: *(u64 *)(r10 -8) = r1")
/* main prog */
__xlated("5: if r1 == 0x0 goto pc+5")
__xlated("6: if r1 == 0x1 goto pc+2")
__xlated("7: r1 = 1")
__xlated("8: goto pc-3")
__xlated("9: r1 = 0")
__xlated("10: goto pc-6")
__xlated("11: r0 = 0")
/* epilogue */
__xlated("12: r1 = *(u64 *)(r10 -8)")
__xlated("13: r1 = *(u64 *)(r1 +0)")
__xlated("14: r6 = *(u64 *)(r1 +0)")
__xlated("15: r6 += 10000")
__xlated("16: *(u64 *)(r1 +0) = r6")
__xlated("17: r0 = r6")
__xlated("18: r0 *= 2")
__xlated("19: exit")
SEC("struct_ops/test_pro_epilogue_goto_start")
__naked int test_pro_epilogue_goto_start(void)
{
asm volatile (
"if r1 == 0 goto +5;"
"if r1 == 1 goto +2;"
"r1 = 1;"
"goto -3;"
"r1 = 0;"
"goto -6;"
"r0 = 0;"
"exit;"
::: __clobber_all);
}
SEC(".struct_ops.link")
struct bpf_testmod_st_ops epilogue_goto_start = {
.test_prologue = (void *)test_prologue_goto_start,
.test_epilogue = (void *)test_epilogue_goto_start,
.test_pro_epilogue = (void *)test_pro_epilogue_goto_start,
};
SEC("syscall")
__retval(0)
int syscall_prologue_goto_start(void *ctx)
{
struct st_ops_args args = {};
return bpf_kfunc_st_ops_test_prologue(&args);
}
SEC("syscall")
__retval(20000) /* (EPILOGUE_A [10000]) * 2 */
int syscall_epilogue_goto_start(void *ctx)
{
struct st_ops_args args = {};
return bpf_kfunc_st_ops_test_epilogue(&args);
}
SEC("syscall")
__retval(22000) /* (PROLOGUE_A [1000] + EPILOGUE_A [10000]) * 2 */
int syscall_pro_epilogue_goto_start(void *ctx)
{
struct st_ops_args args = {};
return bpf_kfunc_st_ops_test_pro_epilogue(&args);
}
@@ -890,11 +890,13 @@ void run_subtest(struct test_loader *tester,
{
struct test_subspec *subspec = unpriv ? &spec->unpriv : &spec->priv;
struct bpf_program *tprog = NULL, *tprog_iter;
struct bpf_link *link, *links[32] = {};
struct test_spec *spec_iter;
struct cap_state caps = {};
struct bpf_object *tobj;
struct bpf_map *map;
int retval, err, i;
int links_cnt = 0;
bool should_load;
if (!test__start_subtest(subspec->name))
@@ -999,6 +1001,26 @@ void run_subtest(struct test_loader *tester,
if (restore_capabilities(&caps))
goto tobj_cleanup;
/* Do bpf_map__attach_struct_ops() for each struct_ops map.
* This should trigger bpf_struct_ops->reg callback on kernel side.
*/
bpf_object__for_each_map(map, tobj) {
if (!bpf_map__autocreate(map) ||
bpf_map__type(map) != BPF_MAP_TYPE_STRUCT_OPS)
continue;
if (links_cnt >= ARRAY_SIZE(links)) {
PRINT_FAIL("too many struct_ops maps");
goto tobj_cleanup;
}
link = bpf_map__attach_struct_ops(map);
if (!link) {
PRINT_FAIL("bpf_map__attach_struct_ops failed for map %s: err=%d\n",
bpf_map__name(map), err);
goto tobj_cleanup;
}
links[links_cnt++] = link;
}
if (tester->pre_execution_cb) {
err = tester->pre_execution_cb(tobj);
if (err) {
@@ -1013,9 +1035,14 @@ void run_subtest(struct test_loader *tester,
PRINT_FAIL("Unexpected retval: %d != %d\n", retval, subspec->retval);
goto tobj_cleanup;
}
/* redo bpf_map__attach_struct_ops for each test */
while (links_cnt > 0)
bpf_link__destroy(links[--links_cnt]);
} }
tobj_cleanup:
while (links_cnt > 0)
bpf_link__destroy(links[--links_cnt]);
bpf_object__close(tobj);
subtest_cleanup:
test__end_subtest();
......