Commit 1a437d35 authored by Alexei Starovoitov's avatar Alexei Starovoitov

Merge branch '__jited-test-tag-to-check-disassembly-after-jit'

Eduard Zingerman says:

====================
__jited test tag to check disassembly after jit

Some of the logic in the BPF jits might be non-trivial.
It might be useful to allow testing this logic by comparing
generated native code with expected code template.
This patch set adds a macro __jited() that could be used for
test_loader based tests in a following manner:

    SEC("tp")
    __arch_x86_64
    __jited("   endbr64")
    __jited("   nopl    (%rax,%rax)")
    __jited("   xorq    %rax, %rax")
    ...
    __naked void some_test(void) { ... }

Also add a test for jit code generated for tail calls handling to
demonstrate the feature.

The feature uses LLVM libraries to do the disassembly.
At selftests compilation time Makefile detects if these libraries are
available. When libraries are not available tests using __jited()
are skipped.
Current CI environment does not include llvm development libraries,
but changes to add these are trivial.

This was previously discussed here:
https://lore.kernel.org/bpf/20240718205158.3651529-1-yonghong.song@linux.dev/

Patch-set includes a few auxiliary steps:
- patches #2 and #3 fix a few bugs in test_loader behaviour;
- patch #4 replaces __regex macro with ability to specify regular
  expressions in __msg and __xlated using "{{" "}}" escapes;
- patch #8 updates __xlated to match disassembly lines consecutively,
  the same way as __jited does.

Changes v2->v3:
- changed macro name from __jit_x86 to __jited with __arch_* to
  specify disassembly arch (Yonghong);
- __jited matches disassembly lines consecutively with "..."
  allowing to skip some number of lines (Andrii);
- __xlated matches disassembly lines consecutively, same as __jited;
- "{{...}}" regex brackets instead of __regex macro;
- bug fixes for old commits.

Changes v1->v2:
- stylistic changes suggested by Yonghong;
- fix for -Wformat-truncation related warning when compiled with
  llvm15 (Yonghong).

v1: https://lore.kernel.org/bpf/20240809010518.1137758-1-eddyz87@gmail.com/
v2: https://lore.kernel.org/bpf/20240815205449.242556-1-eddyz87@gmail.com/
====================

Link: https://lore.kernel.org/r/20240820102357.3372779-1-eddyz87@gmail.com
Signed-off-by: default avatarAlexei Starovoitov <ast@kernel.org>
parents ffc41ce5 a038eacd
...@@ -8,6 +8,7 @@ test_lru_map ...@@ -8,6 +8,7 @@ test_lru_map
test_lpm_map test_lpm_map
test_tag test_tag
FEATURE-DUMP.libbpf FEATURE-DUMP.libbpf
FEATURE-DUMP.selftests
fixdep fixdep
/test_progs /test_progs
/test_progs-no_alu32 /test_progs-no_alu32
......
...@@ -33,6 +33,13 @@ OPT_FLAGS ?= $(if $(RELEASE),-O2,-O0) ...@@ -33,6 +33,13 @@ OPT_FLAGS ?= $(if $(RELEASE),-O2,-O0)
LIBELF_CFLAGS := $(shell $(PKG_CONFIG) libelf --cflags 2>/dev/null) LIBELF_CFLAGS := $(shell $(PKG_CONFIG) libelf --cflags 2>/dev/null)
LIBELF_LIBS := $(shell $(PKG_CONFIG) libelf --libs 2>/dev/null || echo -lelf) LIBELF_LIBS := $(shell $(PKG_CONFIG) libelf --libs 2>/dev/null || echo -lelf)
ifeq ($(srctree),)
srctree := $(patsubst %/,%,$(dir $(CURDIR)))
srctree := $(patsubst %/,%,$(dir $(srctree)))
srctree := $(patsubst %/,%,$(dir $(srctree)))
srctree := $(patsubst %/,%,$(dir $(srctree)))
endif
CFLAGS += -g $(OPT_FLAGS) -rdynamic \ CFLAGS += -g $(OPT_FLAGS) -rdynamic \
-Wall -Werror -fno-omit-frame-pointer \ -Wall -Werror -fno-omit-frame-pointer \
$(GENFLAGS) $(SAN_CFLAGS) $(LIBELF_CFLAGS) \ $(GENFLAGS) $(SAN_CFLAGS) $(LIBELF_CFLAGS) \
...@@ -60,6 +67,9 @@ progs/timer_crash.c-CFLAGS := -fno-strict-aliasing ...@@ -60,6 +67,9 @@ progs/timer_crash.c-CFLAGS := -fno-strict-aliasing
progs/test_global_func9.c-CFLAGS := -fno-strict-aliasing progs/test_global_func9.c-CFLAGS := -fno-strict-aliasing
progs/verifier_nocsr.c-CFLAGS := -fno-strict-aliasing progs/verifier_nocsr.c-CFLAGS := -fno-strict-aliasing
# Some utility functions use LLVM libraries
jit_disasm_helpers.c-CFLAGS = $(LLVM_CFLAGS)
ifneq ($(LLVM),) ifneq ($(LLVM),)
# Silence some warnings when compiled with clang # Silence some warnings when compiled with clang
CFLAGS += -Wno-unused-command-line-argument CFLAGS += -Wno-unused-command-line-argument
...@@ -168,6 +178,31 @@ endef ...@@ -168,6 +178,31 @@ endef
include ../lib.mk include ../lib.mk
NON_CHECK_FEAT_TARGETS := clean docs-clean
CHECK_FEAT := $(filter-out $(NON_CHECK_FEAT_TARGETS),$(or $(MAKECMDGOALS), "none"))
ifneq ($(CHECK_FEAT),)
FEATURE_USER := .selftests
FEATURE_TESTS := llvm
FEATURE_DISPLAY := $(FEATURE_TESTS)
# Makefile.feature expects OUTPUT to end with a slash
$(let OUTPUT,$(OUTPUT)/,\
$(eval include ../../../build/Makefile.feature))
endif
ifeq ($(feature-llvm),1)
LLVM_CFLAGS += -DHAVE_LLVM_SUPPORT
LLVM_CONFIG_LIB_COMPONENTS := mcdisassembler all-targets
# both llvm-config and lib.mk add -D_GNU_SOURCE, which ends up as conflict
LLVM_CFLAGS += $(filter-out -D_GNU_SOURCE,$(shell $(LLVM_CONFIG) --cflags))
LLVM_LDLIBS += $(shell $(LLVM_CONFIG) --libs $(LLVM_CONFIG_LIB_COMPONENTS))
ifeq ($(shell $(LLVM_CONFIG) --shared-mode),static)
LLVM_LDLIBS += $(shell $(LLVM_CONFIG) --system-libs $(LLVM_CONFIG_LIB_COMPONENTS))
LLVM_LDLIBS += -lstdc++
endif
LLVM_LDFLAGS += $(shell $(LLVM_CONFIG) --ldflags)
endif
SCRATCH_DIR := $(OUTPUT)/tools SCRATCH_DIR := $(OUTPUT)/tools
BUILD_DIR := $(SCRATCH_DIR)/build BUILD_DIR := $(SCRATCH_DIR)/build
INCLUDE_DIR := $(SCRATCH_DIR)/include INCLUDE_DIR := $(SCRATCH_DIR)/include
...@@ -612,6 +647,10 @@ ifeq ($(filter clean docs-clean,$(MAKECMDGOALS)),) ...@@ -612,6 +647,10 @@ ifeq ($(filter clean docs-clean,$(MAKECMDGOALS)),)
include $(wildcard $(TRUNNER_TEST_OBJS:.o=.d)) include $(wildcard $(TRUNNER_TEST_OBJS:.o=.d))
endif endif
# add per extra obj CFGLAGS definitions
$(foreach N,$(patsubst $(TRUNNER_OUTPUT)/%.o,%,$(TRUNNER_EXTRA_OBJS)), \
$(eval $(TRUNNER_OUTPUT)/$(N).o: CFLAGS += $($(N).c-CFLAGS)))
$(TRUNNER_EXTRA_OBJS): $(TRUNNER_OUTPUT)/%.o: \ $(TRUNNER_EXTRA_OBJS): $(TRUNNER_OUTPUT)/%.o: \
%.c \ %.c \
$(TRUNNER_EXTRA_HDRS) \ $(TRUNNER_EXTRA_HDRS) \
...@@ -628,6 +667,9 @@ ifneq ($2:$(OUTPUT),:$(shell pwd)) ...@@ -628,6 +667,9 @@ ifneq ($2:$(OUTPUT),:$(shell pwd))
$(Q)rsync -aq $$^ $(TRUNNER_OUTPUT)/ $(Q)rsync -aq $$^ $(TRUNNER_OUTPUT)/
endif endif
$(OUTPUT)/$(TRUNNER_BINARY): LDLIBS += $$(LLVM_LDLIBS)
$(OUTPUT)/$(TRUNNER_BINARY): LDFLAGS += $$(LLVM_LDFLAGS)
# some X.test.o files have runtime dependencies on Y.bpf.o files # some X.test.o files have runtime dependencies on Y.bpf.o files
$(OUTPUT)/$(TRUNNER_BINARY): | $(TRUNNER_BPF_OBJS) $(OUTPUT)/$(TRUNNER_BINARY): | $(TRUNNER_BPF_OBJS)
...@@ -637,7 +679,7 @@ $(OUTPUT)/$(TRUNNER_BINARY): $(TRUNNER_TEST_OBJS) \ ...@@ -637,7 +679,7 @@ $(OUTPUT)/$(TRUNNER_BINARY): $(TRUNNER_TEST_OBJS) \
$(TRUNNER_BPFTOOL) \ $(TRUNNER_BPFTOOL) \
| $(TRUNNER_BINARY)-extras | $(TRUNNER_BINARY)-extras
$$(call msg,BINARY,,$$@) $$(call msg,BINARY,,$$@)
$(Q)$$(CC) $$(CFLAGS) $$(filter %.a %.o,$$^) $$(LDLIBS) -o $$@ $(Q)$$(CC) $$(CFLAGS) $$(filter %.a %.o,$$^) $$(LDLIBS) $$(LDFLAGS) -o $$@
$(Q)$(RESOLVE_BTFIDS) --btf $(TRUNNER_OUTPUT)/btf_data.bpf.o $$@ $(Q)$(RESOLVE_BTFIDS) --btf $(TRUNNER_OUTPUT)/btf_data.bpf.o $$@
$(Q)ln -sf $(if $2,..,.)/tools/build/bpftool/$(USE_BOOTSTRAP)bpftool \ $(Q)ln -sf $(if $2,..,.)/tools/build/bpftool/$(USE_BOOTSTRAP)bpftool \
$(OUTPUT)/$(if $2,$2/)bpftool $(OUTPUT)/$(if $2,$2/)bpftool
...@@ -656,6 +698,7 @@ TRUNNER_EXTRA_SOURCES := test_progs.c \ ...@@ -656,6 +698,7 @@ TRUNNER_EXTRA_SOURCES := test_progs.c \
cap_helpers.c \ cap_helpers.c \
unpriv_helpers.c \ unpriv_helpers.c \
netlink_helpers.c \ netlink_helpers.c \
jit_disasm_helpers.c \
test_loader.c \ test_loader.c \
xsk.c \ xsk.c \
disasm.c \ disasm.c \
...@@ -798,7 +841,8 @@ EXTRA_CLEAN := $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) \ ...@@ -798,7 +841,8 @@ EXTRA_CLEAN := $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) \
$(addprefix $(OUTPUT)/,*.o *.d *.skel.h *.lskel.h *.subskel.h \ $(addprefix $(OUTPUT)/,*.o *.d *.skel.h *.lskel.h *.subskel.h \
no_alu32 cpuv4 bpf_gcc bpf_testmod.ko \ no_alu32 cpuv4 bpf_gcc bpf_testmod.ko \
bpf_test_no_cfi.ko \ bpf_test_no_cfi.ko \
liburandom_read.so) liburandom_read.so) \
$(OUTPUT)/FEATURE-DUMP.selftests
.PHONY: docs docs-clean .PHONY: docs docs-clean
......
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <test_progs.h>
#ifdef HAVE_LLVM_SUPPORT
#include <llvm-c/Core.h>
#include <llvm-c/Disassembler.h>
#include <llvm-c/Target.h>
#include <llvm-c/TargetMachine.h>
/* The intent is to use get_jited_program_text() for small test
 * programs written in BPF assembly, thus assume that 32 local labels
 * would be sufficient.
 */
#define MAX_LOCAL_LABELS 32

/* Set once per process: LLVM target/disassembler registration is
 * done lazily on first get_jited_program_text() call.
 */
static bool llvm_initialized;

/* State shared with the LLVM disassembler symbol-lookup callback
 * (lookup_symbol()). Filled in two passes over the code: first to
 * collect program-local branch targets, then to print them as labels.
 */
struct local_labels {
	bool print_phase;			/* false: collect targets; true: resolve names */
	__u32 prog_len;				/* length of disassembled function, bytes */
	__u32 cnt;				/* number of used entries in pcs[]/names[] */
	__u32 pcs[MAX_LOCAL_LABELS];		/* pcs of local jump targets */
	char names[MAX_LOCAL_LABELS][4];	/* label names, "L0".."L99" */
};
/* Symbol-lookup callback passed to LLVMCreateDisasm().
 * 'data' is the struct local_labels shared with disasm_one_func().
 * Behaviour depends on labels->print_phase:
 * - discovery phase (false): record program-local branch targets in
 *   labels->pcs and return no name;
 * - print phase (true): return the label name previously assigned to
 *   ref_value, if any.
 */
static const char *lookup_symbol(void *data, uint64_t ref_value, uint64_t *ref_type,
				 uint64_t ref_pc, const char **ref_name)
{
	struct local_labels *labels = data;
	bool is_branch = *ref_type == LLVMDisassembler_ReferenceType_In_Branch;
	int idx;

	*ref_type = LLVMDisassembler_ReferenceType_InOut_None;
	*ref_name = NULL;
	if (!is_branch)
		return NULL;

	if (!labels->print_phase) {
		/* only targets inside the current function are of interest */
		if (labels->cnt < MAX_LOCAL_LABELS && ref_value < labels->prog_len)
			labels->pcs[labels->cnt++] = ref_value;
		return NULL;
	}

	for (idx = 0; idx < labels->cnt; ++idx) {
		if (labels->pcs[idx] == ref_value)
			return labels->names[idx];
	}
	return NULL;
}
/* Disassemble a single instruction at offset 'pc' of 'image' into 'buf'.
 * Returns the number of bytes consumed, or -EINVAL if LLVM could not
 * decode the instruction (raw bytes are dumped to aid debugging).
 */
static int disasm_insn(LLVMDisasmContextRef ctx, uint8_t *image, __u32 len, __u32 pc,
		       char *buf, __u32 buf_sz)
{
	int consumed, off;

	consumed = LLVMDisasmInstruction(ctx, image + pc, len - pc, pc,
					 buf, buf_sz);
	if (consumed > 0)
		return consumed;

	/* decode failed: show up to 16 raw bytes at the failing offset */
	PRINT_FAIL("Can't disasm instruction at offset %d:", pc);
	for (off = 0; off < 16 && pc + off < len; ++off)
		printf(" %02x", image[pc + off]);
	printf("\n");
	return -EINVAL;
}
/* qsort()/bsearch() comparator for __u32 values, ascending order.
 * Note: do not use (a - b) — unsigned subtraction cannot be negative.
 */
static int cmp_u32(const void *_a, const void *_b)
{
	/* keep const qualifier instead of casting it away */
	__u32 a = *(const __u32 *)_a;
	__u32 b = *(const __u32 *)_b;

	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}
/* Disassemble one contiguous function image of 'len' bytes into 'text_out'.
 * Two passes are made over the code:
 * - pass one discovers program-local branch targets via the
 *   lookup_symbol() callback and assigns them names L0, L1, ...;
 * - pass two prints each instruction as "pc:\t<hex bytes>  [LN:] <asm>".
 * Returns 0 on success, negative error otherwise.
 */
static int disasm_one_func(FILE *text_out, uint8_t *image, __u32 len)
{
	char *label, *colon, *triple = NULL;
	LLVMDisasmContextRef ctx = NULL;
	struct local_labels labels = {};
	__u32 *label_pc, pc;
	int i, cnt, err = 0;
	char buf[64];

	/* disassemble for the host architecture: jited code is native */
	triple = LLVMGetDefaultTargetTriple();
	ctx = LLVMCreateDisasm(triple, &labels, 0, NULL, lookup_symbol);
	if (!ASSERT_OK_PTR(ctx, "LLVMCreateDisasm")) {
		err = -EINVAL;
		goto out;
	}
	cnt = LLVMSetDisasmOptions(ctx, LLVMDisassembler_Option_PrintImmHex);
	if (!ASSERT_EQ(cnt, 1, "LLVMSetDisasmOptions")) {
		err = -EINVAL;
		goto out;
	}
	/* discover labels */
	labels.prog_len = len;
	pc = 0;
	while (pc < len) {
		/* buf_sz == 1: textual output is discarded, this pass only
		 * runs lookup_symbol() for its side effect of recording
		 * branch targets in labels.pcs
		 */
		cnt = disasm_insn(ctx, image, len, pc, buf, 1);
		if (cnt < 0) {
			err = cnt;
			goto out;
		}
		pc += cnt;
	}
	/* sort targets so labels are numbered in increasing pc order and
	 * bsearch() can be used in the print pass below
	 */
	qsort(labels.pcs, labels.cnt, sizeof(*labels.pcs), cmp_u32);
	for (i = 0; i < labels.cnt; ++i)
		/* use (i % 100) to avoid format truncation warning */
		snprintf(labels.names[i], sizeof(labels.names[i]), "L%d", i % 100);
	/* now print with labels */
	labels.print_phase = true;
	pc = 0;
	while (pc < len) {
		cnt = disasm_insn(ctx, image, len, pc, buf, sizeof(buf));
		if (cnt < 0) {
			err = cnt;
			goto out;
		}
		label_pc = bsearch(&pc, labels.pcs, labels.cnt, sizeof(*labels.pcs), cmp_u32);
		label = "";
		colon = "";
		if (label_pc) {
			label = labels.names[label_pc - labels.pcs];
			colon = ":";
		}
		fprintf(text_out, "%x:\t", pc);
		for (i = 0; i < cnt; ++i)
			fprintf(text_out, "%02x ", image[pc + i]);
		/* pad opcode bytes column to 12 slots of 3 chars each */
		for (i = cnt * 3; i < 12 * 3; ++i)
			fputc(' ', text_out);
		fprintf(text_out, "%s%s%s\n", label, colon, buf);
		pc += cnt;
	}
out:
	if (triple)
		LLVMDisposeMessage(triple);
	if (ctx)
		LLVMDisasmDispose(ctx);
	return err;
}
int get_jited_program_text(int fd, char *text, size_t text_sz)
{
struct bpf_prog_info info = {};
__u32 info_len = sizeof(info);
__u32 jited_funcs, len, pc;
__u32 *func_lens = NULL;
FILE *text_out = NULL;
uint8_t *image = NULL;
int i, err = 0;
if (!llvm_initialized) {
LLVMInitializeAllTargetInfos();
LLVMInitializeAllTargetMCs();
LLVMInitializeAllDisassemblers();
llvm_initialized = 1;
}
text_out = fmemopen(text, text_sz, "w");
if (!ASSERT_OK_PTR(text_out, "open_memstream")) {
err = -errno;
goto out;
}
/* first call is to find out jited program len */
err = bpf_prog_get_info_by_fd(fd, &info, &info_len);
if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd #1"))
goto out;
len = info.jited_prog_len;
image = malloc(len);
if (!ASSERT_OK_PTR(image, "malloc(info.jited_prog_len)")) {
err = -ENOMEM;
goto out;
}
jited_funcs = info.nr_jited_func_lens;
func_lens = malloc(jited_funcs * sizeof(__u32));
if (!ASSERT_OK_PTR(func_lens, "malloc(info.nr_jited_func_lens)")) {
err = -ENOMEM;
goto out;
}
memset(&info, 0, sizeof(info));
info.jited_prog_insns = (__u64)image;
info.jited_prog_len = len;
info.jited_func_lens = (__u64)func_lens;
info.nr_jited_func_lens = jited_funcs;
err = bpf_prog_get_info_by_fd(fd, &info, &info_len);
if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd #2"))
goto out;
for (pc = 0, i = 0; i < jited_funcs; ++i) {
fprintf(text_out, "func #%d:\n", i);
disasm_one_func(text_out, image + pc, func_lens[i]);
fprintf(text_out, "\n");
pc += func_lens[i];
}
out:
if (text_out)
fclose(text_out);
if (image)
free(image);
if (func_lens)
free(func_lens);
return err;
}
#else /* HAVE_LLVM_SUPPORT */
/* Stub used when selftests are built without LLVM development
 * libraries: always reports "not supported" so tests relying on
 * jited disassembly are skipped.
 */
int get_jited_program_text(int fd, char *text, size_t text_sz)
{
	if (env.verbosity >= VERBOSE_VERY)
		/* message was missing a trailing newline */
		printf("compiled w/o llvm development libraries, can't dis-assembly binary code\n");
	return -EOPNOTSUPP;
}
#endif /* HAVE_LLVM_SUPPORT */
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
#ifndef __JIT_DISASM_HELPERS_H
#define __JIT_DISASM_HELPERS_H

#include <stddef.h>

/* Disassemble the jited code of BPF program 'fd' into 'text'
 * (at most 'text_sz' bytes, NUL-terminated).
 * Returns 0 on success, a negative error otherwise;
 * -EOPNOTSUPP when built without LLVM support.
 */
int get_jited_program_text(int fd, char *text, size_t text_sz);

#endif /* __JIT_DISASM_HELPERS_H */
...@@ -75,6 +75,7 @@ ...@@ -75,6 +75,7 @@
#include "verifier_stack_ptr.skel.h" #include "verifier_stack_ptr.skel.h"
#include "verifier_subprog_precision.skel.h" #include "verifier_subprog_precision.skel.h"
#include "verifier_subreg.skel.h" #include "verifier_subreg.skel.h"
#include "verifier_tailcall_jit.skel.h"
#include "verifier_typedef.skel.h" #include "verifier_typedef.skel.h"
#include "verifier_uninit.skel.h" #include "verifier_uninit.skel.h"
#include "verifier_unpriv.skel.h" #include "verifier_unpriv.skel.h"
...@@ -198,6 +199,7 @@ void test_verifier_spin_lock(void) { RUN(verifier_spin_lock); } ...@@ -198,6 +199,7 @@ void test_verifier_spin_lock(void) { RUN(verifier_spin_lock); }
void test_verifier_stack_ptr(void) { RUN(verifier_stack_ptr); } void test_verifier_stack_ptr(void) { RUN(verifier_stack_ptr); }
void test_verifier_subprog_precision(void) { RUN(verifier_subprog_precision); } void test_verifier_subprog_precision(void) { RUN(verifier_subprog_precision); }
void test_verifier_subreg(void) { RUN(verifier_subreg); } void test_verifier_subreg(void) { RUN(verifier_subreg); }
void test_verifier_tailcall_jit(void) { RUN(verifier_tailcall_jit); }
void test_verifier_typedef(void) { RUN(verifier_typedef); } void test_verifier_typedef(void) { RUN(verifier_typedef); }
void test_verifier_uninit(void) { RUN(verifier_uninit); } void test_verifier_uninit(void) { RUN(verifier_uninit); }
void test_verifier_unpriv(void) { RUN(verifier_unpriv); } void test_verifier_unpriv(void) { RUN(verifier_unpriv); }
......
...@@ -2,6 +2,9 @@ ...@@ -2,6 +2,9 @@
#ifndef __BPF_MISC_H__ #ifndef __BPF_MISC_H__
#define __BPF_MISC_H__ #define __BPF_MISC_H__
#define XSTR(s) STR(s)
#define STR(s) #s
/* This set of attributes controls behavior of the /* This set of attributes controls behavior of the
* test_loader.c:test_loader__run_subtests(). * test_loader.c:test_loader__run_subtests().
* *
...@@ -22,14 +25,50 @@ ...@@ -22,14 +25,50 @@
* *
* __msg Message expected to be found in the verifier log. * __msg Message expected to be found in the verifier log.
* Multiple __msg attributes could be specified. * Multiple __msg attributes could be specified.
* To match a regular expression use "{{" "}}" brackets,
* e.g. "foo{{[0-9]+}}" matches strings like "foo007".
* Extended POSIX regular expression syntax is allowed
* inside the brackets.
* __msg_unpriv Same as __msg but for unprivileged mode. * __msg_unpriv Same as __msg but for unprivileged mode.
* *
* __regex Same as __msg, but using a regular expression.
* __regex_unpriv Same as __msg_unpriv but using a regular expression.
* __xlated Expect a line in a disassembly log after verifier applies rewrites. * __xlated Expect a line in a disassembly log after verifier applies rewrites.
* Multiple __xlated attributes could be specified. * Multiple __xlated attributes could be specified.
* Regular expressions could be specified same way as in __msg.
* __xlated_unpriv Same as __xlated but for unprivileged mode. * __xlated_unpriv Same as __xlated but for unprivileged mode.
* *
* __jited Match a line in a disassembly of the jited BPF program.
* Has to be used after __arch_* macro.
* For example:
*
* __arch_x86_64
* __jited(" endbr64")
* __jited(" nopl (%rax,%rax)")
* __jited(" xorq %rax, %rax")
* ...
* __naked void some_test(void)
* {
* asm volatile (... ::: __clobber_all);
* }
*
* Regular expressions could be included in patterns same way
* as in __msg.
*
* By default assume that each pattern has to be matched on the
* next consecutive line of disassembly, e.g.:
*
* __jited(" endbr64") # matched on line N
* __jited(" nopl (%rax,%rax)") # matched on line N+1
*
* If match occurs on a wrong line an error is reported.
* To override this behaviour use literal "...", e.g.:
*
* __jited(" endbr64") # matched on line N
* __jited("...") # not matched
* __jited(" nopl (%rax,%rax)") # matched on any line >= N
*
* __jited_unpriv Same as __jited but for unprivileged mode.
*
*
* __success Expect program load success in privileged mode. * __success Expect program load success in privileged mode.
* __success_unpriv Expect program load success in unprivileged mode. * __success_unpriv Expect program load success in unprivileged mode.
* *
...@@ -68,15 +107,15 @@ ...@@ -68,15 +107,15 @@
* Several __arch_* annotations could be specified at once. * Several __arch_* annotations could be specified at once.
* When test case is not run on current arch it is marked as skipped. * When test case is not run on current arch it is marked as skipped.
*/ */
#define __msg(msg) __attribute__((btf_decl_tag("comment:test_expect_msg=" msg))) #define __msg(msg) __attribute__((btf_decl_tag("comment:test_expect_msg=" XSTR(__COUNTER__) "=" msg)))
#define __regex(regex) __attribute__((btf_decl_tag("comment:test_expect_regex=" regex))) #define __xlated(msg) __attribute__((btf_decl_tag("comment:test_expect_xlated=" XSTR(__COUNTER__) "=" msg)))
#define __xlated(msg) __attribute__((btf_decl_tag("comment:test_expect_xlated=" msg))) #define __jited(msg) __attribute__((btf_decl_tag("comment:test_jited=" XSTR(__COUNTER__) "=" msg)))
#define __failure __attribute__((btf_decl_tag("comment:test_expect_failure"))) #define __failure __attribute__((btf_decl_tag("comment:test_expect_failure")))
#define __success __attribute__((btf_decl_tag("comment:test_expect_success"))) #define __success __attribute__((btf_decl_tag("comment:test_expect_success")))
#define __description(desc) __attribute__((btf_decl_tag("comment:test_description=" desc))) #define __description(desc) __attribute__((btf_decl_tag("comment:test_description=" desc)))
#define __msg_unpriv(msg) __attribute__((btf_decl_tag("comment:test_expect_msg_unpriv=" msg))) #define __msg_unpriv(msg) __attribute__((btf_decl_tag("comment:test_expect_msg_unpriv=" XSTR(__COUNTER__) "=" msg)))
#define __regex_unpriv(regex) __attribute__((btf_decl_tag("comment:test_expect_regex_unpriv=" regex))) #define __xlated_unpriv(msg) __attribute__((btf_decl_tag("comment:test_expect_xlated_unpriv=" XSTR(__COUNTER__) "=" msg)))
#define __xlated_unpriv(msg) __attribute__((btf_decl_tag("comment:test_expect_xlated_unpriv=" msg))) #define __jited_unpriv(msg) __attribute__((btf_decl_tag("comment:test_jited=" XSTR(__COUNTER__) "=" msg)))
#define __failure_unpriv __attribute__((btf_decl_tag("comment:test_expect_failure_unpriv"))) #define __failure_unpriv __attribute__((btf_decl_tag("comment:test_expect_failure_unpriv")))
#define __success_unpriv __attribute__((btf_decl_tag("comment:test_expect_success_unpriv"))) #define __success_unpriv __attribute__((btf_decl_tag("comment:test_expect_success_unpriv")))
#define __log_level(lvl) __attribute__((btf_decl_tag("comment:test_log_level="#lvl))) #define __log_level(lvl) __attribute__((btf_decl_tag("comment:test_log_level="#lvl)))
......
...@@ -964,7 +964,7 @@ int dynptr_invalidate_slice_reinit(void *ctx) ...@@ -964,7 +964,7 @@ int dynptr_invalidate_slice_reinit(void *ctx)
* mem_or_null pointers. * mem_or_null pointers.
*/ */
SEC("?raw_tp") SEC("?raw_tp")
__failure __regex("R[0-9]+ type=scalar expected=percpu_ptr_") __failure __msg("R{{[0-9]+}} type=scalar expected=percpu_ptr_")
int dynptr_invalidate_slice_or_null(void *ctx) int dynptr_invalidate_slice_or_null(void *ctx)
{ {
struct bpf_dynptr ptr; struct bpf_dynptr ptr;
...@@ -982,7 +982,7 @@ int dynptr_invalidate_slice_or_null(void *ctx) ...@@ -982,7 +982,7 @@ int dynptr_invalidate_slice_or_null(void *ctx)
/* Destruction of dynptr should also invalidate any slices obtained from it */ /* Destruction of dynptr should also invalidate any slices obtained from it */
SEC("?raw_tp") SEC("?raw_tp")
__failure __regex("R[0-9]+ invalid mem access 'scalar'") __failure __msg("R{{[0-9]+}} invalid mem access 'scalar'")
int dynptr_invalidate_slice_failure(void *ctx) int dynptr_invalidate_slice_failure(void *ctx)
{ {
struct bpf_dynptr ptr1; struct bpf_dynptr ptr1;
...@@ -1069,7 +1069,7 @@ int dynptr_read_into_slot(void *ctx) ...@@ -1069,7 +1069,7 @@ int dynptr_read_into_slot(void *ctx)
/* bpf_dynptr_slice()s are read-only and cannot be written to */ /* bpf_dynptr_slice()s are read-only and cannot be written to */
SEC("?tc") SEC("?tc")
__failure __regex("R[0-9]+ cannot write into rdonly_mem") __failure __msg("R{{[0-9]+}} cannot write into rdonly_mem")
int skb_invalid_slice_write(struct __sk_buff *skb) int skb_invalid_slice_write(struct __sk_buff *skb)
{ {
struct bpf_dynptr ptr; struct bpf_dynptr ptr;
......
...@@ -105,7 +105,7 @@ long rbtree_api_remove_unadded_node(void *ctx) ...@@ -105,7 +105,7 @@ long rbtree_api_remove_unadded_node(void *ctx)
} }
SEC("?tc") SEC("?tc")
__failure __regex("Unreleased reference id=3 alloc_insn=[0-9]+") __failure __msg("Unreleased reference id=3 alloc_insn={{[0-9]+}}")
long rbtree_api_remove_no_drop(void *ctx) long rbtree_api_remove_no_drop(void *ctx)
{ {
struct bpf_rb_node *res; struct bpf_rb_node *res;
......
...@@ -32,7 +32,7 @@ static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b) ...@@ -32,7 +32,7 @@ static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
} }
SEC("?tc") SEC("?tc")
__failure __regex("Unreleased reference id=4 alloc_insn=[0-9]+") __failure __msg("Unreleased reference id=4 alloc_insn={{[0-9]+}}")
long rbtree_refcounted_node_ref_escapes(void *ctx) long rbtree_refcounted_node_ref_escapes(void *ctx)
{ {
struct node_acquire *n, *m; struct node_acquire *n, *m;
...@@ -73,7 +73,7 @@ long refcount_acquire_maybe_null(void *ctx) ...@@ -73,7 +73,7 @@ long refcount_acquire_maybe_null(void *ctx)
} }
SEC("?tc") SEC("?tc")
__failure __regex("Unreleased reference id=3 alloc_insn=[0-9]+") __failure __msg("Unreleased reference id=3 alloc_insn={{[0-9]+}}")
long rbtree_refcounted_node_ref_escapes_owning_input(void *ctx) long rbtree_refcounted_node_ref_escapes_owning_input(void *ctx)
{ {
struct node_acquire *n, *m; struct node_acquire *n, *m;
......
...@@ -78,6 +78,7 @@ __naked void canary_arm64_riscv64(void) ...@@ -78,6 +78,7 @@ __naked void canary_arm64_riscv64(void)
SEC("raw_tp") SEC("raw_tp")
__arch_x86_64 __arch_x86_64
__xlated("1: r0 = &(void __percpu *)(r0)") __xlated("1: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("3: exit") __xlated("3: exit")
__success __success
__naked void canary_zero_spills(void) __naked void canary_zero_spills(void)
...@@ -94,7 +95,9 @@ SEC("raw_tp") ...@@ -94,7 +95,9 @@ SEC("raw_tp")
__arch_x86_64 __arch_x86_64
__log_level(4) __msg("stack depth 16") __log_level(4) __msg("stack depth 16")
__xlated("1: *(u64 *)(r10 -16) = r1") __xlated("1: *(u64 *)(r10 -16) = r1")
__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)") __xlated("3: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("5: r2 = *(u64 *)(r10 -16)") __xlated("5: r2 = *(u64 *)(r10 -16)")
__success __success
__naked void wrong_reg_in_pattern1(void) __naked void wrong_reg_in_pattern1(void)
...@@ -113,7 +116,9 @@ __naked void wrong_reg_in_pattern1(void) ...@@ -113,7 +116,9 @@ __naked void wrong_reg_in_pattern1(void)
SEC("raw_tp") SEC("raw_tp")
__arch_x86_64 __arch_x86_64
__xlated("1: *(u64 *)(r10 -16) = r6") __xlated("1: *(u64 *)(r10 -16) = r6")
__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)") __xlated("3: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("5: r6 = *(u64 *)(r10 -16)") __xlated("5: r6 = *(u64 *)(r10 -16)")
__success __success
__naked void wrong_reg_in_pattern2(void) __naked void wrong_reg_in_pattern2(void)
...@@ -132,7 +137,9 @@ __naked void wrong_reg_in_pattern2(void) ...@@ -132,7 +137,9 @@ __naked void wrong_reg_in_pattern2(void)
SEC("raw_tp") SEC("raw_tp")
__arch_x86_64 __arch_x86_64
__xlated("1: *(u64 *)(r10 -16) = r0") __xlated("1: *(u64 *)(r10 -16) = r0")
__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)") __xlated("3: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("5: r0 = *(u64 *)(r10 -16)") __xlated("5: r0 = *(u64 *)(r10 -16)")
__success __success
__naked void wrong_reg_in_pattern3(void) __naked void wrong_reg_in_pattern3(void)
...@@ -151,7 +158,9 @@ __naked void wrong_reg_in_pattern3(void) ...@@ -151,7 +158,9 @@ __naked void wrong_reg_in_pattern3(void)
SEC("raw_tp") SEC("raw_tp")
__arch_x86_64 __arch_x86_64
__xlated("2: *(u64 *)(r2 -16) = r1") __xlated("2: *(u64 *)(r2 -16) = r1")
__xlated("...")
__xlated("4: r0 = &(void __percpu *)(r0)") __xlated("4: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("6: r1 = *(u64 *)(r10 -16)") __xlated("6: r1 = *(u64 *)(r10 -16)")
__success __success
__naked void wrong_base_in_pattern(void) __naked void wrong_base_in_pattern(void)
...@@ -171,7 +180,9 @@ __naked void wrong_base_in_pattern(void) ...@@ -171,7 +180,9 @@ __naked void wrong_base_in_pattern(void)
SEC("raw_tp") SEC("raw_tp")
__arch_x86_64 __arch_x86_64
__xlated("1: *(u64 *)(r10 -16) = r1") __xlated("1: *(u64 *)(r10 -16) = r1")
__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)") __xlated("3: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("5: r2 = 1") __xlated("5: r2 = 1")
__success __success
__naked void wrong_insn_in_pattern(void) __naked void wrong_insn_in_pattern(void)
...@@ -191,7 +202,9 @@ __naked void wrong_insn_in_pattern(void) ...@@ -191,7 +202,9 @@ __naked void wrong_insn_in_pattern(void)
SEC("raw_tp") SEC("raw_tp")
__arch_x86_64 __arch_x86_64
__xlated("2: *(u64 *)(r10 -16) = r1") __xlated("2: *(u64 *)(r10 -16) = r1")
__xlated("...")
__xlated("4: r0 = &(void __percpu *)(r0)") __xlated("4: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("6: r1 = *(u64 *)(r10 -8)") __xlated("6: r1 = *(u64 *)(r10 -8)")
__success __success
__naked void wrong_off_in_pattern1(void) __naked void wrong_off_in_pattern1(void)
...@@ -211,7 +224,9 @@ __naked void wrong_off_in_pattern1(void) ...@@ -211,7 +224,9 @@ __naked void wrong_off_in_pattern1(void)
SEC("raw_tp") SEC("raw_tp")
__arch_x86_64 __arch_x86_64
__xlated("1: *(u32 *)(r10 -4) = r1") __xlated("1: *(u32 *)(r10 -4) = r1")
__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)") __xlated("3: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("5: r1 = *(u32 *)(r10 -4)") __xlated("5: r1 = *(u32 *)(r10 -4)")
__success __success
__naked void wrong_off_in_pattern2(void) __naked void wrong_off_in_pattern2(void)
...@@ -230,7 +245,9 @@ __naked void wrong_off_in_pattern2(void) ...@@ -230,7 +245,9 @@ __naked void wrong_off_in_pattern2(void)
SEC("raw_tp") SEC("raw_tp")
__arch_x86_64 __arch_x86_64
__xlated("1: *(u32 *)(r10 -16) = r1") __xlated("1: *(u32 *)(r10 -16) = r1")
__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)") __xlated("3: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("5: r1 = *(u32 *)(r10 -16)") __xlated("5: r1 = *(u32 *)(r10 -16)")
__success __success
__naked void wrong_size_in_pattern(void) __naked void wrong_size_in_pattern(void)
...@@ -249,7 +266,9 @@ __naked void wrong_size_in_pattern(void) ...@@ -249,7 +266,9 @@ __naked void wrong_size_in_pattern(void)
SEC("raw_tp") SEC("raw_tp")
__arch_x86_64 __arch_x86_64
__xlated("2: *(u32 *)(r10 -8) = r1") __xlated("2: *(u32 *)(r10 -8) = r1")
__xlated("...")
__xlated("4: r0 = &(void __percpu *)(r0)") __xlated("4: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("6: r1 = *(u32 *)(r10 -8)") __xlated("6: r1 = *(u32 *)(r10 -8)")
__success __success
__naked void partial_pattern(void) __naked void partial_pattern(void)
...@@ -275,11 +294,15 @@ __xlated("1: r2 = 2") ...@@ -275,11 +294,15 @@ __xlated("1: r2 = 2")
/* not patched, spills for -8, -16 not removed */ /* not patched, spills for -8, -16 not removed */
__xlated("2: *(u64 *)(r10 -8) = r1") __xlated("2: *(u64 *)(r10 -8) = r1")
__xlated("3: *(u64 *)(r10 -16) = r2") __xlated("3: *(u64 *)(r10 -16) = r2")
__xlated("...")
__xlated("5: r0 = &(void __percpu *)(r0)") __xlated("5: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("7: r2 = *(u64 *)(r10 -16)") __xlated("7: r2 = *(u64 *)(r10 -16)")
__xlated("8: r1 = *(u64 *)(r10 -8)") __xlated("8: r1 = *(u64 *)(r10 -8)")
/* patched, spills for -24, -32 removed */ /* patched, spills for -24, -32 removed */
__xlated("...")
__xlated("10: r0 = &(void __percpu *)(r0)") __xlated("10: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("12: exit") __xlated("12: exit")
__success __success
__naked void min_stack_offset(void) __naked void min_stack_offset(void)
...@@ -308,7 +331,9 @@ __naked void min_stack_offset(void) ...@@ -308,7 +331,9 @@ __naked void min_stack_offset(void)
SEC("raw_tp") SEC("raw_tp")
__arch_x86_64 __arch_x86_64
__xlated("1: *(u64 *)(r10 -8) = r1") __xlated("1: *(u64 *)(r10 -8) = r1")
__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)") __xlated("3: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("5: r1 = *(u64 *)(r10 -8)") __xlated("5: r1 = *(u64 *)(r10 -8)")
__success __success
__naked void bad_fixed_read(void) __naked void bad_fixed_read(void)
...@@ -330,7 +355,9 @@ __naked void bad_fixed_read(void) ...@@ -330,7 +355,9 @@ __naked void bad_fixed_read(void)
SEC("raw_tp") SEC("raw_tp")
__arch_x86_64 __arch_x86_64
__xlated("1: *(u64 *)(r10 -8) = r1") __xlated("1: *(u64 *)(r10 -8) = r1")
__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)") __xlated("3: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("5: r1 = *(u64 *)(r10 -8)") __xlated("5: r1 = *(u64 *)(r10 -8)")
__success __success
__naked void bad_fixed_write(void) __naked void bad_fixed_write(void)
...@@ -352,7 +379,9 @@ __naked void bad_fixed_write(void) ...@@ -352,7 +379,9 @@ __naked void bad_fixed_write(void)
SEC("raw_tp") SEC("raw_tp")
__arch_x86_64 __arch_x86_64
__xlated("6: *(u64 *)(r10 -16) = r1") __xlated("6: *(u64 *)(r10 -16) = r1")
__xlated("...")
__xlated("8: r0 = &(void __percpu *)(r0)") __xlated("8: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("10: r1 = *(u64 *)(r10 -16)") __xlated("10: r1 = *(u64 *)(r10 -16)")
__success __success
__naked void bad_varying_read(void) __naked void bad_varying_read(void)
...@@ -379,7 +408,9 @@ __naked void bad_varying_read(void) ...@@ -379,7 +408,9 @@ __naked void bad_varying_read(void)
SEC("raw_tp") SEC("raw_tp")
__arch_x86_64 __arch_x86_64
__xlated("6: *(u64 *)(r10 -16) = r1") __xlated("6: *(u64 *)(r10 -16) = r1")
__xlated("...")
__xlated("8: r0 = &(void __percpu *)(r0)") __xlated("8: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("10: r1 = *(u64 *)(r10 -16)") __xlated("10: r1 = *(u64 *)(r10 -16)")
__success __success
__naked void bad_varying_write(void) __naked void bad_varying_write(void)
...@@ -406,7 +437,9 @@ __naked void bad_varying_write(void) ...@@ -406,7 +437,9 @@ __naked void bad_varying_write(void)
SEC("raw_tp") SEC("raw_tp")
__arch_x86_64 __arch_x86_64
__xlated("1: *(u64 *)(r10 -8) = r1") __xlated("1: *(u64 *)(r10 -8) = r1")
__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)") __xlated("3: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("5: r1 = *(u64 *)(r10 -8)") __xlated("5: r1 = *(u64 *)(r10 -8)")
__success __success
__naked void bad_write_in_subprog(void) __naked void bad_write_in_subprog(void)
...@@ -438,7 +471,9 @@ __naked static void bad_write_in_subprog_aux(void) ...@@ -438,7 +471,9 @@ __naked static void bad_write_in_subprog_aux(void)
SEC("raw_tp") SEC("raw_tp")
__arch_x86_64 __arch_x86_64
__xlated("1: *(u64 *)(r10 -8) = r1") __xlated("1: *(u64 *)(r10 -8) = r1")
__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)") __xlated("3: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("5: r1 = *(u64 *)(r10 -8)") __xlated("5: r1 = *(u64 *)(r10 -8)")
__success __success
__naked void bad_helper_write(void) __naked void bad_helper_write(void)
...@@ -466,13 +501,19 @@ SEC("raw_tp") ...@@ -466,13 +501,19 @@ SEC("raw_tp")
__arch_x86_64 __arch_x86_64
/* main, not patched */ /* main, not patched */
__xlated("1: *(u64 *)(r10 -8) = r1") __xlated("1: *(u64 *)(r10 -8) = r1")
__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)") __xlated("3: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("5: r1 = *(u64 *)(r10 -8)") __xlated("5: r1 = *(u64 *)(r10 -8)")
__xlated("...")
__xlated("9: call pc+1") __xlated("9: call pc+1")
__xlated("...")
__xlated("10: exit") __xlated("10: exit")
/* subprogram, patched */ /* subprogram, patched */
__xlated("11: r1 = 1") __xlated("11: r1 = 1")
__xlated("...")
__xlated("13: r0 = &(void __percpu *)(r0)") __xlated("13: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("15: exit") __xlated("15: exit")
__success __success
__naked void invalidate_one_subprog(void) __naked void invalidate_one_subprog(void)
...@@ -510,12 +551,16 @@ SEC("raw_tp") ...@@ -510,12 +551,16 @@ SEC("raw_tp")
__arch_x86_64 __arch_x86_64
/* main */ /* main */
__xlated("0: r1 = 1") __xlated("0: r1 = 1")
__xlated("...")
__xlated("2: r0 = &(void __percpu *)(r0)") __xlated("2: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("4: call pc+1") __xlated("4: call pc+1")
__xlated("5: exit") __xlated("5: exit")
/* subprogram */ /* subprogram */
__xlated("6: r1 = 1") __xlated("6: r1 = 1")
__xlated("...")
__xlated("8: r0 = &(void __percpu *)(r0)") __xlated("8: r0 = &(void __percpu *)(r0)")
__xlated("...")
__xlated("10: *(u64 *)(r10 -16) = r1") __xlated("10: *(u64 *)(r10 -16) = r1")
__xlated("11: exit") __xlated("11: exit")
__success __success
...@@ -576,7 +621,9 @@ __log_level(4) __msg("stack depth 16") ...@@ -576,7 +621,9 @@ __log_level(4) __msg("stack depth 16")
/* may_goto counter at -16 */ /* may_goto counter at -16 */
__xlated("0: *(u64 *)(r10 -16) =") __xlated("0: *(u64 *)(r10 -16) =")
__xlated("1: r1 = 1") __xlated("1: r1 = 1")
__xlated("...")
__xlated("3: r0 = &(void __percpu *)(r0)") __xlated("3: r0 = &(void __percpu *)(r0)")
__xlated("...")
/* may_goto expansion starts */ /* may_goto expansion starts */
__xlated("5: r11 = *(u64 *)(r10 -16)") __xlated("5: r11 = *(u64 *)(r10 -16)")
__xlated("6: if r11 == 0x0 goto pc+3") __xlated("6: if r11 == 0x0 goto pc+3")
...@@ -623,13 +670,15 @@ __xlated("5: r0 = *(u32 *)(r0 +0)") ...@@ -623,13 +670,15 @@ __xlated("5: r0 = *(u32 *)(r0 +0)")
__xlated("6: r2 =") __xlated("6: r2 =")
__xlated("7: r3 = 0") __xlated("7: r3 = 0")
__xlated("8: r4 = 0") __xlated("8: r4 = 0")
__xlated("...")
/* ... part of the inlined bpf_loop */ /* ... part of the inlined bpf_loop */
__xlated("12: *(u64 *)(r10 -32) = r6") __xlated("12: *(u64 *)(r10 -32) = r6")
__xlated("13: *(u64 *)(r10 -24) = r7") __xlated("13: *(u64 *)(r10 -24) = r7")
__xlated("14: *(u64 *)(r10 -16) = r8") __xlated("14: *(u64 *)(r10 -16) = r8")
/* ... */ __xlated("...")
__xlated("21: call pc+8") /* dummy_loop_callback */ __xlated("21: call pc+8") /* dummy_loop_callback */
/* ... last insns of the bpf_loop_interaction1 */ /* ... last insns of the bpf_loop_interaction1 */
__xlated("...")
__xlated("28: r0 = 0") __xlated("28: r0 = 0")
__xlated("29: exit") __xlated("29: exit")
/* dummy_loop_callback */ /* dummy_loop_callback */
...@@ -670,7 +719,7 @@ __xlated("5: r0 = *(u32 *)(r0 +0)") ...@@ -670,7 +719,7 @@ __xlated("5: r0 = *(u32 *)(r0 +0)")
__xlated("6: *(u64 *)(r10 -16) = r1") __xlated("6: *(u64 *)(r10 -16) = r1")
__xlated("7: call") __xlated("7: call")
__xlated("8: r1 = *(u64 *)(r10 -16)") __xlated("8: r1 = *(u64 *)(r10 -16)")
/* ... */ __xlated("...")
/* ... part of the inlined bpf_loop */ /* ... part of the inlined bpf_loop */
__xlated("15: *(u64 *)(r10 -40) = r6") __xlated("15: *(u64 *)(r10 -40) = r6")
__xlated("16: *(u64 *)(r10 -32) = r7") __xlated("16: *(u64 *)(r10 -32) = r7")
......
...@@ -1213,10 +1213,10 @@ __success __log_level(2) ...@@ -1213,10 +1213,10 @@ __success __log_level(2)
* - once for path entry - label 2; * - once for path entry - label 2;
* - once for path entry - label 1 - label 2. * - once for path entry - label 1 - label 2.
*/ */
__msg("r1 = *(u64 *)(r10 -8)") __msg("8: (79) r1 = *(u64 *)(r10 -8)")
__msg("exit") __msg("9: (95) exit")
__msg("r1 = *(u64 *)(r10 -8)") __msg("from 2 to 7")
__msg("exit") __msg("8: safe")
__msg("processed 11 insns") __msg("processed 11 insns")
__flag(BPF_F_TEST_STATE_FREQ) __flag(BPF_F_TEST_STATE_FREQ)
__naked void old_stack_misc_vs_cur_ctx_ptr(void) __naked void old_stack_misc_vs_cur_ctx_ptr(void)
......
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
int main(void);
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, 1);
__uint(key_size, sizeof(__u32));
__array(values, void (void));
} jmp_table SEC(".maps") = {
.values = {
[0] = (void *) &main,
},
};
__noinline __auxiliary
static __naked int sub(void)
{
asm volatile (
"r2 = %[jmp_table] ll;"
"r3 = 0;"
"call 12;"
"exit;"
:
: __imm_addr(jmp_table)
: __clobber_all);
}
__success
__arch_x86_64
/* program entry for main(), regular function prologue */
__jited(" endbr64")
__jited(" nopl (%rax,%rax)")
__jited(" xorq %rax, %rax")
__jited(" pushq %rbp")
__jited(" movq %rsp, %rbp")
/* tail call prologue for program:
* - establish memory location for tail call counter at &rbp[-8];
* - spill tail_call_cnt_ptr at &rbp[-16];
* - expect tail call counter to be passed in rax;
* - for entry program rax is a raw counter, value < 33;
* - for tail called program rax is tail_call_cnt_ptr (value > 33).
*/
__jited(" endbr64")
__jited(" cmpq $0x21, %rax")
__jited(" ja L0")
__jited(" pushq %rax")
__jited(" movq %rsp, %rax")
__jited(" jmp L1")
__jited("L0: pushq %rax") /* rbp[-8] = rax */
__jited("L1: pushq %rax") /* rbp[-16] = rax */
/* on subprogram call restore rax to be tail_call_cnt_ptr from rbp[-16]
* (cause original rax might be clobbered by this point)
*/
__jited(" movq -0x10(%rbp), %rax")
__jited(" callq 0x{{.*}}") /* call to sub() */
__jited(" xorl %eax, %eax")
__jited(" leave")
__jited(" retq")
__jited("...")
/* subprogram entry for sub(), regular function prologue */
__jited(" endbr64")
__jited(" nopl (%rax,%rax)")
__jited(" nopl (%rax)")
__jited(" pushq %rbp")
__jited(" movq %rsp, %rbp")
/* tail call prologue for subprogram address of tail call counter
* stored at rbp[-16].
*/
__jited(" endbr64")
__jited(" pushq %rax") /* rbp[-8] = rax */
__jited(" pushq %rax") /* rbp[-16] = rax */
__jited(" movabsq ${{.*}}, %rsi") /* r2 = &jmp_table */
__jited(" xorl %edx, %edx") /* r3 = 0 */
/* bpf_tail_call implementation:
* - load tail_call_cnt_ptr from rbp[-16];
* - if *tail_call_cnt_ptr < 33, increment it and jump to target;
* - otherwise do nothing.
*/
__jited(" movq -0x10(%rbp), %rax")
__jited(" cmpq $0x21, (%rax)")
__jited(" jae L0")
__jited(" nopl (%rax,%rax)")
__jited(" addq $0x1, (%rax)") /* *tail_call_cnt_ptr += 1 */
__jited(" popq %rax")
__jited(" popq %rax")
__jited(" jmp {{.*}}") /* jump to tail call tgt */
__jited("L0: leave")
__jited(" retq")
SEC("tc")
__naked int main(void)
{
asm volatile (
"call %[sub];"
"r0 = 0;"
"exit;"
:
: __imm(sub)
: __clobber_all);
}
char __license[] SEC("license") = "GPL";
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment