
Commit cf67d28d authored by Alexei Starovoitov

Merge branch 'implement-cpuv4-support-for-s390x'

Ilya Leoshkevich says:

====================
Implement cpuv4 support for s390x

v1: https://lore.kernel.org/bpf/20230830011128.1415752-1-iii@linux.ibm.com/
v1 -> v2:
- Redo "Disable zero-extension for BPF_MEMSX" as Puranjay and Alexei
  suggested.
- Drop the bpf_ct_insert_entry() patch; it went in via the bpf tree.
- Rebase; the Acked-bys were not carried over, because there were
  conflicts that had to be fixed.

Hi,

This series adds cpuv4 support to the s390x eBPF JIT.
Patches 1-3 are preliminary bugfixes.
Patches 4-8 implement the new instructions.
Patches 9-10 enable the tests.

Best regards,
Ilya
====================
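For orientation (an editor's addition, not part of the cover letter): "cpuv4" is the fourth revision of the BPF instruction set, adding sign-extending loads (BPF_MEMSX), sign-extending register moves, signed division and modulo, an unconditional byte swap, and a jump with a 32-bit offset. The sketch below shows these instructions in the inline-asm dialect the selftests in this diff use; the function name is hypothetical, and it assumes bpf_misc.h and clang >= 18.

/* Illustrative sampler of the cpuv4 instructions this series JITs on
 * s390x; not taken from the patches themselves. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

SEC("socket")
__naked void cpuv4_instruction_sampler(void)
{
	asm volatile (
	"r1 = -2;"
	"*(u64 *)(r10 - 8) = r1;"
	"r0 = *(s32 *)(r10 - 8);"	/* BPF_MEMSX: sign-extending load */
	"r0 = (s16)r0;"			/* BPF_MOVSX: sign-extending move */
	"r1 = 2;"
	"r0 s/= r1;"			/* BPF_SDIV: signed division */
	"r0 = bswap16 r0;"		/* BPF_BSWAP: unconditional byte swap */
	"gotol l0_%=;"			/* BPF_JA with a 32-bit offset */
"l0_%=:"
	"exit;"
	::: __clobber_all);
}

char _license[] SEC("license") = "GPL";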

Link: https://lore.kernel.org/r/20230919101336.2223655-1-iii@linux.ibm.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents e9cbc890 c29913bb
[A large diff in this commit is collapsed and not shown in this view.]
@@ -3114,7 +3114,7 @@ static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn,
 	if (class == BPF_LDX) {
 		if (t != SRC_OP)
-			return BPF_SIZE(code) == BPF_DW;
+			return BPF_SIZE(code) == BPF_DW || BPF_MODE(code) == BPF_MEMSX;
 		/* LDX source must be ptr. */
 		return true;
 	}
......
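The one-line verifier change above is the "Disable zero-extension for BPF_MEMSX" fix from the cover letter: a sign-extending load defines all 64 bits of the destination register, so is_reg64() must report it as a 64-bit write, or the verifier's subregister tracking would allow a zero-extension to be inserted after it, wiping the sign bits. A standalone C sketch of that failure mode (ours, not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t slot = (uint64_t)-2;	/* *(u64 *)(fp - 8) = -2 */

	/* BPF_MEMSX semantics: load one byte, sign-extend to 64 bits. */
	int64_t r0 = (int8_t)(slot & 0xff);
	printf("sign-extended: %lld\n", (long long)r0);	/* -2 */

	/* If a zero-extension of the upper 32 bits ran afterwards, as it
	 * does for ordinary 32-bit subregister writes, the sign bits
	 * would be destroyed: */
	uint64_t clobbered = (uint32_t)r0;
	printf("zero-extended: 0x%llx\n", (unsigned long long)clobbered);
	return 0;
}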
# TEMPORARY
# Alphabetical order
bloom_filter_map # failed to find kernel BTF type ID of '__x64_sys_getpgid': -3 (?)
bpf_cookie # failed to open_and_load program: -524 (trampoline)
bpf_loop # attaches to __x64_sys_nanosleep
cgrp_local_storage # prog_attach unexpected error: -524 (trampoline)
dynptr/test_dynptr_skb_data
dynptr/test_skb_readonly
exceptions # JIT does not support calling kfunc bpf_throw (exceptions)
fexit_sleep # fexit_skel_load fexit skeleton failed (trampoline)
get_stack_raw_tp # user_stack corrupted user stack (no backchain userspace)
iters/testmod_seq* # s390x doesn't support kfuncs in modules yet
kprobe_multi_bench_attach # bpf_program__attach_kprobe_multi_opts unexpected error: -95
kprobe_multi_test # relies on fentry
ksyms_btf/weak_ksyms* # test_ksyms_weak__open_and_load unexpected error: -22 (kfunc)
ksyms_module # test_ksyms_module__open_and_load unexpected error: -9 (?)
ksyms_module_libbpf # JIT does not support calling kernel function (kfunc)
ksyms_module_lskel # test_ksyms_module_lskel__open_and_load unexpected error: -9 (?)
module_attach # skel_attach skeleton attach failed: -524 (trampoline)
ringbuf # skel_load skeleton load failed (?)
stacktrace_build_id # compare_map_keys stackid_hmap vs. stackmap err -2 errno 2 (?)
test_lsm # attach unexpected error: -524 (trampoline)
trace_printk # trace_printk__load unexpected error: -2 (errno 2) (?)
trace_vprintk # trace_vprintk__open_and_load unexpected error: -9 (?)
unpriv_bpf_disabled # fentry
user_ringbuf # failed to find kernel BTF type ID of '__s390x_sys_prctl': -3 (?)
verif_stats # trace_vprintk__open_and_load unexpected error: -9 (?)
xdp_bonding # failed to auto-attach program 'trace_on_entry': -524 (trampoline)
xdp_metadata # JIT does not support calling kernel function (kfunc)
test_task_under_cgroup # JIT does not support calling kernel function (kfunc)
@@ -49,6 +49,10 @@
 	snprintf(buf, sizeof(buf), "%s%s", NETCLS_MOUNT_PATH, \
 		 CGROUP_WORK_DIR)
 
+static __thread bool cgroup_workdir_mounted;
+
+static void __cleanup_cgroup_environment(void);
+
 static int __enable_controllers(const char *cgroup_path, const char *controllers)
 {
 	char path[PATH_MAX + 1];
@@ -209,9 +213,10 @@ int setup_cgroup_environment(void)
 		log_err("mount cgroup2");
 		return 1;
 	}
+	cgroup_workdir_mounted = true;
 
 	/* Cleanup existing failed runs, now that the environment is setup */
-	cleanup_cgroup_environment();
+	__cleanup_cgroup_environment();
 
 	if (mkdir(cgroup_workdir, 0777) && errno != EEXIST) {
 		log_err("mkdir cgroup work dir");
@@ -305,11 +310,26 @@ int join_parent_cgroup(const char *relative_path)
 	return join_cgroup_from_top(cgroup_path);
 }
 
+/**
+ * __cleanup_cgroup_environment() - Delete temporary cgroups
+ *
+ * This is a helper for cleanup_cgroup_environment() that is responsible for
+ * deletion of all temporary cgroups that have been created during the test.
+ */
+static void __cleanup_cgroup_environment(void)
+{
+	char cgroup_workdir[PATH_MAX + 1];
+
+	format_cgroup_path(cgroup_workdir, "");
+	join_cgroup_from_top(CGROUP_MOUNT_PATH);
+	nftw(cgroup_workdir, nftwfunc, WALK_FD_LIMIT, FTW_DEPTH | FTW_MOUNT);
+}
+
 /**
  * cleanup_cgroup_environment() - Cleanup Cgroup Testing Environment
  *
  * This is an idempotent function to delete all temporary cgroups that
- * have been created during the test, including the cgroup testing work
+ * have been created during the test and unmount the cgroup testing work
  * directory.
  *
 * At call time, it moves the calling process to the root cgroup, and then
@@ -320,11 +340,10 @@ int join_parent_cgroup(const char *relative_path)
  */
 void cleanup_cgroup_environment(void)
 {
-	char cgroup_workdir[PATH_MAX + 1];
-
-	format_cgroup_path(cgroup_workdir, "");
-	join_cgroup_from_top(CGROUP_MOUNT_PATH);
-	nftw(cgroup_workdir, nftwfunc, WALK_FD_LIMIT, FTW_DEPTH | FTW_MOUNT);
+	__cleanup_cgroup_environment();
+	if (cgroup_workdir_mounted && umount(CGROUP_MOUNT_PATH))
+		log_err("umount cgroup2");
+	cgroup_workdir_mounted = false;
 }
 
 /**
......
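The net effect of the cgroup_helpers.c change above: deleting the temporary cgroups is split out into __cleanup_cgroup_environment(), and only a process that actually mounted cgroup2 (tracked by cgroup_workdir_mounted) unmounts it on cleanup. A sketch of the resulting calling pattern, using the helper names from the diff; the test body itself is hypothetical:

#include "cgroup_helpers.h"

static void run_one_cgroup_test(void)
{
	/* Mounts cgroup2 if needed and records that fact in
	 * cgroup_workdir_mounted; internally calls
	 * __cleanup_cgroup_environment(), so stale cgroups from failed
	 * runs are deleted without unmounting the hierarchy. */
	if (setup_cgroup_environment())
		return;

	/* ... create cgroups, attach programs, run checks ... */

	/* Deletes the temporary cgroups, and unmounts cgroup2 only if
	 * this process was the one that mounted it. */
	cleanup_cgroup_environment();
}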
@@ -6,7 +6,8 @@
 #include <bpf/bpf_tracing.h>
 
 #if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
-     (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) && __clang_major__ >= 18
+     (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
+     defined(__TARGET_ARCH_s390)) && __clang_major__ >= 18
 const volatile int skip = 0;
 #else
 const volatile int skip = 1;
@@ -104,7 +105,11 @@ int _tc(volatile struct __sk_buff *skb)
 		      "%[tmp_mark] = r1"
 		      : [tmp_mark]"=r"(tmp_mark)
 		      : [ctx]"r"(skb),
-			[off_mark]"i"(offsetof(struct __sk_buff, mark))
+			[off_mark]"i"(offsetof(struct __sk_buff, mark)
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+			 + sizeof(skb->mark) - 1
+#endif
+			)
 		      : "r1");
 #else
 	tmp_mark = (char)skb->mark;
......
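The offsetof() adjustment above exists because a one-byte access to a 32-bit field must target the byte holding the least significant bits, and that byte sits at opposite ends of the field on little- and big-endian machines. A small standalone C demonstration (ours, not from the commit):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint32_t mark = 0x11223344;
	unsigned char bytes[sizeof(mark)];

	memcpy(bytes, &mark, sizeof(mark));
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	/* On big-endian, the LSB is the last byte of the field. */
	printf("LSB at offset %zu: 0x%02x\n", sizeof(mark) - 1,
	       bytes[sizeof(mark) - 1]);	/* 0x44 */
#else
	/* On little-endian, the LSB is the first byte of the field. */
	printf("LSB at offset 0: 0x%02x\n", bytes[0]);	/* 0x44 */
#endif
	return 0;
}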
@@ -5,7 +5,8 @@
 #include "bpf_misc.h"
 
 #if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
-     (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || defined(__TARGET_ARCH_arm)) && \
+     (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
+     defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390)) && \
 	__clang_major__ >= 18
 
 SEC("socket")
......
@@ -5,7 +5,8 @@
 #include "bpf_misc.h"
 
 #if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
-     (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || defined(__TARGET_ARCH_arm)) && \
+     (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
+     defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390)) && \
 	__clang_major__ >= 18
 
 SEC("socket")
......
@@ -5,7 +5,8 @@
 #include "bpf_misc.h"
 
 #if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
-     (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || defined(__TARGET_ARCH_arm)) && \
+     (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
+     defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390)) && \
 	__clang_major__ >= 18
 
 SEC("socket")
@@ -13,12 +14,16 @@ __description("LDSX, S8")
 __success __success_unpriv __retval(-2)
 __naked void ldsx_s8(void)
 {
-	asm volatile ("				\
-	r1 = 0x3fe;				\
-	*(u64 *)(r10 - 8) = r1;			\
-	r0 = *(s8 *)(r10 - 8);			\
-	exit;					\
-"	::: __clobber_all);
+	asm volatile (
+	"r1 = 0x3fe;"
+	"*(u64 *)(r10 - 8) = r1;"
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+	"r0 = *(s8 *)(r10 - 8);"
+#else
+	"r0 = *(s8 *)(r10 - 1);"
+#endif
+	"exit;"
+	::: __clobber_all);
 }
 
 SEC("socket")
@@ -26,12 +31,16 @@ __description("LDSX, S16")
 __success __success_unpriv __retval(-2)
 __naked void ldsx_s16(void)
 {
-	asm volatile ("				\
-	r1 = 0x3fffe;				\
-	*(u64 *)(r10 - 8) = r1;			\
-	r0 = *(s16 *)(r10 - 8);			\
-	exit;					\
-"	::: __clobber_all);
+	asm volatile (
+	"r1 = 0x3fffe;"
+	"*(u64 *)(r10 - 8) = r1;"
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+	"r0 = *(s16 *)(r10 - 8);"
+#else
+	"r0 = *(s16 *)(r10 - 2);"
+#endif
+	"exit;"
+	::: __clobber_all);
 }
 
 SEC("socket")
@@ -39,13 +48,17 @@ __description("LDSX, S32")
 __success __success_unpriv __retval(-1)
 __naked void ldsx_s32(void)
 {
-	asm volatile ("				\
-	r1 = 0xfffffffe;			\
-	*(u64 *)(r10 - 8) = r1;			\
-	r0 = *(s32 *)(r10 - 8);			\
-	r0 >>= 1;				\
-	exit;					\
-"	::: __clobber_all);
+	asm volatile (
+	"r1 = 0xfffffffe;"
+	"*(u64 *)(r10 - 8) = r1;"
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+	"r0 = *(s32 *)(r10 - 8);"
+#else
+	"r0 = *(s32 *)(r10 - 4);"
+#endif
+	"r0 >>= 1;"
+	"exit;"
+	::: __clobber_all);
 }
 
 SEC("socket")
@@ -54,20 +67,24 @@ __log_level(2) __success __retval(1)
 __msg("R1_w=scalar(smin=-128,smax=127)")
 __naked void ldsx_s8_range_priv(void)
 {
-	asm volatile ("				\
-	call %[bpf_get_prandom_u32];		\
-	*(u64 *)(r10 - 8) = r0;			\
-	r1 = *(s8 *)(r10 - 8);			\
-	/* r1 with s8 range */			\
-	if r1 s> 0x7f goto l0_%=;		\
-	if r1 s< -0x80 goto l0_%=;		\
-	r0 = 1;					\
-l1_%=:						\
-	exit;					\
-l0_%=:						\
-	r0 = 2;					\
-	goto l1_%=;				\
-"	:
+	asm volatile (
+	"call %[bpf_get_prandom_u32];"
+	"*(u64 *)(r10 - 8) = r0;"
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+	"r1 = *(s8 *)(r10 - 8);"
+#else
+	"r1 = *(s8 *)(r10 - 1);"
+#endif
+	/* r1 with s8 range */
+	"if r1 s> 0x7f goto l0_%=;"
+	"if r1 s< -0x80 goto l0_%=;"
+	"r0 = 1;"
+"l1_%=:"
+	"exit;"
+"l0_%=:"
+	"r0 = 2;"
+	"goto l1_%=;"
+	:
 	: __imm(bpf_get_prandom_u32)
 	: __clobber_all);
 }
@@ -77,20 +94,24 @@ __description("LDSX, S16 range checking")
 __success __success_unpriv __retval(1)
 __naked void ldsx_s16_range(void)
 {
-	asm volatile ("				\
-	call %[bpf_get_prandom_u32];		\
-	*(u64 *)(r10 - 8) = r0;			\
-	r1 = *(s16 *)(r10 - 8);			\
-	/* r1 with s16 range */			\
-	if r1 s> 0x7fff goto l0_%=;		\
-	if r1 s< -0x8000 goto l0_%=;		\
-	r0 = 1;					\
-l1_%=:						\
-	exit;					\
-l0_%=:						\
-	r0 = 2;					\
-	goto l1_%=;				\
-"	:
+	asm volatile (
+	"call %[bpf_get_prandom_u32];"
+	"*(u64 *)(r10 - 8) = r0;"
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+	"r1 = *(s16 *)(r10 - 8);"
+#else
+	"r1 = *(s16 *)(r10 - 2);"
+#endif
+	/* r1 with s16 range */
+	"if r1 s> 0x7fff goto l0_%=;"
+	"if r1 s< -0x8000 goto l0_%=;"
+	"r0 = 1;"
+"l1_%=:"
+	"exit;"
+"l0_%=:"
+	"r0 = 2;"
+	"goto l1_%=;"
+	:
 	: __imm(bpf_get_prandom_u32)
 	: __clobber_all);
 }
@@ -100,20 +121,24 @@ __description("LDSX, S32 range checking")
 __success __success_unpriv __retval(1)
 __naked void ldsx_s32_range(void)
 {
-	asm volatile ("				\
-	call %[bpf_get_prandom_u32];		\
-	*(u64 *)(r10 - 8) = r0;			\
-	r1 = *(s32 *)(r10 - 8);			\
-	/* r1 with s16 range */			\
-	if r1 s> 0x7fffFFFF goto l0_%=;		\
-	if r1 s< -0x80000000 goto l0_%=;	\
-	r0 = 1;					\
-l1_%=:						\
-	exit;					\
-l0_%=:						\
-	r0 = 2;					\
-	goto l1_%=;				\
-"	:
+	asm volatile (
+	"call %[bpf_get_prandom_u32];"
+	"*(u64 *)(r10 - 8) = r0;"
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+	"r1 = *(s32 *)(r10 - 8);"
+#else
+	"r1 = *(s32 *)(r10 - 4);"
+#endif
+	/* r1 with s16 range */
+	"if r1 s> 0x7fffFFFF goto l0_%=;"
+	"if r1 s< -0x80000000 goto l0_%=;"
+	"r0 = 1;"
+"l1_%=:"
+	"exit;"
+"l0_%=:"
+	"r0 = 2;"
+	"goto l1_%=;"
+	:
 	: __imm(bpf_get_prandom_u32)
 	: __clobber_all);
 }
......
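All of the ldsx rewrites above follow one pattern: the test stores a u64 at r10 - 8 and then sign-extends its low byte, halfword, or word, so on big-endian the load address must move to the high end of the slot (r10 - 1, r10 - 2, r10 - 4 respectively). A userspace C analogue (ours) of the S8 case:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char stack[8];		/* stands in for fp-8 .. fp-1 */
	uint64_t r1 = 0x3fe;		/* value from the "LDSX, S8" test */

	memcpy(stack, &r1, sizeof(r1));	/* *(u64 *)(r10 - 8) = r1 */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	int8_t r0 = (int8_t)stack[7];	/* r0 = *(s8 *)(r10 - 1) */
#else
	int8_t r0 = (int8_t)stack[0];	/* r0 = *(s8 *)(r10 - 8) */
#endif
	printf("%d\n", r0);		/* 0xfe sign-extends to -2 */
	return 0;
}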
@@ -5,7 +5,8 @@
 #include "bpf_misc.h"
 
 #if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
-     (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || defined(__TARGET_ARCH_arm)) && \
+     (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
+     defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390)) && \
 	__clang_major__ >= 18
 
 SEC("socket")
......
@@ -5,7 +5,8 @@
 #include "bpf_misc.h"
 
 #if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
-     (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || defined(__TARGET_ARCH_arm)) && \
+     (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
+     defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390)) && \
 	__clang_major__ >= 18
 
 SEC("socket")
......