Commit 55c14321 authored by Alexei Starovoitov

Merge branch 'bpf-inline-bpf_kptr_xchg'

Hou Tao says:

====================
The motivation for inlining bpf_kptr_xchg() comes from performance
profiling of the bpf memory allocator benchmark [1]. The benchmark uses
bpf_kptr_xchg() to stash allocated objects and to pop the stashed
objects for freeing. After inlining bpf_kptr_xchg(), the performance of
object freeing on an 8-CPU VM increases by about 2%~10%. However, the
performance gain comes at a cost: both the KASAN and KCSAN checks on
the pointer become unavailable. Initially the inlining was implemented
directly in do_jit() for x86-64, but I think implementing it in the
verifier is more portable.
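
For illustration, the stash/pop pattern in the benchmark boils down to
roughly the following sketch ("slot" and "struct node" are hypothetical
names used here for illustration, not taken from this series):

  struct node { long data; };
  private(pool) struct node __kptr *slot;

  /* stash: publish a freshly allocated object, dropping any old one */
  old = bpf_kptr_xchg(&slot, new);
  if (old)
          bpf_obj_drop(old);

  /* pop: take back ownership by swapping NULL in */
  node = bpf_kptr_xchg(&slot, NULL);

With patch #1, the verifier rewrites each such call (on a 64-bit JIT
that reports bpf_jit_supports_ptr_xchg()) into:

  r0 = r2
  r0 = atomic64_xchg((u64 *)(r1 + 0), r0)

so the JIT emits the exchange inline instead of emitting a helper call.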

Patch #1 supports inlining the bpf_kptr_xchg() helper and enables it on
x86-64. Patch #2 factors out a helper for the newly-added test in
patch #3. Patch #3 tests whether bpf_kptr_xchg() is inlined as expected.
Please see individual patches for more details. And comments are always
welcome.

Change Log:
v3:
  * rebased on bpf-next tree
  * patch 1 & 2: Add Reviewed-by and Acked-by tags from Eduard
  * patch 3: use inline assembly and a naked function instead of C code
             (suggested by Eduard)

v2: https://lore.kernel.org/bpf/20231223104042.1432300-1-houtao@huaweicloud.com/
  * rebased on bpf-next tree
  * drop patch #1 in v1 due to discussion in [2]
  * patch #1: add the motivation in the commit message, merge patch #1
              and #3 into the new patch in v2. (Daniel)
  * patch #2/#3: newly-added patch to test the inlining of
                 bpf_kptr_xchg() (Eduard)

v1: https://lore.kernel.org/bpf/95b8c2cd-44d5-5fe1-60b5-7e8218779566@huaweicloud.com/

[1]: https://lore.kernel.org/bpf/20231221141501.3588586-1-houtao@huaweicloud.com/
[2]: https://lore.kernel.org/bpf/fd94efb9-4a56-c982-dc2e-c66be5202cb7@huaweicloud.com/
====================

Link: https://lore.kernel.org/r/20240105104819.3916743-1-houtao@huaweicloud.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents 2121c43f 17bda53e
@@ -3242,3 +3242,8 @@ void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
 		BUG_ON(ret < 0);
 	}
 }
+
+bool bpf_jit_supports_ptr_xchg(void)
+{
+	return true;
+}
@@ -955,6 +955,7 @@ bool bpf_jit_supports_subprog_tailcalls(void);
 bool bpf_jit_supports_kfunc_call(void);
 bool bpf_jit_supports_far_kfunc_call(void);
 bool bpf_jit_supports_exceptions(void);
+bool bpf_jit_supports_ptr_xchg(void);
 void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie);
 bool bpf_helper_changes_pkt_data(void *func);
...
@@ -2925,6 +2925,16 @@ bool __weak bpf_jit_supports_far_kfunc_call(void)
 	return false;
 }
 
+/* Return TRUE if the JIT backend satisfies the following two conditions:
+ * 1) JIT backend supports atomic_xchg() on pointer-sized words.
+ * 2) Under the specific arch, the implementation of xchg() is the same
+ *    as atomic_xchg() on pointer-sized words.
+ */
+bool __weak bpf_jit_supports_ptr_xchg(void)
+{
+	return false;
+}
+
 /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
  * skb_copy_bits(), so provide a weak definition of it for NET-less config.
  */
...
@@ -1414,6 +1414,7 @@ BPF_CALL_2(bpf_kptr_xchg, void *, map_value, void *, ptr)
 {
 	unsigned long *kptr = map_value;
 
+	/* This helper may be inlined by verifier. */
 	return xchg(kptr, (unsigned long)ptr);
 }
...
@@ -19809,6 +19809,23 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 			continue;
 		}
 
+		/* Implement bpf_kptr_xchg inline */
+		if (prog->jit_requested && BITS_PER_LONG == 64 &&
+		    insn->imm == BPF_FUNC_kptr_xchg &&
+		    bpf_jit_supports_ptr_xchg()) {
+			insn_buf[0] = BPF_MOV64_REG(BPF_REG_0, BPF_REG_2);
+			insn_buf[1] = BPF_ATOMIC_OP(BPF_DW, BPF_XCHG, BPF_REG_1, BPF_REG_0, 0);
+			cnt = 2;
+
+			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
+			if (!new_prog)
+				return -ENOMEM;
+
+			delta += cnt - 1;
+			env->prog = prog = new_prog;
+			insn = new_prog->insnsi + i + delta;
+			continue;
+		}
+
 patch_call_imm:
 		fn = env->ops->get_func_proto(insn->imm, env->prog);
 		/* all functions that have prototype and verifier allowed
...
@@ -626,50 +626,6 @@ static bool match_pattern(struct btf *btf, char *pattern, char *text, char *reg_
 	return false;
 }
 
-/* Request BPF program instructions after all rewrites are applied,
- * e.g. verifier.c:convert_ctx_access() is done.
- */
-static int get_xlated_program(int fd_prog, struct bpf_insn **buf, __u32 *cnt)
-{
-	struct bpf_prog_info info = {};
-	__u32 info_len = sizeof(info);
-	__u32 xlated_prog_len;
-	__u32 buf_element_size = sizeof(struct bpf_insn);
-
-	if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) {
-		perror("bpf_prog_get_info_by_fd failed");
-		return -1;
-	}
-
-	xlated_prog_len = info.xlated_prog_len;
-	if (xlated_prog_len % buf_element_size) {
-		printf("Program length %d is not multiple of %d\n",
-		       xlated_prog_len, buf_element_size);
-		return -1;
-	}
-
-	*cnt = xlated_prog_len / buf_element_size;
-	*buf = calloc(*cnt, buf_element_size);
-	if (!buf) {
-		perror("can't allocate xlated program buffer");
-		return -ENOMEM;
-	}
-
-	bzero(&info, sizeof(info));
-	info.xlated_prog_len = xlated_prog_len;
-	info.xlated_prog_insns = (__u64)(unsigned long)*buf;
-	if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) {
-		perror("second bpf_prog_get_info_by_fd failed");
-		goto out_free_buf;
-	}
-
-	return 0;
-
-out_free_buf:
-	free(*buf);
-	return -1;
-}
-
 static void print_insn(void *private_data, const char *fmt, ...)
 {
 	va_list args;
...
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
#include <test_progs.h>
#include "linux/filter.h"

#include "kptr_xchg_inline.skel.h"

void test_kptr_xchg_inline(void)
{
	struct kptr_xchg_inline *skel;
	struct bpf_insn *insn = NULL;
	struct bpf_insn exp;
	unsigned int cnt;
	int err;

#if !defined(__x86_64__)
	test__skip();
	return;
#endif

	skel = kptr_xchg_inline__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_load"))
		return;

	err = get_xlated_program(bpf_program__fd(skel->progs.kptr_xchg_inline), &insn, &cnt);
	if (!ASSERT_OK(err, "prog insn"))
		goto out;

	/* The original instructions are:
	 * r1 = map[id:xxx][0]+0
	 * r2 = 0
	 * call bpf_kptr_xchg#yyy
	 *
	 * call bpf_kptr_xchg#yyy will be inlined as:
	 * r0 = r2
	 * r0 = atomic64_xchg((u64 *)(r1 +0), r0)
	 */
	if (!ASSERT_GT(cnt, 5, "insn cnt"))
		goto out;

	exp = BPF_MOV64_REG(BPF_REG_0, BPF_REG_2);
	if (!ASSERT_OK(memcmp(&insn[3], &exp, sizeof(exp)), "mov"))
		goto out;
	exp = BPF_ATOMIC_OP(BPF_DW, BPF_XCHG, BPF_REG_1, BPF_REG_0, 0);
	if (!ASSERT_OK(memcmp(&insn[4], &exp, sizeof(exp)), "xchg"))
		goto out;
out:
	free(insn);
	kptr_xchg_inline__destroy(skel);
}
...
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
#include <linux/types.h>
#include <bpf/bpf_helpers.h>

#include "bpf_experimental.h"
#include "bpf_misc.h"

char _license[] SEC("license") = "GPL";

struct bin_data {
	char blob[32];
};

#define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8)))
private(kptr) struct bin_data __kptr * ptr;

SEC("tc")
__naked int kptr_xchg_inline(void)
{
	asm volatile (
	"r1 = %[ptr] ll;"
	"r2 = 0;"
	"call %[bpf_kptr_xchg];"
	"if r0 == 0 goto 1f;"
	"r1 = r0;"
	"r2 = 0;"
	"call %[bpf_obj_drop_impl];"
	"1:"
	"r0 = 0;"
	"exit;"
	:
	: __imm_addr(ptr),
	  __imm(bpf_kptr_xchg),
	  __imm(bpf_obj_drop_impl)
	: __clobber_all
	);
}

/* BTF FUNC records are not generated for kfuncs referenced
 * from inline assembly. These records are necessary for
 * libbpf to link the program. The function below is a hack
 * to ensure that BTF FUNC records are generated.
 */
void __btf_root(void)
{
	bpf_obj_drop(NULL);
}
@@ -1341,48 +1341,6 @@ static bool cmp_str_seq(const char *log, const char *exp)
 	return true;
 }
 
-static struct bpf_insn *get_xlated_program(int fd_prog, int *cnt)
-{
-	__u32 buf_element_size = sizeof(struct bpf_insn);
-	struct bpf_prog_info info = {};
-	__u32 info_len = sizeof(info);
-	__u32 xlated_prog_len;
-	struct bpf_insn *buf;
-
-	if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) {
-		perror("bpf_prog_get_info_by_fd failed");
-		return NULL;
-	}
-
-	xlated_prog_len = info.xlated_prog_len;
-	if (xlated_prog_len % buf_element_size) {
-		printf("Program length %d is not multiple of %d\n",
-		       xlated_prog_len, buf_element_size);
-		return NULL;
-	}
-
-	*cnt = xlated_prog_len / buf_element_size;
-	buf = calloc(*cnt, buf_element_size);
-	if (!buf) {
-		perror("can't allocate xlated program buffer");
-		return NULL;
-	}
-
-	bzero(&info, sizeof(info));
-	info.xlated_prog_len = xlated_prog_len;
-	info.xlated_prog_insns = (__u64)(unsigned long)buf;
-	if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) {
-		perror("second bpf_prog_get_info_by_fd failed");
-		goto out_free_buf;
-	}
-
-	return buf;
-
-out_free_buf:
-	free(buf);
-	return NULL;
-}
-
 static bool is_null_insn(struct bpf_insn *insn)
 {
 	struct bpf_insn null_insn = {};
@@ -1505,7 +1463,7 @@ static void print_insn(struct bpf_insn *buf, int cnt)
 static bool check_xlated_program(struct bpf_test *test, int fd_prog)
 {
 	struct bpf_insn *buf;
-	int cnt;
+	unsigned int cnt;
 	bool result = true;
 	bool check_expected = !is_null_insn(test->expected_insns);
 	bool check_unexpected = !is_null_insn(test->unexpected_insns);
@@ -1513,8 +1471,7 @@ static bool check_xlated_program(struct bpf_test *test, int fd_prog)
 	if (!check_expected && !check_unexpected)
 		goto out;
 
-	buf = get_xlated_program(fd_prog, &cnt);
-	if (!buf) {
+	if (get_xlated_program(fd_prog, &buf, &cnt)) {
 		printf("FAIL: can't get xlated program\n");
 		result = false;
 		goto out;
...
@@ -387,3 +387,45 @@ int kern_sync_rcu(void)
 {
 	return syscall(__NR_membarrier, MEMBARRIER_CMD_SHARED, 0, 0);
 }
+
+int get_xlated_program(int fd_prog, struct bpf_insn **buf, __u32 *cnt)
+{
+	__u32 buf_element_size = sizeof(struct bpf_insn);
+	struct bpf_prog_info info = {};
+	__u32 info_len = sizeof(info);
+	__u32 xlated_prog_len;
+
+	if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) {
+		perror("bpf_prog_get_info_by_fd failed");
+		return -1;
+	}
+
+	xlated_prog_len = info.xlated_prog_len;
+	if (xlated_prog_len % buf_element_size) {
+		printf("Program length %u is not multiple of %u\n",
+		       xlated_prog_len, buf_element_size);
+		return -1;
+	}
+
+	*cnt = xlated_prog_len / buf_element_size;
+	*buf = calloc(*cnt, buf_element_size);
+	if (!buf) {
+		perror("can't allocate xlated program buffer");
+		return -ENOMEM;
+	}
+
+	bzero(&info, sizeof(info));
+	info.xlated_prog_len = xlated_prog_len;
+	info.xlated_prog_insns = (__u64)(unsigned long)*buf;
+	if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) {
+		perror("second bpf_prog_get_info_by_fd failed");
+		goto out_free_buf;
+	}
+
+	return 0;
+
+out_free_buf:
+	free(*buf);
+	*buf = NULL;
+	return -1;
+}
...
@@ -46,4 +46,10 @@ static inline __u64 get_time_ns(void)
 	return (u64)t.tv_sec * 1000000000 + t.tv_nsec;
 }
 
+struct bpf_insn;
+/* Request BPF program instructions after all rewrites are applied,
+ * e.g. verifier.c:convert_ctx_access() is done.
+ */
+int get_xlated_program(int fd_prog, struct bpf_insn **buf, __u32 *cnt);
+
 #endif /* __TESTING_HELPERS_H */