Commit 26b367e3 authored by Alexei Starovoitov, committed by Andrii Nakryiko

selftests/bpf: Additional test for CO-RE in the kernel.

Add a test where the randmap() function is appended to three different bpf
programs. That exercises the struct bpf_core_relo replication logic and the
instruction offset adjustment in the gen loader part of libbpf.
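
For context, a rough sketch of the relocation record involved (paraphrased for
illustration only; the authoritative struct bpf_core_relo and enum
bpf_core_relo_kind live in include/uapi/linux/bpf.h as of this series): when
randmap() is appended to each program, its relocation records must be
duplicated per program copy and their insn_off shifted to point at the
appended instructions.

/* Illustrative paraphrase, not the real UAPI definition. */
#include <stdint.h>

enum core_relo_kind_sketch {
	CORE_FIELD_BYTE_OFFSET = 0,	/* relocate a field's byte offset */
	/* other field/type/enum kinds elided */
};

struct core_relo_sketch {
	uint32_t insn_off;	/* byte offset of the insn whose imm gets patched;
				 * this is what the gen loader must shift for each
				 * appended copy of randmap()
				 */
	uint32_t type_id;	/* BTF type ID of the relocation's root type */
	uint32_t access_str_off;	/* accessor spec string offset in BTF strings */
	enum core_relo_kind_sketch kind;	/* field offset, type size, enum value, ... */
};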

The fourth bpf program has 360 CO-RE relocations against types from vmlinux,
bpf_testmod, and a non-existing type. It exercises the candidate cache logic.
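
As a conceptual illustration only (this is not the kernel's actual data
structure), the candidate cache being exercised can be thought of as keying
the matching BTF types by the relocation's root type name, so 360 relocations
that mostly repeat the same handful of names do not rescan vmlinux and module
BTF every time:

/* Conceptual sketch, not the kernel implementation: remember the BTF
 * candidates found for a given root type name so repeated relocations
 * against e.g. "net_device" or "sk_buff" reuse the earlier lookup.
 */
struct cand_cache_entry_sketch {
	const char *type_name;		/* root type name from the relocation */
	unsigned int cand_cnt;		/* candidates found; 0 means no match
					 * (e.g. the intentionally bogus __sk_bUfF)
					 */
	unsigned int cand_ids[8];	/* matching type IDs in vmlinux/bpf_testmod BTF */
};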
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20211201181040.23337-16-alexei.starovoitov@gmail.com
parent 650c9dbd
tools/testing/selftests/bpf/Makefile

@@ -326,7 +326,7 @@ LINKED_SKELS := test_static_linked.skel.h linked_funcs.skel.h \
 LSKELS := kfunc_call_test.c fentry_test.c fexit_test.c fexit_sleep.c \
 	test_ringbuf.c atomics.c trace_printk.c trace_vprintk.c \
-	map_ptr_kern.c
+	map_ptr_kern.c core_kern.c
 # Generate both light skeleton and libbpf skeleton for these
 LSKELS_EXTRA := test_ksyms_module.c test_ksyms_weak.c kfunc_call_test_subprog.c
 SKEL_BLACKLIST += $$(LSKELS)
tools/testing/selftests/bpf/prog_tests/core_kern.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include "test_progs.h"
#include "core_kern.lskel.h"

void test_core_kern_lskel(void)
{
	struct core_kern_lskel *skel;

	skel = core_kern_lskel__open_and_load();
	ASSERT_OK_PTR(skel, "open_and_load");
	core_kern_lskel__destroy(skel);
}
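
Once built, this test can typically be selected with the usual test_progs name
filter, e.g. ./test_progs -t core_kern (exact invocation depends on the
selftests setup). The interesting work happens inside open_and_load(): the
light skeleton's loader passes the CO-RE relocation records to the kernel, and
the relocations are applied at program load time rather than by libbpf in
user space.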
tools/testing/selftests/bpf/progs/core_kern.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>

#define ATTR __always_inline
#include "test_jhash.h"

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, u32);
	__type(value, u32);
	__uint(max_entries, 256);
} array1 SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, u32);
	__type(value, u32);
	__uint(max_entries, 256);
} array2 SEC(".maps");

/* Shared subprogram appended to each of the three programs below; its
 * dev->mtu CO-RE access is what makes the gen loader replicate and
 * offset-adjust the relocation records per program copy.
 */
static __noinline int randmap(int v, const struct net_device *dev)
{
	struct bpf_map *map = (struct bpf_map *)&array1;
	int key = bpf_get_prandom_u32() & 0xff;
	int *val;

	if (bpf_get_prandom_u32() & 1)
		map = (struct bpf_map *)&array2;

	val = bpf_map_lookup_elem(map, &key);
	if (val)
		*val = bpf_get_prandom_u32() + v + dev->mtu;

	return 0;
}

SEC("tp_btf/xdp_devmap_xmit")
int BPF_PROG(tp_xdp_devmap_xmit_multi, const struct net_device
	     *from_dev, const struct net_device *to_dev, int sent, int drops,
	     int err)
{
	return randmap(from_dev->ifindex, from_dev);
}

SEC("fentry/eth_type_trans")
int BPF_PROG(fentry_eth_type_trans, struct sk_buff *skb,
	     struct net_device *dev, unsigned short protocol)
{
	return randmap(dev->ifindex + skb->len, dev);
}

SEC("fexit/eth_type_trans")
int BPF_PROG(fexit_eth_type_trans, struct sk_buff *skb,
	     struct net_device *dev, unsigned short protocol)
{
	return randmap(dev->ifindex + skb->len, dev);
}

/* Read-only and always zero, so the branches it guards are dead code and an
 * unresolvable relocation inside them does not break loading.
 */
volatile const int never;

struct __sk_bUfF /* it will not exist in vmlinux */ {
	int len;
} __attribute__((preserve_access_index));

struct bpf_testmod_test_read_ctx /* it exists in bpf_testmod */ {
	size_t len;
} __attribute__((preserve_access_index));

SEC("tc")
int balancer_ingress(struct __sk_buff *ctx)
{
	void *data_end = (void *)(long)ctx->data_end;
	void *data = (void *)(long)ctx->data;
	void *ptr;
	int ret = 0, nh_off, i = 0;

	nh_off = 14;

	/* pragma unroll doesn't work on large loops */
#define C do { \
	ptr = data + i; \
	if (ptr + nh_off > data_end) \
		break; \
	ctx->tc_index = jhash(ptr, nh_off, ctx->cb[0] + i++); \
	if (never) { \
		/* below is dead code with an unresolvable CO-RE relo */ \
		i += ((struct __sk_bUfF *)ctx)->len; \
		/* this CO-RE relo may or may not resolve
		 * depending on whether bpf_testmod is loaded.
		 */ \
		i += ((struct bpf_testmod_test_read_ctx *)ctx)->len; \
	} \
	} while (0);
#define C30 C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;C;
	C30;C30;C30; /* 90 calls */
	return 0;
}

char LICENSE[] SEC("license") = "GPL";