Commit a12a625c authored by Alexei Starovoitov

Merge branch 'libbpf-probing-improvements'

Andrii Nakryiko says:

====================
This patch set refactors libbpf feature probing to be done lazily, on an
as-needed basis, instead of proactively testing all possible features libbpf
knows about. This allows such detections and mitigations to scale better,
without issuing unnecessary syscalls on each bpf_object__load() call. Probe
results are also now memoized globally, instead of per-bpf_object.
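
The core of this is a small, globally memoized feature check. Condensed from
the kernel_supports() implementation added later in this series (the real
version also warns when a probe itself errors out), it looks roughly like:

  static bool kernel_supports(enum kern_feature_id feat_id)
  {
          struct kern_feature_desc *feat = &feature_probes[feat_id];

          /* run the probe at most once, on first use, and cache the verdict */
          if (READ_ONCE(feat->res) == FEAT_UNKNOWN) {
                  int ret = feat->probe();

                  WRITE_ONCE(feat->res, ret > 0 ? FEAT_SUPPORTED : FEAT_MISSING);
          }
          return READ_ONCE(feat->res) == FEAT_SUPPORTED;
  }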

Building on that, libbpf will now detect availability of the
bpf_probe_read_kernel() helper (which implies the -user and -str variants as
well), and will sanitize BPF program code by replacing such calls with the
generic variants (bpf_probe_read[_str]()). This allows migrating all BPF
programs to the proper -kernel/-user probing helpers, without fear of breaking
them on old kernels.
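
For example, a BPF program written against the new helper (a hypothetical
kprobe, not part of this series) keeps working on kernels that predate
bpf_probe_read_kernel(), because libbpf rewrites the helper call in place at
load time:

  #include "vmlinux.h"
  #include <bpf/bpf_helpers.h>
  #include <bpf/bpf_tracing.h>

  char LICENSE[] SEC("license") = "GPL";

  SEC("kprobe/do_nanosleep")
  int BPF_KPROBE(handle_nanosleep)
  {
          struct task_struct *task = (void *)bpf_get_current_task();
          char comm[16];

          /* On kernels lacking bpf_probe_read_kernel(), libbpf patches this
           * call's immediate to BPF_FUNC_probe_read before loading.
           */
          bpf_probe_read_kernel(comm, sizeof(comm), task->comm);
          return 0;
  }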

With that, update BPF_CORE_READ() and related macros to use
bpf_probe_read_kernel(), as it doesn't make much sense to do CO-RE relocations
against user-space types. The only class of cases in which a BPF program might
read a kernel type from user-space is UAPI data structures, which by
definition are fixed in their memory layout and don't need relocating. This is
exemplified by the test_vmlinux test, which is fixed as part of this patch set
as well. BPF_CORE_READ() is useful for chaining bpf_probe_read_{kernel,user}()
calls together even without relocation, so user-space variants might be added
later, if there is a need.
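
As a concrete illustration of the chaining, a read like the hypothetical
snippet below expands into one CO-RE-relocated bpf_probe_read_kernel() call
per pointer hop:

  /* logically: pid_t ppid = task->real_parent->tgid; */
  pid_t ppid = BPF_CORE_READ(task, real_parent, tgid);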

While making libbpf more useful for older kernels, also improve handling of a
complete lack of BTF support in the kernel by not even attempting to load BTF
info into the kernel. This eliminates the annoying warning about missing
kernel BTF support and the map creation retry without BTF. If the user relies
on features that do require kernel BTF support, loading will still fail, of
course.
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents 65bb2e0f 68b08647
@@ -107,7 +107,7 @@ ifeq ($(feature-reallocarray), 0)
endif
# Append required CFLAGS
override CFLAGS += $(EXTRA_WARNINGS)
override CFLAGS += $(EXTRA_WARNINGS) -Wno-switch-enum
override CFLAGS += -Werror -Wall
override CFLAGS += -fPIC
override CFLAGS += $(INCLUDES)
@@ -24,27 +24,29 @@ enum bpf_field_info_kind {
#if __BYTE_ORDER == __LITTLE_ENDIAN
#define __CORE_BITFIELD_PROBE_READ(dst, src, fld) \
bpf_probe_read((void *)dst, \
__CORE_RELO(src, fld, BYTE_SIZE), \
(const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET))
bpf_probe_read_kernel( \
(void *)dst, \
__CORE_RELO(src, fld, BYTE_SIZE), \
(const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET))
#else
/* semantics of LSHIFT_64 assumes loading values into low-ordered bytes, so
* for big-endian we need to adjust destination pointer accordingly, based on
* field byte size
*/
#define __CORE_BITFIELD_PROBE_READ(dst, src, fld) \
bpf_probe_read((void *)dst + (8 - __CORE_RELO(src, fld, BYTE_SIZE)), \
__CORE_RELO(src, fld, BYTE_SIZE), \
(const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET))
bpf_probe_read_kernel( \
(void *)dst + (8 - __CORE_RELO(src, fld, BYTE_SIZE)), \
__CORE_RELO(src, fld, BYTE_SIZE), \
(const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET))
#endif
/*
* Extract bitfield, identified by s->field, and return its value as u64.
* All this is done in relocatable manner, so bitfield changes such as
* signedness, bit size, offset changes, this will be handled automatically.
* This version of macro is using bpf_probe_read() to read underlying integer
* storage. Macro functions as an expression and its return type is
* bpf_probe_read()'s return value: 0, on success, <0 on error.
* This version of macro is using bpf_probe_read_kernel() to read underlying
* integer storage. Macro functions as an expression and its return type is
* bpf_probe_read_kernel()'s return value: 0, on success, <0 on error.
*/
#define BPF_CORE_READ_BITFIELD_PROBED(s, field) ({ \
unsigned long long val = 0; \
@@ -99,8 +101,8 @@ enum bpf_field_info_kind {
__builtin_preserve_field_info(field, BPF_FIELD_BYTE_SIZE)
/*
* bpf_core_read() abstracts away bpf_probe_read() call and captures offset
* relocation for source address using __builtin_preserve_access_index()
* bpf_core_read() abstracts away bpf_probe_read_kernel() call and captures
* offset relocation for source address using __builtin_preserve_access_index()
* built-in, provided by Clang.
*
* __builtin_preserve_access_index() takes as an argument an expression of
@@ -115,8 +117,8 @@ enum bpf_field_info_kind {
* (local) BTF, used to record relocation.
*/
#define bpf_core_read(dst, sz, src) \
bpf_probe_read(dst, sz, \
(const void *)__builtin_preserve_access_index(src))
bpf_probe_read_kernel(dst, sz, \
(const void *)__builtin_preserve_access_index(src))
/*
* bpf_core_read_str() is a thin wrapper around bpf_probe_read_str()
@@ -124,8 +126,8 @@ enum bpf_field_info_kind {
* argument.
*/
#define bpf_core_read_str(dst, sz, src) \
bpf_probe_read_str(dst, sz, \
(const void *)__builtin_preserve_access_index(src))
bpf_probe_read_kernel_str(dst, sz, \
(const void *)__builtin_preserve_access_index(src))
#define ___concat(a, b) a ## b
#define ___apply(fn, n) ___concat(fn, n)
@@ -239,15 +241,17 @@ enum bpf_field_info_kind {
* int x = BPF_CORE_READ(s, a.b.c, d.e, f, g);
*
* BPF_CORE_READ will decompose above statement into 4 bpf_core_read (BPF
* CO-RE relocatable bpf_probe_read() wrapper) calls, logically equivalent to:
* CO-RE relocatable bpf_probe_read_kernel() wrapper) calls, logically
* equivalent to:
* 1. const void *__t = s->a.b.c;
* 2. __t = __t->d.e;
* 3. __t = __t->f;
* 4. return __t->g;
*
* Equivalence is logical, because there is a heavy type casting/preservation
* involved, as well as all the reads are happening through bpf_probe_read()
* calls using __builtin_preserve_access_index() to emit CO-RE relocations.
* involved, as well as all the reads are happening through
* bpf_probe_read_kernel() calls using __builtin_preserve_access_index() to
* emit CO-RE relocations.
*
* N.B. Only up to 9 "field accessors" are supported, which should be more
* than enough for any practical purpose.
@@ -289,9 +289,9 @@ struct pt_regs;
#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
#else
#define BPF_KPROBE_READ_RET_IP(ip, ctx) \
({ bpf_probe_read(&(ip), sizeof(ip), (void *)PT_REGS_RET(ctx)); })
({ bpf_probe_read_kernel(&(ip), sizeof(ip), (void *)PT_REGS_RET(ctx)); })
#define BPF_KRETPROBE_READ_RET_IP(ip, ctx) \
({ bpf_probe_read(&(ip), sizeof(ip), \
({ bpf_probe_read_kernel(&(ip), sizeof(ip), \
(void *)(PT_REGS_FP(ctx) + sizeof(ip))); })
#endif
@@ -165,23 +165,30 @@ static inline __u64 ptr_to_u64(const void *ptr)
return (__u64) (unsigned long) ptr;
}
struct bpf_capabilities {
enum kern_feature_id {
/* v4.14: kernel support for program & map names. */
__u32 name:1;
FEAT_PROG_NAME,
/* v5.2: kernel support for global data sections. */
__u32 global_data:1;
FEAT_GLOBAL_DATA,
/* BTF support */
FEAT_BTF,
/* BTF_KIND_FUNC and BTF_KIND_FUNC_PROTO support */
__u32 btf_func:1;
FEAT_BTF_FUNC,
/* BTF_KIND_VAR and BTF_KIND_DATASEC support */
__u32 btf_datasec:1;
/* BPF_F_MMAPABLE is supported for arrays */
__u32 array_mmap:1;
FEAT_BTF_DATASEC,
/* BTF_FUNC_GLOBAL is supported */
__u32 btf_func_global:1;
FEAT_BTF_GLOBAL_FUNC,
/* BPF_F_MMAPABLE is supported for arrays */
FEAT_ARRAY_MMAP,
/* kernel support for expected_attach_type in BPF_PROG_LOAD */
__u32 exp_attach_type:1;
FEAT_EXP_ATTACH_TYPE,
/* bpf_probe_read_{kernel,user}[_str] helpers */
FEAT_PROBE_READ_KERN,
__FEAT_CNT,
};
static bool kernel_supports(enum kern_feature_id feat_id);
enum reloc_type {
RELO_LD64,
RELO_CALL,
@@ -253,8 +260,6 @@ struct bpf_program {
__u32 func_info_rec_size;
__u32 func_info_cnt;
struct bpf_capabilities *caps;
void *line_info;
__u32 line_info_rec_size;
__u32 line_info_cnt;
@@ -436,8 +441,6 @@ struct bpf_object {
void *priv;
bpf_object_clear_priv_t clear_priv;
struct bpf_capabilities caps;
char path[];
};
#define obj_elf_valid(o) ((o)->efile.elf)
@@ -561,7 +564,6 @@ bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
if (err)
return err;
prog.caps = &obj->caps;
progs = obj->programs;
nr_progs = obj->nr_programs;
@@ -2340,18 +2342,18 @@ static bool section_have_execinstr(struct bpf_object *obj, int idx)
static bool btf_needs_sanitization(struct bpf_object *obj)
{
bool has_func_global = obj->caps.btf_func_global;
bool has_datasec = obj->caps.btf_datasec;
bool has_func = obj->caps.btf_func;
bool has_func_global = kernel_supports(FEAT_BTF_GLOBAL_FUNC);
bool has_datasec = kernel_supports(FEAT_BTF_DATASEC);
bool has_func = kernel_supports(FEAT_BTF_FUNC);
return !has_func || !has_datasec || !has_func_global;
}
static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
{
bool has_func_global = obj->caps.btf_func_global;
bool has_datasec = obj->caps.btf_datasec;
bool has_func = obj->caps.btf_func;
bool has_func_global = kernel_supports(FEAT_BTF_GLOBAL_FUNC);
bool has_datasec = kernel_supports(FEAT_BTF_DATASEC);
bool has_func = kernel_supports(FEAT_BTF_FUNC);
struct btf_type *t;
int i, j, vlen;
@@ -2533,6 +2535,15 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
if (!obj->btf)
return 0;
if (!kernel_supports(FEAT_BTF)) {
if (kernel_needs_btf(obj)) {
err = -EOPNOTSUPP;
goto report;
}
pr_debug("Kernel doesn't support BTF, skipping uploading it.\n");
return 0;
}
sanitize = btf_needs_sanitization(obj);
if (sanitize) {
const void *raw_data;
@@ -2558,6 +2569,7 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
}
btf__free(kern_btf);
}
report:
if (err) {
btf_mandatory = kernel_needs_btf(obj);
pr_warn("Error loading .BTF into kernel: %d. %s\n", err,
@@ -3433,8 +3445,14 @@ bpf_object__probe_loading(struct bpf_object *obj)
return 0;
}
static int
bpf_object__probe_name(struct bpf_object *obj)
static int probe_fd(int fd)
{
if (fd >= 0)
close(fd);
return fd >= 0;
}
static int probe_kern_prog_name(void)
{
struct bpf_load_program_attr attr;
struct bpf_insn insns[] = {
@@ -3452,16 +3470,10 @@ bpf_object__probe_name(struct bpf_object *obj)
attr.license = "GPL";
attr.name = "test";
ret = bpf_load_program_xattr(&attr, NULL, 0);
if (ret >= 0) {
obj->caps.name = 1;
close(ret);
}
return 0;
return probe_fd(ret);
}
static int
bpf_object__probe_global_data(struct bpf_object *obj)
static int probe_kern_global_data(void)
{
struct bpf_load_program_attr prg_attr;
struct bpf_create_map_attr map_attr;
@@ -3498,16 +3510,23 @@ bpf_object__probe_global_data(struct bpf_object *obj)
prg_attr.license = "GPL";
ret = bpf_load_program_xattr(&prg_attr, NULL, 0);
if (ret >= 0) {
obj->caps.global_data = 1;
close(ret);
}
close(map);
return 0;
return probe_fd(ret);
}
static int bpf_object__probe_btf_func(struct bpf_object *obj)
static int probe_kern_btf(void)
{
static const char strs[] = "\0int";
__u32 types[] = {
/* int */
BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
};
return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
strs, sizeof(strs)));
}
static int probe_kern_btf_func(void)
{
static const char strs[] = "\0int\0x\0a";
/* void x(int a) {} */
@@ -3520,20 +3539,12 @@ static int bpf_object__probe_btf_func(struct bpf_object *obj)
/* FUNC x */ /* [3] */
BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
};
int btf_fd;
btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types),
strs, sizeof(strs));
if (btf_fd >= 0) {
obj->caps.btf_func = 1;
close(btf_fd);
return 1;
}
return 0;
return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
strs, sizeof(strs)));
}
static int bpf_object__probe_btf_func_global(struct bpf_object *obj)
static int probe_kern_btf_func_global(void)
{
static const char strs[] = "\0int\0x\0a";
/* static void x(int a) {} */
@@ -3546,20 +3557,12 @@ static int bpf_object__probe_btf_func_global(struct bpf_object *obj)
/* FUNC x BTF_FUNC_GLOBAL */ /* [3] */
BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2),
};
int btf_fd;
btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types),
strs, sizeof(strs));
if (btf_fd >= 0) {
obj->caps.btf_func_global = 1;
close(btf_fd);
return 1;
}
return 0;
return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
strs, sizeof(strs)));
}
static int bpf_object__probe_btf_datasec(struct bpf_object *obj)
static int probe_kern_btf_datasec(void)
{
static const char strs[] = "\0x\0.data";
/* static int a; */
@@ -3573,20 +3576,12 @@ static int bpf_object__probe_btf_datasec(struct bpf_object *obj)
BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
BTF_VAR_SECINFO_ENC(2, 0, 4),
};
int btf_fd;
btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types),
strs, sizeof(strs));
if (btf_fd >= 0) {
obj->caps.btf_datasec = 1;
close(btf_fd);
return 1;
}
return 0;
return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
strs, sizeof(strs)));
}
static int bpf_object__probe_array_mmap(struct bpf_object *obj)
static int probe_kern_array_mmap(void)
{
struct bpf_create_map_attr attr = {
.map_type = BPF_MAP_TYPE_ARRAY,
@@ -3595,27 +3590,17 @@ static int bpf_object__probe_array_mmap(struct bpf_object *obj)
.value_size = sizeof(int),
.max_entries = 1,
};
int fd;
fd = bpf_create_map_xattr(&attr);
if (fd >= 0) {
obj->caps.array_mmap = 1;
close(fd);
return 1;
}
return 0;
return probe_fd(bpf_create_map_xattr(&attr));
}
static int
bpf_object__probe_exp_attach_type(struct bpf_object *obj)
static int probe_kern_exp_attach_type(void)
{
struct bpf_load_program_attr attr;
struct bpf_insn insns[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
};
int fd;
memset(&attr, 0, sizeof(attr));
/* use any valid combination of program type and (optional)
@@ -3629,36 +3614,91 @@ bpf_object__probe_exp_attach_type(struct bpf_object *obj)
attr.insns_cnt = ARRAY_SIZE(insns);
attr.license = "GPL";
fd = bpf_load_program_xattr(&attr, NULL, 0);
if (fd >= 0) {
obj->caps.exp_attach_type = 1;
close(fd);
return 1;
}
return 0;
return probe_fd(bpf_load_program_xattr(&attr, NULL, 0));
}
static int
bpf_object__probe_caps(struct bpf_object *obj)
{
int (*probe_fn[])(struct bpf_object *obj) = {
bpf_object__probe_name,
bpf_object__probe_global_data,
bpf_object__probe_btf_func,
bpf_object__probe_btf_func_global,
bpf_object__probe_btf_datasec,
bpf_object__probe_array_mmap,
bpf_object__probe_exp_attach_type,
static int probe_kern_probe_read_kernel(void)
{
struct bpf_load_program_attr attr;
struct bpf_insn insns[] = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), /* r1 = r10 (fp) */
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), /* r1 += -8 */
BPF_MOV64_IMM(BPF_REG_2, 8), /* r2 = 8 */
BPF_MOV64_IMM(BPF_REG_3, 0), /* r3 = 0 */
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel),
BPF_EXIT_INSN(),
};
int i, ret;
for (i = 0; i < ARRAY_SIZE(probe_fn); i++) {
ret = probe_fn[i](obj);
if (ret < 0)
pr_debug("Probe #%d failed with %d.\n", i, ret);
memset(&attr, 0, sizeof(attr));
attr.prog_type = BPF_PROG_TYPE_KPROBE;
attr.insns = insns;
attr.insns_cnt = ARRAY_SIZE(insns);
attr.license = "GPL";
return probe_fd(bpf_load_program_xattr(&attr, NULL, 0));
}
enum kern_feature_result {
FEAT_UNKNOWN = 0,
FEAT_SUPPORTED = 1,
FEAT_MISSING = 2,
};
typedef int (*feature_probe_fn)(void);
static struct kern_feature_desc {
const char *desc;
feature_probe_fn probe;
enum kern_feature_result res;
} feature_probes[__FEAT_CNT] = {
[FEAT_PROG_NAME] = {
"BPF program name", probe_kern_prog_name,
},
[FEAT_GLOBAL_DATA] = {
"global variables", probe_kern_global_data,
},
[FEAT_BTF] = {
"minimal BTF", probe_kern_btf,
},
[FEAT_BTF_FUNC] = {
"BTF functions", probe_kern_btf_func,
},
[FEAT_BTF_GLOBAL_FUNC] = {
"BTF global function", probe_kern_btf_func_global,
},
[FEAT_BTF_DATASEC] = {
"BTF data section and variable", probe_kern_btf_datasec,
},
[FEAT_ARRAY_MMAP] = {
"ARRAY map mmap()", probe_kern_array_mmap,
},
[FEAT_EXP_ATTACH_TYPE] = {
"BPF_PROG_LOAD expected_attach_type attribute",
probe_kern_exp_attach_type,
},
[FEAT_PROBE_READ_KERN] = {
"bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel,
}
};
return 0;
static bool kernel_supports(enum kern_feature_id feat_id)
{
struct kern_feature_desc *feat = &feature_probes[feat_id];
int ret;
if (READ_ONCE(feat->res) == FEAT_UNKNOWN) {
ret = feat->probe();
if (ret > 0) {
WRITE_ONCE(feat->res, FEAT_SUPPORTED);
} else if (ret == 0) {
WRITE_ONCE(feat->res, FEAT_MISSING);
} else {
pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret);
WRITE_ONCE(feat->res, FEAT_MISSING);
}
}
return READ_ONCE(feat->res) == FEAT_SUPPORTED;
}
static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
@@ -3760,7 +3800,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map)
memset(&create_attr, 0, sizeof(create_attr));
if (obj->caps.name)
if (kernel_supports(FEAT_PROG_NAME))
create_attr.name = map->name;
create_attr.map_ifindex = map->map_ifindex;
create_attr.map_type = def->type;
@@ -5348,6 +5388,53 @@ static int bpf_object__collect_reloc(struct bpf_object *obj)
return 0;
}
static bool insn_is_helper_call(struct bpf_insn *insn, enum bpf_func_id *func_id)
{
__u8 class = BPF_CLASS(insn->code);
if ((class == BPF_JMP || class == BPF_JMP32) &&
BPF_OP(insn->code) == BPF_CALL &&
BPF_SRC(insn->code) == BPF_K &&
insn->src_reg == 0 && insn->dst_reg == 0) {
if (func_id)
*func_id = insn->imm;
return true;
}
return false;
}
static int bpf_object__sanitize_prog(struct bpf_object* obj, struct bpf_program *prog)
{
struct bpf_insn *insn = prog->insns;
enum bpf_func_id func_id;
int i;
for (i = 0; i < prog->insns_cnt; i++, insn++) {
if (!insn_is_helper_call(insn, &func_id))
continue;
/* on kernels that don't yet support
* bpf_probe_read_{kernel,user}[_str] helpers, fall back
* to bpf_probe_read() which works well for old kernels
*/
switch (func_id) {
case BPF_FUNC_probe_read_kernel:
case BPF_FUNC_probe_read_user:
if (!kernel_supports(FEAT_PROBE_READ_KERN))
insn->imm = BPF_FUNC_probe_read;
break;
case BPF_FUNC_probe_read_kernel_str:
case BPF_FUNC_probe_read_user_str:
if (!kernel_supports(FEAT_PROBE_READ_KERN))
insn->imm = BPF_FUNC_probe_read_str;
break;
default:
break;
}
}
return 0;
}
static int
load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
char *license, __u32 kern_version, int *pfd)
@@ -5364,12 +5451,12 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
load_attr.prog_type = prog->type;
/* old kernels might not support specifying expected_attach_type */
if (!prog->caps->exp_attach_type && prog->sec_def &&
if (!kernel_supports(FEAT_EXP_ATTACH_TYPE) && prog->sec_def &&
prog->sec_def->is_exp_attach_type_optional)
load_attr.expected_attach_type = 0;
else
load_attr.expected_attach_type = prog->expected_attach_type;
if (prog->caps->name)
if (kernel_supports(FEAT_PROG_NAME))
load_attr.name = prog->name;
load_attr.insns = insns;
load_attr.insns_cnt = insns_cnt;
@@ -5387,7 +5474,7 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
}
/* specify func_info/line_info only if kernel supports them */
btf_fd = bpf_object__btf_fd(prog->obj);
if (btf_fd >= 0 && prog->obj->caps.btf_func) {
if (btf_fd >= 0 && kernel_supports(FEAT_BTF_FUNC)) {
load_attr.prog_btf_fd = btf_fd;
load_attr.func_info = prog->func_info;
load_attr.func_info_rec_size = prog->func_info_rec_size;
@@ -5562,6 +5649,13 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level)
size_t i;
int err;
for (i = 0; i < obj->nr_programs; i++) {
prog = &obj->programs[i];
err = bpf_object__sanitize_prog(obj, prog);
if (err)
return err;
}
for (i = 0; i < obj->nr_programs; i++) {
prog = &obj->programs[i];
if (bpf_program__is_function_storage(prog, obj))
@@ -5750,11 +5844,11 @@ static int bpf_object__sanitize_maps(struct bpf_object *obj)
bpf_object__for_each_map(m, obj) {
if (!bpf_map__is_internal(m))
continue;
if (!obj->caps.global_data) {
if (!kernel_supports(FEAT_GLOBAL_DATA)) {
pr_warn("kernel doesn't support global data\n");
return -ENOTSUP;
}
if (!obj->caps.array_mmap)
if (!kernel_supports(FEAT_ARRAY_MMAP))
m->def.map_flags ^= BPF_F_MMAPABLE;
}
@@ -5904,7 +5998,6 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
}
err = bpf_object__probe_loading(obj);
err = err ? : bpf_object__probe_caps(obj);
err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
err = err ? : bpf_object__sanitize_and_load_btf(obj);
err = err ? : bpf_object__sanitize_maps(obj);
@@ -19,12 +19,14 @@ SEC("tp/syscalls/sys_enter_nanosleep")
int handle__tp(struct trace_event_raw_sys_enter *args)
{
struct __kernel_timespec *ts;
long tv_nsec;
if (args->id != __NR_nanosleep)
return 0;
ts = (void *)args->args[0];
if (BPF_CORE_READ(ts, tv_nsec) != MY_TV_NSEC)
if (bpf_probe_read_user(&tv_nsec, sizeof(ts->tv_nsec), &ts->tv_nsec) ||
tv_nsec != MY_TV_NSEC)
return 0;
tp_called = true;
@@ -35,12 +37,14 @@ SEC("raw_tp/sys_enter")
int BPF_PROG(handle__raw_tp, struct pt_regs *regs, long id)
{
struct __kernel_timespec *ts;
long tv_nsec;
if (id != __NR_nanosleep)
return 0;
ts = (void *)PT_REGS_PARM1_CORE(regs);
if (BPF_CORE_READ(ts, tv_nsec) != MY_TV_NSEC)
if (bpf_probe_read_user(&tv_nsec, sizeof(ts->tv_nsec), &ts->tv_nsec) ||
tv_nsec != MY_TV_NSEC)
return 0;
raw_tp_called = true;
@@ -51,12 +55,14 @@ SEC("tp_btf/sys_enter")
int BPF_PROG(handle__tp_btf, struct pt_regs *regs, long id)
{
struct __kernel_timespec *ts;
long tv_nsec;
if (id != __NR_nanosleep)
return 0;
ts = (void *)PT_REGS_PARM1_CORE(regs);
if (BPF_CORE_READ(ts, tv_nsec) != MY_TV_NSEC)
if (bpf_probe_read_user(&tv_nsec, sizeof(ts->tv_nsec), &ts->tv_nsec) ||
tv_nsec != MY_TV_NSEC)
return 0;
tp_btf_called = true;