Commit f23c7ce3 authored by Daniel Borkmann

Merge branch 'bpf-libbpf-bitfield-size-relo'

Andrii Nakryiko says:

====================
This patch set adds support for reading bitfields in a relocatable manner
through a set of relocations emitted by Clang, adds corresponding libbpf
support for those relocations, and abstracts the details behind the
BPF_CORE_READ_BITFIELD/BPF_CORE_READ_BITFIELD_PROBED macros.

We also add support for capturing relocatable field size, so that BPF program
code can adjust its logic to the actual amount of data it needs to operate on,
even if it changes between kernels. A new convenience macro,
bpf_core_field_size(), is added to bpf_core_read.h, in the same family of
macros as bpf_core_read() and bpf_core_field_exists(). A corresponding set of
selftests is added to exercise this logic and validate correctness in a
variety of scenarios.
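
To illustrate the intended usage, here is a sketch (the struct and field
names below are made up for illustration, they are not part of the patches):

  struct some_type *t = ...;
  unsigned long long val;
  __u32 sz;

  /* direct-read variant, for program types that can read memory directly */
  val = BPF_CORE_READ_BITFIELD(t, some_bitfield);

  /* bpf_probe_read()-based variant; evaluates to 0 on success, <0 on error */
  if (BPF_CORE_READ_BITFIELD_PROBED(t, some_bitfield, &val))
      return 0;

  /* relocatable field size of any non-bitfield member */
  sz = bpf_core_field_size(t->some_array);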

Some of the overly strict field-matching logic is relaxed to support a wider
variety of scenarios. See patch #1 for that.

Patch #1 removes a few overly strict test cases.
Patch #2 adds support for bitfield-related relocations.
Patch #3 adds some further adjustments to support generic field size
relocations and introduces bpf_core_field_size() macro.
Patch #4 tests bitfield reading.
Patch #5 tests field size relocations.

v1 -> v2:
  - added a direct memory read-based macro and tests for bitfield reads.
====================
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parents 1574cf83 0b163565
@@ -12,9 +12,81 @@
*/
enum bpf_field_info_kind {
BPF_FIELD_BYTE_OFFSET = 0, /* field byte offset */
BPF_FIELD_BYTE_SIZE = 1, /* field size in bytes */
BPF_FIELD_EXISTS = 2, /* field existence in target kernel */
BPF_FIELD_SIGNED = 3, /* field signedness (0 - unsigned, 1 - signed) */
BPF_FIELD_LSHIFT_U64 = 4, /* bitfield-specific left bitshift */
BPF_FIELD_RSHIFT_U64 = 5, /* bitfield-specific right bitshift */
};
#define __CORE_RELO(src, field, info) \
__builtin_preserve_field_info((src)->field, BPF_FIELD_##info)
#if __BYTE_ORDER == __LITTLE_ENDIAN
#define __CORE_BITFIELD_PROBE_READ(dst, src, fld) \
bpf_probe_read((void *)dst, \
__CORE_RELO(src, fld, BYTE_SIZE), \
(const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET))
#else
/* semantics of LSHIFT_U64 assume loading values into low-order bytes, so
* for big-endian we need to adjust the destination pointer accordingly,
* based on field byte size
*/
#define __CORE_BITFIELD_PROBE_READ(dst, src, fld) \
bpf_probe_read((void *)dst + (8 - __CORE_RELO(src, fld, BYTE_SIZE)), \
__CORE_RELO(src, fld, BYTE_SIZE), \
(const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET))
#endif
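/* Illustrative example: with BYTE_SIZE == 2 on big-endian, bpf_probe_read()
* above fills bytes 6 and 7 of the 8-byte destination, i.e., the low-order
* bytes of the resulting u64, so the LSHIFT_U64/RSHIFT_U64 arithmetic below
* operates on the same u64 bit positions as it would on little-endian.
*/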
/*
* Extract bitfield, identified by src->field, and put its value into u64
* *res. All this is done in a relocatable manner, so bitfield changes in the
* target kernel (signedness, bit size, byte offset) are handled
* automatically. This version of the macro uses bpf_probe_read() to read the
* underlying integer storage. The macro works as an expression whose value is
* bpf_probe_read()'s return value: 0 on success, <0 on error.
*/
#define BPF_CORE_READ_BITFIELD_PROBED(src, field, res) ({ \
unsigned long long val; \
\
*res = 0; \
val = __CORE_BITFIELD_PROBE_READ(res, src, field); \
if (!val) { \
*res <<= __CORE_RELO(src, field, LSHIFT_U64); \
val = __CORE_RELO(src, field, RSHIFT_U64); \
if (__CORE_RELO(src, field, SIGNED)) \
*res = ((long long)*res) >> val; \
else \
*res = ((unsigned long long)*res) >> val; \
val = 0; \
} \
val; \
})
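/* Example usage (the field name is illustrative):
*
*	unsigned long long val;
*
*	if (BPF_CORE_READ_BITFIELD_PROBED(task, some_bitfield, &val))
*		return 0;
*
* On success, val holds the zero- or sign-extended bitfield value.
*/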
/*
* Extract bitfield, identified by src->field, and return its value as u64.
* This version of the macro uses direct memory reads and should be used from
* BPF program types that support such functionality (e.g., typed raw
* tracepoints).
*/
#define BPF_CORE_READ_BITFIELD(s, field) ({ \
const void *p = (const void *)s + __CORE_RELO(s, field, BYTE_OFFSET); \
unsigned long long val; \
\
switch (__CORE_RELO(s, field, BYTE_SIZE)) { \
case 1: val = *(const unsigned char *)p; break; \
case 2: val = *(const unsigned short *)p; break; \
case 4: val = *(const unsigned int *)p; break; \
case 8: val = *(const unsigned long long *)p; break; \
} \
val <<= __CORE_RELO(s, field, LSHIFT_U64); \
if (__CORE_RELO(s, field, SIGNED)) \
val = ((long long)val) >> __CORE_RELO(s, field, RSHIFT_U64); \
else \
val = val >> __CORE_RELO(s, field, RSHIFT_U64); \
val; \
})
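/* A worked example (little-endian, layout for illustration only): given
*
*	struct s { unsigned char a: 1, b: 2; };
*
* reading b resolves to BYTE_OFFSET = 0, BYTE_SIZE = 1, LSHIFT_U64 =
* 64 - (1 + 2) = 61 and RSHIFT_U64 = 64 - 2 = 62: the single storage byte is
* loaded, shifted left by 61 so that b's two bits become the topmost bits of
* the u64, then shifted right by 62, which extracts the bits and
* zero-extends them (an arithmetic shift would sign-extend instead).
*/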
/*
* Convenience macro to check that a field actually exists in the target kernel.
* Returns:
@@ -24,6 +96,13 @@ enum bpf_field_info_kind {
#define bpf_core_field_exists(field) \
__builtin_preserve_field_info(field, BPF_FIELD_EXISTS)
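/* Example (names illustrative):
*
*	if (bpf_core_field_exists(task->some_field))
*		bpf_core_read(&val, sizeof(val), &task->some_field);
*/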
/*
* Convenience macro to get the byte size of a field. Works for integers,
* structs/unions, pointers, arrays, and enums.
*/
#define bpf_core_field_size(field) \
__builtin_preserve_field_info(field, BPF_FIELD_BYTE_SIZE)
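/* Example: read exactly as many bytes as the field has on the running
* kernel (names illustrative):
*
*	sz = bpf_core_field_size(task->some_field);
*	bpf_core_read(&buf, sz, &task->some_field);
*/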
/*
* bpf_core_read() abstracts away bpf_probe_read() call and captures offset
* relocation for source address using __builtin_preserve_access_index()
...
@@ -2470,8 +2470,8 @@ struct bpf_core_spec {
int raw_spec[BPF_CORE_SPEC_MAX_LEN];
/* raw spec length */
int raw_len;
-/* field byte offset represented by spec */
-__u32 offset;
+/* field bit offset represented by spec */
+__u32 bit_offset;
};
static bool str_is_empty(const char *s)
@@ -2482,8 +2482,8 @@ static bool str_is_empty(const char *s)
/*
* Turn bpf_field_reloc into a low- and high-level spec representation,
* validating correctness along the way, as well as calculating resulting
-* field offset (in bytes), specified by accessor string. Low-level spec
-* captures every single level of nestedness, including traversing anonymous
+* field bit offset, specified by accessor string. Low-level spec captures
+* every single level of nestedness, including traversing anonymous
* struct/union members. High-level one only captures semantically meaningful
* "turning points": named fields and array indicies.
* E.g., for this case:
@@ -2555,7 +2555,7 @@ static int bpf_core_spec_parse(const struct btf *btf,
sz = btf__resolve_size(btf, id);
if (sz < 0)
return sz;
-spec->offset = access_idx * sz;
+spec->bit_offset = access_idx * sz * 8;
for (i = 1; i < spec->raw_len; i++) {
t = skip_mods_and_typedefs(btf, id, &id);
@@ -2566,17 +2566,13 @@ static int bpf_core_spec_parse(const struct btf *btf,
if (btf_is_composite(t)) {
const struct btf_member *m;
-__u32 offset;
+__u32 bit_offset;
if (access_idx >= btf_vlen(t))
return -EINVAL;
-if (btf_member_bitfield_size(t, access_idx))
-return -EINVAL;
-offset = btf_member_bit_offset(t, access_idx);
-if (offset % 8)
-return -EINVAL;
-spec->offset += offset / 8;
+bit_offset = btf_member_bit_offset(t, access_idx);
+spec->bit_offset += bit_offset;
m = btf_members(t) + access_idx;
if (m->name_off) {
@@ -2605,7 +2601,7 @@ static int bpf_core_spec_parse(const struct btf *btf,
sz = btf__resolve_size(btf, id);
if (sz < 0)
return sz;
-spec->offset += access_idx * sz;
+spec->bit_offset += access_idx * sz * 8;
} else {
pr_warn("relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %d\n",
type_id, spec_str, i, id, btf_kind(t));
@@ -2706,12 +2702,14 @@ static struct ids_vec *bpf_core_find_cands(const struct btf *local_btf,
}
/* Check two types for compatibility, skipping const/volatile/restrict and
-* typedefs, to ensure we are relocating offset to the compatible entities:
+* typedefs, to ensure we are relocating compatible entities:
* - any two STRUCTs/UNIONs are compatible and can be mixed;
-* - any two FWDs are compatible;
+* - any two FWDs are compatible, if their names match (modulo flavor suffix);
* - any two PTRs are always compatible;
+* - for ENUMs, names should be the same (ignoring flavor suffix) or at
+* least one of enums should be anonymous;
-* - for ENUMs, check sizes, names are ignored;
-* - for INT, size and bitness should match, signedness is ignored;
+* - for INT, size and signedness are ignored;
* - for ARRAY, dimensionality is ignored, element types are checked for
* compatibility recursively;
* - everything else shouldn't ever be a target of relocation.
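* E.g., a local FWD/ENUM named "task_struct___my_flavor" matches a target
* named "task_struct": the "___" flavor suffix and everything after it is
* ignored when names are compared.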
@@ -2737,16 +2735,29 @@ static int bpf_core_fields_are_compat(const struct btf *local_btf,
return 0;
switch (btf_kind(local_type)) {
-case BTF_KIND_FWD:
case BTF_KIND_PTR:
return 1;
-case BTF_KIND_ENUM:
-return local_type->size == targ_type->size;
+case BTF_KIND_FWD:
+case BTF_KIND_ENUM: {
const char *local_name, *targ_name;
size_t local_len, targ_len;
local_name = btf__name_by_offset(local_btf,
local_type->name_off);
targ_name = btf__name_by_offset(targ_btf, targ_type->name_off);
local_len = bpf_core_essential_name_len(local_name);
targ_len = bpf_core_essential_name_len(targ_name);
/* one of them is anonymous or both w/ same flavor-less names */
return local_len == 0 || targ_len == 0 ||
(local_len == targ_len &&
strncmp(local_name, targ_name, local_len) == 0);
}
case BTF_KIND_INT:
/* just reject deprecated bitfield-like integers; all other
* integers are by default compatible between each other
*/
return btf_int_offset(local_type) == 0 &&
-btf_int_offset(targ_type) == 0 &&
-local_type->size == targ_type->size &&
-btf_int_bits(local_type) == btf_int_bits(targ_type);
+btf_int_offset(targ_type) == 0;
case BTF_KIND_ARRAY:
local_id = btf_array(local_type)->type;
targ_id = btf_array(targ_type)->type;
@@ -2762,7 +2773,7 @@ static int bpf_core_fields_are_compat(const struct btf *local_btf,
* Given single high-level named field accessor in local type, find
* corresponding high-level accessor for a target type. Along the way,
* maintain low-level spec for target as well. Also keep updating target
-* offset.
+* bit offset.
*
* Searching is performed through recursive exhaustive enumeration of all
* fields of a struct/union. If there are any anonymous (embedded)
@@ -2801,21 +2812,16 @@ static int bpf_core_match_member(const struct btf *local_btf,
n = btf_vlen(targ_type);
m = btf_members(targ_type);
for (i = 0; i < n; i++, m++) {
-__u32 offset;
+__u32 bit_offset;
-/* bitfield relocations not supported */
-if (btf_member_bitfield_size(targ_type, i))
-continue;
-offset = btf_member_bit_offset(targ_type, i);
-if (offset % 8)
-continue;
+bit_offset = btf_member_bit_offset(targ_type, i);
/* too deep struct/union/array nesting */
if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
return -E2BIG;
/* speculate this member will be the good one */
-spec->offset += offset / 8;
+spec->bit_offset += bit_offset;
spec->raw_spec[spec->raw_len++] = i;
targ_name = btf__name_by_offset(targ_btf, m->name_off);
@@ -2844,7 +2850,7 @@ static int bpf_core_match_member(const struct btf *local_btf,
return found;
}
/* member turned out not to be what we looked for */
-spec->offset -= offset / 8;
+spec->bit_offset -= bit_offset;
spec->raw_len--;
}
@@ -2853,7 +2859,7 @@ static int bpf_core_match_member(const struct btf *local_btf,
/*
* Try to match local spec to a target type and, if successful, produce full
-* target spec (high-level, low-level + offset).
+* target spec (high-level, low-level + bit offset).
*/
static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
const struct btf *targ_btf, __u32 targ_id,
@@ -2916,13 +2922,120 @@ static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
sz = btf__resolve_size(targ_btf, targ_id);
if (sz < 0)
return sz;
-targ_spec->offset += local_acc->idx * sz;
+targ_spec->bit_offset += local_acc->idx * sz * 8;
}
}
return 1;
}
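/* Compute the value that a field relocation of the given kind should
* resolve to against the given spec: byte offset/size, signedness, or the
* bitfield-specific left/right shift amounts. For bitfields, byte
* offset/size can legitimately be computed differently than the compiler
* did it, so *validate is set to false in that case to skip checking the
* instruction's original immediate value.
*/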
static int bpf_core_calc_field_relo(const struct bpf_program *prog,
const struct bpf_field_reloc *relo,
const struct bpf_core_spec *spec,
__u32 *val, bool *validate)
{
const struct bpf_core_accessor *acc = &spec->spec[spec->len - 1];
const struct btf_type *t = btf__type_by_id(spec->btf, acc->type_id);
__u32 byte_off, byte_sz, bit_off, bit_sz;
const struct btf_member *m;
const struct btf_type *mt;
bool bitfield;
__s64 sz;
/* a[n] accessor needs special handling */
if (!acc->name) {
if (relo->kind == BPF_FIELD_BYTE_OFFSET) {
*val = spec->bit_offset / 8;
} else if (relo->kind == BPF_FIELD_BYTE_SIZE) {
sz = btf__resolve_size(spec->btf, acc->type_id);
if (sz < 0)
return -EINVAL;
*val = sz;
} else {
pr_warn("prog '%s': relo %d at insn #%d can't be applied to array access\n",
bpf_program__title(prog, false),
relo->kind, relo->insn_off / 8);
return -EINVAL;
}
if (validate)
*validate = true;
return 0;
}
m = btf_members(t) + acc->idx;
mt = skip_mods_and_typedefs(spec->btf, m->type, NULL);
bit_off = spec->bit_offset;
bit_sz = btf_member_bitfield_size(t, acc->idx);
bitfield = bit_sz > 0;
if (bitfield) {
byte_sz = mt->size;
byte_off = bit_off / 8 / byte_sz * byte_sz;
/* figure out smallest int size necessary for bitfield load */
while (bit_off + bit_sz - byte_off * 8 > byte_sz * 8) {
if (byte_sz >= 8) {
/* bitfield can't be read with 64-bit read */
pr_warn("prog '%s': relo %d at insn #%d can't be satisfied for bitfield\n",
bpf_program__title(prog, false),
relo->kind, relo->insn_off / 8);
return -E2BIG;
}
byte_sz *= 2;
byte_off = bit_off / 8 / byte_sz * byte_sz;
}
} else {
sz = btf__resolve_size(spec->btf, m->type);
if (sz < 0)
return -EINVAL;
byte_sz = sz;
byte_off = spec->bit_offset / 8;
bit_sz = byte_sz * 8;
}
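/* e.g., a 20-bit bitfield at bit offset 30 within int-backed storage does
* not fit a 4-byte load (30 + 20 > 32), so the loop above doubles byte_sz
* to 8 (with byte_off 0) and the field is fetched with one 8-byte load
* covering bits [30, 50); numbers are illustrative.
*/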
/* for bitfields, all the relocatable aspects are ambiguous and we
* might disagree with the compiler, so turn off validation of the
* expected value, except for signedness
*/
if (validate)
*validate = !bitfield;
switch (relo->kind) {
case BPF_FIELD_BYTE_OFFSET:
*val = byte_off;
break;
case BPF_FIELD_BYTE_SIZE:
*val = byte_sz;
break;
case BPF_FIELD_SIGNED:
/* enums will be assumed unsigned */
*val = btf_is_enum(mt) ||
(btf_int_encoding(mt) & BTF_INT_SIGNED);
if (validate)
*validate = true; /* signedness is never ambiguous */
break;
case BPF_FIELD_LSHIFT_U64:
#if __BYTE_ORDER == __LITTLE_ENDIAN
*val = 64 - (bit_off + bit_sz - byte_off * 8);
#else
*val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8);
#endif
break;
case BPF_FIELD_RSHIFT_U64:
*val = 64 - bit_sz;
if (validate)
*validate = true; /* right shift is never ambiguous */
break;
case BPF_FIELD_EXISTS:
default:
pr_warn("prog '%s': unknown relo %d at insn #%d\n",
bpf_program__title(prog, false),
relo->kind, relo->insn_off / 8);
return -EINVAL;
}
return 0;
}
/*
* Patch relocatable BPF instruction.
*
@@ -2942,36 +3055,31 @@ static int bpf_core_reloc_insn(struct bpf_program *prog,
const struct bpf_core_spec *local_spec,
const struct bpf_core_spec *targ_spec)
{
+bool failed = false, validate = true;
__u32 orig_val, new_val;
struct bpf_insn *insn;
-int insn_idx;
+int insn_idx, err;
__u8 class;
if (relo->insn_off % sizeof(struct bpf_insn))
return -EINVAL;
insn_idx = relo->insn_off / sizeof(struct bpf_insn);
-switch (relo->kind) {
-case BPF_FIELD_BYTE_OFFSET:
-orig_val = local_spec->offset;
-if (targ_spec) {
-new_val = targ_spec->offset;
-} else {
-pr_warn("prog '%s': patching insn #%d w/ failed reloc, imm %d -> %d\n",
-bpf_program__title(prog, false), insn_idx,
-orig_val, -1);
-new_val = (__u32)-1;
-}
-break;
-case BPF_FIELD_EXISTS:
+if (relo->kind == BPF_FIELD_EXISTS) {
orig_val = 1; /* can't generate EXISTS relo w/o local field */
new_val = targ_spec ? 1 : 0;
-break;
-default:
-pr_warn("prog '%s': unknown relo %d at insn #%d'\n",
-bpf_program__title(prog, false),
-relo->kind, insn_idx);
-return -EINVAL;
+} else if (!targ_spec) {
+failed = true;
+new_val = (__u32)-1;
+} else {
+err = bpf_core_calc_field_relo(prog, relo, local_spec,
+&orig_val, &validate);
+if (err)
+return err;
+err = bpf_core_calc_field_relo(prog, relo, targ_spec,
+&new_val, NULL);
+if (err)
+return err;
}
insn = &prog->insns[insn_idx];
@@ -2980,12 +3088,17 @@ static int bpf_core_reloc_insn(struct bpf_program *prog,
if (class == BPF_ALU || class == BPF_ALU64) {
if (BPF_SRC(insn->code) != BPF_K)
return -EINVAL;
-if (insn->imm != orig_val)
+if (!failed && validate && insn->imm != orig_val) {
+pr_warn("prog '%s': unexpected insn #%d value: got %u, exp %u -> %u\n",
+bpf_program__title(prog, false), insn_idx,
+insn->imm, orig_val, new_val);
return -EINVAL;
+}
+orig_val = insn->imm;
insn->imm = new_val;
-pr_debug("prog '%s': patched insn #%d (ALU/ALU64) imm %d -> %d\n",
-bpf_program__title(prog, false),
-insn_idx, orig_val, new_val);
+pr_debug("prog '%s': patched insn #%d (ALU/ALU64)%s imm %u -> %u\n",
+bpf_program__title(prog, false), insn_idx,
+failed ? " w/ failed reloc" : "", orig_val, new_val);
} else {
pr_warn("prog '%s': trying to relocate unrecognized insn #%d, code:%x, src:%x, dst:%x, off:%x, imm:%x\n",
bpf_program__title(prog, false),
@@ -3103,7 +3216,8 @@ static void bpf_core_dump_spec(int level, const struct bpf_core_spec *spec)
libbpf_print(level, "%d%s", spec->raw_spec[i],
i == spec->raw_len - 1 ? " => " : ":");
libbpf_print(level, "%u @ &x", spec->offset);
libbpf_print(level, "%u.%u @ &x",
spec->bit_offset / 8, spec->bit_offset % 8);
for (i = 0; i < spec->len; i++) {
if (spec->spec[i].name)
@@ -3217,7 +3331,8 @@ static int bpf_core_reloc_field(struct bpf_program *prog,
return -EINVAL;
}
pr_debug("prog '%s': relo #%d: spec is ", prog_name, relo_idx);
pr_debug("prog '%s': relo #%d: kind %d, spec is ", prog_name, relo_idx,
relo->kind);
bpf_core_dump_spec(LIBBPF_DEBUG, &local_spec);
libbpf_print(LIBBPF_DEBUG, "\n");
@@ -3257,13 +3372,13 @@ static int bpf_core_reloc_field(struct bpf_program *prog,
if (j == 0) {
targ_spec = cand_spec;
-} else if (cand_spec.offset != targ_spec.offset) {
+} else if (cand_spec.bit_offset != targ_spec.bit_offset) {
/* if there are many candidates, they should all
-* resolve to the same offset
+* resolve to the same bit offset
*/
pr_warn("prog '%s': relo #%d: offset ambiguity: %u != %u\n",
-prog_name, relo_idx, cand_spec.offset,
-targ_spec.offset);
+prog_name, relo_idx, cand_spec.bit_offset,
+targ_spec.bit_offset);
return -EINVAL;
}
...
@@ -158,7 +158,11 @@ struct bpf_line_info_min {
*/
enum bpf_field_info_kind {
BPF_FIELD_BYTE_OFFSET = 0, /* field byte offset */
BPF_FIELD_BYTE_SIZE = 1, /* field size in bytes */
BPF_FIELD_EXISTS = 2, /* field existence in target kernel */
BPF_FIELD_SIGNED = 3, /* field signedness (0 - unsigned, 1 - signed) */
BPF_FIELD_LSHIFT_U64 = 4, /* bitfield-specific left bitshift */
BPF_FIELD_RSHIFT_U64 = 5, /* bitfield-specific right bitshift */
};
/* The minimum bpf_field_reloc checked by the loader
...
@@ -174,21 +174,82 @@
.fails = true, \
}
#define EXISTENCE_DATA(struct_name) STRUCT_TO_CHAR_PTR(struct_name) { \
.a = 42, \
}
#define EXISTENCE_CASE_COMMON(name) \
.case_name = #name, \
.bpf_obj_file = "test_core_reloc_existence.o", \
.btf_src_file = "btf__core_reloc_" #name ".o", \
-.relaxed_core_relocs = true \
+.relaxed_core_relocs = true
#define EXISTENCE_ERR_CASE(name) { \
EXISTENCE_CASE_COMMON(name), \
.fails = true, \
}
#define BITFIELDS_CASE_COMMON(objfile, test_name_prefix, name) \
.case_name = test_name_prefix#name, \
.bpf_obj_file = objfile, \
.btf_src_file = "btf__core_reloc_" #name ".o"
#define BITFIELDS_CASE(name, ...) { \
BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_probed.o", \
"direct:", name), \
.input = STRUCT_TO_CHAR_PTR(core_reloc_##name) __VA_ARGS__, \
.input_len = sizeof(struct core_reloc_##name), \
.output = STRUCT_TO_CHAR_PTR(core_reloc_bitfields_output) \
__VA_ARGS__, \
.output_len = sizeof(struct core_reloc_bitfields_output), \
}, { \
BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_direct.o", \
"probed:", name), \
.input = STRUCT_TO_CHAR_PTR(core_reloc_##name) __VA_ARGS__, \
.input_len = sizeof(struct core_reloc_##name), \
.output = STRUCT_TO_CHAR_PTR(core_reloc_bitfields_output) \
__VA_ARGS__, \
.output_len = sizeof(struct core_reloc_bitfields_output), \
.direct_raw_tp = true, \
}
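/* each BITFIELDS_CASE() above expands into two test entries: one running
* the bpf_probe_read()-based reader and one running the direct-read
* variant, which needs a typed raw tracepoint (hence .direct_raw_tp).
*/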
#define BITFIELDS_ERR_CASE(name) { \
BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_probed.o", \
"probed:", name), \
.fails = true, \
}, { \
BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_direct.o", \
"direct:", name), \
.direct_raw_tp = true, \
.fails = true, \
}
#define SIZE_CASE_COMMON(name) \
.case_name = #name, \
.bpf_obj_file = "test_core_reloc_size.o", \
.btf_src_file = "btf__core_reloc_" #name ".o", \
.relaxed_core_relocs = true
#define SIZE_OUTPUT_DATA(type) \
STRUCT_TO_CHAR_PTR(core_reloc_size_output) { \
.int_sz = sizeof(((type *)0)->int_field), \
.struct_sz = sizeof(((type *)0)->struct_field), \
.union_sz = sizeof(((type *)0)->union_field), \
.arr_sz = sizeof(((type *)0)->arr_field), \
.arr_elem_sz = sizeof(((type *)0)->arr_field[0]), \
.ptr_sz = sizeof(((type *)0)->ptr_field), \
.enum_sz = sizeof(((type *)0)->enum_field), \
}
#define SIZE_CASE(name) { \
SIZE_CASE_COMMON(name), \
.input_len = 0, \
.output = SIZE_OUTPUT_DATA(struct core_reloc_##name), \
.output_len = sizeof(struct core_reloc_size_output), \
}
#define SIZE_ERR_CASE(name) { \
SIZE_CASE_COMMON(name), \
.fails = true, \
}
struct core_reloc_test_case {
const char *case_name;
const char *bpf_obj_file;
@@ -199,6 +260,7 @@ struct core_reloc_test_case {
int output_len;
bool fails;
bool relaxed_core_relocs;
bool direct_raw_tp;
};
static struct core_reloc_test_case test_cases[] = {
@@ -352,6 +414,44 @@ static struct core_reloc_test_case test_cases[] = {
EXISTENCE_ERR_CASE(existence__err_arr_kind),
EXISTENCE_ERR_CASE(existence__err_arr_value_type),
EXISTENCE_ERR_CASE(existence__err_struct_type),
/* bitfield relocation checks */
BITFIELDS_CASE(bitfields, {
.ub1 = 1,
.ub2 = 2,
.ub7 = 96,
.sb4 = -7,
.sb20 = -0x76543,
.u32 = 0x80000000,
.s32 = -0x76543210,
}),
BITFIELDS_CASE(bitfields___bit_sz_change, {
.ub1 = 6,
.ub2 = 0xABCDE,
.ub7 = 1,
.sb4 = -1,
.sb20 = -0x17654321,
.u32 = 0xBEEF,
.s32 = -0x3FEDCBA987654321,
}),
BITFIELDS_CASE(bitfields___bitfield_vs_int, {
.ub1 = 0xFEDCBA9876543210,
.ub2 = 0xA6,
.ub7 = -0x7EDCBA987654321,
.sb4 = -0x6123456789ABCDE,
.sb20 = 0xD00D,
.u32 = -0x76543,
.s32 = 0x0ADEADBEEFBADB0B,
}),
BITFIELDS_CASE(bitfields___just_big_enough, {
.ub1 = 0xF,
.ub2 = 0x0812345678FEDCBA,
}),
BITFIELDS_ERR_CASE(bitfields___err_too_big_bitfield),
/* size relocation checks */
SIZE_CASE(size),
SIZE_CASE(size___diff_sz),
};
struct data {
@@ -361,9 +461,9 @@ struct data {
void test_core_reloc(void)
{
-const char *probe_name = "raw_tracepoint/sys_enter";
struct bpf_object_load_attr load_attr = {};
struct core_reloc_test_case *test_case;
+const char *tp_name, *probe_name;
int err, duration = 0, i, equal;
struct bpf_link *link = NULL;
struct bpf_map *data_map;
@@ -387,6 +487,15 @@ void test_core_reloc(void)
test_case->bpf_obj_file, PTR_ERR(obj)))
continue;
/* for typed raw tracepoints, NULL should be specified */
if (test_case->direct_raw_tp) {
probe_name = "tp_btf/sys_enter";
tp_name = NULL;
} else {
probe_name = "raw_tracepoint/sys_enter";
tp_name = "sys_enter";
}
prog = bpf_object__find_program_by_title(obj, probe_name);
if (CHECK(!prog, "find_probe",
"prog '%s' not found\n", probe_name))
@@ -407,7 +516,7 @@
goto cleanup;
}
link = bpf_program__attach_raw_tracepoint(prog, "sys_enter");
link = bpf_program__attach_raw_tracepoint(prog, tp_name);
if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n",
PTR_ERR(link)))
goto cleanup;
...
#include "core_reloc_types.h"
void f(struct core_reloc_ints___err_wrong_sz_16 x) {}
void f(struct core_reloc_arrays___err_wrong_val_type x) {}
#include "core_reloc_types.h"
void f(struct core_reloc_arrays___err_wrong_val_type2 x) {}
#include "core_reloc_types.h"
void f(struct core_reloc_ints___err_bitfield x) {}
void f(struct core_reloc_bitfields x) {}
#include "core_reloc_types.h"
void f(struct core_reloc_ints___err_wrong_sz_32 x) {}
void f(struct core_reloc_bitfields___bit_sz_change x) {}
#include "core_reloc_types.h"
void f(struct core_reloc_bitfields___bitfield_vs_int x) {}
#include "core_reloc_types.h"
void f(struct core_reloc_arrays___err_wrong_val_type1 x) {}
void f(struct core_reloc_bitfields___err_too_big_bitfield x) {}
#include "core_reloc_types.h"
void f(struct core_reloc_bitfields___just_big_enough x) {}
#include "core_reloc_types.h"
void f(struct core_reloc_ints___err_wrong_sz_8 x) {}
void f(struct core_reloc_size x) {}
#include "core_reloc_types.h"
void f(struct core_reloc_ints___err_wrong_sz_64 x) {}
void f(struct core_reloc_size___diff_sz x) {}
@@ -386,14 +386,7 @@ struct core_reloc_arrays___err_non_array {
struct core_reloc_arrays_substruct d[1][2];
};
-struct core_reloc_arrays___err_wrong_val_type1 {
-char a[5]; /* char instead of int */
-char b[2][3][4];
-struct core_reloc_arrays_substruct c[3];
-struct core_reloc_arrays_substruct d[1][2];
-};
-struct core_reloc_arrays___err_wrong_val_type2 {
+struct core_reloc_arrays___err_wrong_val_type {
int a[5];
char b[2][3][4];
int c[3]; /* value is not a struct */
@@ -589,67 +582,6 @@ struct core_reloc_ints___bool {
int64_t s64_field;
};
-struct core_reloc_ints___err_bitfield {
-uint8_t u8_field;
-int8_t s8_field;
-uint16_t u16_field;
-int16_t s16_field;
-uint32_t u32_field: 32; /* bitfields are not supported */
-int32_t s32_field;
-uint64_t u64_field;
-int64_t s64_field;
-};
-struct core_reloc_ints___err_wrong_sz_8 {
-uint16_t u8_field; /* not 8-bit anymore */
-int16_t s8_field; /* not 8-bit anymore */
-uint16_t u16_field;
-int16_t s16_field;
-uint32_t u32_field;
-int32_t s32_field;
-uint64_t u64_field;
-int64_t s64_field;
-};
-struct core_reloc_ints___err_wrong_sz_16 {
-uint8_t u8_field;
-int8_t s8_field;
-uint32_t u16_field; /* not 16-bit anymore */
-int32_t s16_field; /* not 16-bit anymore */
-uint32_t u32_field;
-int32_t s32_field;
-uint64_t u64_field;
-int64_t s64_field;
-};
-struct core_reloc_ints___err_wrong_sz_32 {
-uint8_t u8_field;
-int8_t s8_field;
-uint16_t u16_field;
-int16_t s16_field;
-uint64_t u32_field; /* not 32-bit anymore */
-int64_t s32_field; /* not 32-bit anymore */
-uint64_t u64_field;
-int64_t s64_field;
-};
-struct core_reloc_ints___err_wrong_sz_64 {
-uint8_t u8_field;
-int8_t s8_field;
-uint16_t u16_field;
-int16_t s16_field;
-uint32_t u32_field;
-int32_t s32_field;
-uint32_t u64_field; /* not 64-bit anymore */
-int32_t s64_field; /* not 64-bit anymore */
-};
/*
* MISC
*/
@@ -730,3 +662,106 @@ struct core_reloc_existence___err_wrong_arr_value_type {
struct core_reloc_existence___err_wrong_struct_type {
int s;
};
/*
* BITFIELDS
*/
/* bitfield read results, all as plain integers */
struct core_reloc_bitfields_output {
int64_t ub1;
int64_t ub2;
int64_t ub7;
int64_t sb4;
int64_t sb20;
int64_t u32;
int64_t s32;
};
struct core_reloc_bitfields {
/* unsigned bitfields */
uint8_t ub1: 1;
uint8_t ub2: 2;
uint32_t ub7: 7;
/* signed bitfields */
int8_t sb4: 4;
int32_t sb20: 20;
/* non-bitfields */
uint32_t u32;
int32_t s32;
};
/* different bit sizes (both up and down) */
struct core_reloc_bitfields___bit_sz_change {
/* unsigned bitfields */
uint16_t ub1: 3; /* 1 -> 3 */
uint32_t ub2: 20; /* 2 -> 20 */
uint8_t ub7: 1; /* 7 -> 1 */
/* signed bitfields */
int8_t sb4: 1; /* 4 -> 1 */
int32_t sb20: 30; /* 20 -> 30 */
/* non-bitfields */
uint16_t u32; /* 32 -> 16 */
int64_t s32; /* 32 -> 64 */
};
/* turn bitfield into non-bitfield and vice versa */
struct core_reloc_bitfields___bitfield_vs_int {
uint64_t ub1; /* 1 -> 64 non-bitfield */
uint8_t ub2; /* 2 -> 8 non-bitfield */
int64_t ub7; /* 7 -> 64 non-bitfield signed */
int64_t sb4; /* 4 -> 64 non-bitfield signed */
uint64_t sb20; /* 20 -> 64 non-bitfield unsigned */
int32_t u32: 20; /* 32 non-bitfield -> 20 bitfield */
uint64_t s32: 60; /* 32 non-bitfield -> 60 bitfield */
};
struct core_reloc_bitfields___just_big_enough {
uint64_t ub1: 4;
uint64_t ub2: 60; /* packed tightly */
uint32_t ub7;
uint32_t sb4;
uint32_t sb20;
uint32_t u32;
uint32_t s32;
} __attribute__((packed));
struct core_reloc_bitfields___err_too_big_bitfield {
uint64_t ub1: 4;
uint64_t ub2: 61; /* packed tightly */
uint32_t ub7;
uint32_t sb4;
uint32_t sb20;
uint32_t u32;
uint32_t s32;
} __attribute__((packed));
/*
* SIZE
*/
struct core_reloc_size_output {
int int_sz;
int struct_sz;
int union_sz;
int arr_sz;
int arr_elem_sz;
int ptr_sz;
int enum_sz;
};
struct core_reloc_size {
int int_field;
struct { int x; } struct_field;
union { int x; } union_field;
int arr_field[4];
void *ptr_field;
enum { VALUE = 123 } enum_field;
};
struct core_reloc_size___diff_sz {
uint64_t int_field;
struct { int x; int y; int z; } struct_field;
union { int x; char bla[123]; } union_field;
char arr_field[10];
void *ptr_field;
enum { OTHER_VALUE = 0xFFFFFFFFFFFFFFFF } enum_field;
};
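/* with ___diff_sz, bpf_core_field_size() in the BPF program is expected to
* report the changed sizes at runtime, e.g., int_sz == 8, struct_sz == 12,
* arr_sz == 10, arr_elem_sz == 1 (illustrative arithmetic for this layout).
*/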
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <linux/bpf.h>
#include <stdint.h>
#include "bpf_helpers.h"
#include "bpf_core_read.h"
char _license[] SEC("license") = "GPL";
static volatile struct data {
char in[256];
char out[256];
} data;
struct core_reloc_bitfields {
/* unsigned bitfields */
uint8_t ub1: 1;
uint8_t ub2: 2;
uint32_t ub7: 7;
/* signed bitfields */
int8_t sb4: 4;
int32_t sb20: 20;
/* non-bitfields */
uint32_t u32;
int32_t s32;
};
/* bitfield read results, all as plain integers */
struct core_reloc_bitfields_output {
int64_t ub1;
int64_t ub2;
int64_t ub7;
int64_t sb4;
int64_t sb20;
int64_t u32;
int64_t s32;
};
struct pt_regs;
struct trace_sys_enter {
struct pt_regs *regs;
long id;
};
SEC("tp_btf/sys_enter")
int test_core_bitfields_direct(void *ctx)
{
struct core_reloc_bitfields *in = (void *)&data.in;
struct core_reloc_bitfields_output *out = (void *)&data.out;
out->ub1 = BPF_CORE_READ_BITFIELD(in, ub1);
out->ub2 = BPF_CORE_READ_BITFIELD(in, ub2);
out->ub7 = BPF_CORE_READ_BITFIELD(in, ub7);
out->sb4 = BPF_CORE_READ_BITFIELD(in, sb4);
out->sb20 = BPF_CORE_READ_BITFIELD(in, sb20);
out->u32 = BPF_CORE_READ_BITFIELD(in, u32);
out->s32 = BPF_CORE_READ_BITFIELD(in, s32);
return 0;
}
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <linux/bpf.h>
#include <stdint.h>
#include "bpf_helpers.h"
#include "bpf_core_read.h"
char _license[] SEC("license") = "GPL";
static volatile struct data {
char in[256];
char out[256];
} data;
struct core_reloc_bitfields {
/* unsigned bitfields */
uint8_t ub1: 1;
uint8_t ub2: 2;
uint32_t ub7: 7;
/* signed bitfields */
int8_t sb4: 4;
int32_t sb20: 20;
/* non-bitfields */
uint32_t u32;
int32_t s32;
};
/* bitfield read results, all as plain integers */
struct core_reloc_bitfields_output {
int64_t ub1;
int64_t ub2;
int64_t ub7;
int64_t sb4;
int64_t sb20;
int64_t u32;
int64_t s32;
};
#define TRANSFER_BITFIELD(in, out, field) \
if (BPF_CORE_READ_BITFIELD_PROBED(in, field, &res)) \
return 1; \
out->field = res
SEC("raw_tracepoint/sys_enter")
int test_core_bitfields(void *ctx)
{
struct core_reloc_bitfields *in = (void *)&data.in;
struct core_reloc_bitfields_output *out = (void *)&data.out;
uint64_t res;
TRANSFER_BITFIELD(in, out, ub1);
TRANSFER_BITFIELD(in, out, ub2);
TRANSFER_BITFIELD(in, out, ub7);
TRANSFER_BITFIELD(in, out, sb4);
TRANSFER_BITFIELD(in, out, sb20);
TRANSFER_BITFIELD(in, out, u32);
TRANSFER_BITFIELD(in, out, s32);
return 0;
}
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#include <linux/bpf.h>
#include <stdint.h>
#include "bpf_helpers.h"
#include "bpf_core_read.h"
char _license[] SEC("license") = "GPL";
static volatile struct data {
char in[256];
char out[256];
} data;
struct core_reloc_size_output {
int int_sz;
int struct_sz;
int union_sz;
int arr_sz;
int arr_elem_sz;
int ptr_sz;
int enum_sz;
};
struct core_reloc_size {
int int_field;
struct { int x; } struct_field;
union { int x; } union_field;
int arr_field[4];
void *ptr_field;
enum { VALUE = 123 } enum_field;
};
SEC("raw_tracepoint/sys_enter")
int test_core_size(void *ctx)
{
struct core_reloc_size *in = (void *)&data.in;
struct core_reloc_size_output *out = (void *)&data.out;
out->int_sz = bpf_core_field_size(in->int_field);
out->struct_sz = bpf_core_field_size(in->struct_field);
out->union_sz = bpf_core_field_size(in->union_field);
out->arr_sz = bpf_core_field_size(in->arr_field);
out->arr_elem_sz = bpf_core_field_size(in->arr_field[0]);
out->ptr_sz = bpf_core_field_size(in->ptr_field);
out->enum_sz = bpf_core_field_size(in->enum_field);
return 0;
}