Commit 740baecd authored by Alexei Starovoitov

Merge branch 'btf-func-info'

Martin KaFai Lau says:

====================
BTF support was added to the kernel by commit 69b693f0
("bpf: btf: Introduce BPF Type Format (BTF)"), which introduced
the .BTF ELF section and is primarily
used for map pretty printing.
pahole is used to convert DWARF to BTF for ELF files.

This patch set adds func info support to the kernel so we can
get better ksyms for bpf function calls. Basically,
function call types are passed to the kernel, and the kernel
extracts the function names from these types in order to construct
ksyms for these functions.
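
For context, below is a minimal loader-side sketch (not taken from the
patches) of how these pieces fit together, using the prog_btf_fd and
func_info* fields added to struct bpf_load_program_attr later in this
series. The BTF type ids (5 and 6), the subprog offset and the helper
name are made up for illustration.

  /* Minimal sketch, assuming insns[] and a loaded BTF object (btf_fd)
   * are already prepared.  One bpf_func_info record is passed per
   * function (main prog + each subprog), ordered by insn_offset, so
   * the kernel can build full ksyms from the BTF_KIND_FUNC names.
   */
  #include <linux/bpf.h>   /* struct bpf_func_info (new in this series) */
  #include <bpf/bpf.h>     /* bpf_load_program_xattr() */

  static int load_with_func_info(const struct bpf_insn *insns, size_t insns_cnt,
                                 int btf_fd, __u32 subprog_insn_off)
  {
          /* made-up BTF_KIND_FUNC type ids for the main prog and one subprog */
          struct bpf_func_info finfo[2] = {
                  { .insn_offset = 0,                .type_id = 5 },
                  { .insn_offset = subprog_insn_off, .type_id = 6 },
          };
          struct bpf_load_program_attr load_attr = {
                  .prog_type          = BPF_PROG_TYPE_TRACEPOINT,
                  .name               = "dummy_tp",
                  .insns              = insns,
                  .insns_cnt          = insns_cnt,
                  .license            = "GPL",
                  .prog_btf_fd        = btf_fd,      /* fd of the loaded .BTF */
                  .func_info          = finfo,
                  .func_info_rec_size = sizeof(struct bpf_func_info),
                  .func_info_cnt      = 2,
          };

          return bpf_load_program_xattr(&load_attr, NULL, 0);
  }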

The llvm patch at https://reviews.llvm.org/D53736
will generate the .BTF section and one more section, .BTF.ext.
The .BTF.ext section encodes function type
information. The following is sample output from the selftest
test_btf with file test_btf_haskv.o, for translated insns
and jited insns respectively.

  $ bpftool prog dump xlated id 1
  int _dummy_tracepoint(struct dummy_tracepoint_args * arg):
     0: (85) call pc+2#bpf_prog_2dcecc18072623fc_test_long_fname_1
     1: (b7) r0 = 0
     2: (95) exit
  int test_long_fname_1(struct dummy_tracepoint_args * arg):
     3: (85) call pc+1#bpf_prog_89d64e4abf0f0126_test_long_fname_2
     4: (95) exit
  int test_long_fname_2(struct dummy_tracepoint_args * arg):
     5: (b7) r2 = 0
     6: (63) *(u32 *)(r10 -4) = r2
     7: (79) r1 = *(u64 *)(r1 +8)
     ...
     22: (07) r1 += 1
     23: (63) *(u32 *)(r0 +4) = r1
     24: (95) exit

  $ bpftool prog dump jited id 1
  int _dummy_tracepoint(struct dummy_tracepoint_args * arg):
  bpf_prog_b07ccb89267cf242__dummy_tracepoint:
     0:   push   %rbp
     1:   mov    %rsp,%rbp
    ......
    3c:   add    $0x28,%rbp
    40:   leaveq
    41:   retq

  int test_long_fname_1(struct dummy_tracepoint_args * arg):
  bpf_prog_2dcecc18072623fc_test_long_fname_1:
     0:   push   %rbp
     1:   mov    %rsp,%rbp
    ......
    3a:   add    $0x28,%rbp
    3e:   leaveq
    3f:   retq

  int test_long_fname_2(struct dummy_tracepoint_args * arg):
  bpf_prog_89d64e4abf0f0126_test_long_fname_2:
     0:   push   %rbp
     1:   mov    %rsp,%rbp
    ......
    80:   add    $0x28,%rbp
    84:   leaveq
    85:   retq

Changelogs:
  v4 -> v5:
    . Add back BTF_KIND_FUNC_PROTO as v1 did.  The difference
      is that BTF_KIND_FUNC_PROTO cannot have t->name_off now.
      All param metadata is defined in BTF_KIND_FUNC_PROTO.
      BTF_KIND_FUNC must have t->name_off != 0 and its t->type
      must refer to a BTF_KIND_FUNC_PROTO.

      The above is the conclusion after the discussion between
      Edward Cree, Alexei, Daniel, Yonghong and Martin.
  v3 -> v4:
    . Remove BTF_KIND_FUNC_PROTO. BTF_KIND_FUNC is used for
      both function pointers and subprograms. The name_off field
      is used to distinguish between the two.
    . The record size is added to the func_info subsection
      in .BTF.ext to enable future extension.
    . The bpf_prog_info interface is changed to make it similar
      to bpf_prog_load.
    . Related kernel and libbpf changes to accommodate the
      new .BTF.ext and kernel interface changes.
  v2 -> v3:
    . Removed kernel btf extern functions btf_type_id_func()
      and btf_get_name_by_id(). Instead, exposed the existing
      functions btf_type_by_id() and btf_name_by_offset().
    . Added comments about the .BTF.ext ELF section layout.
    . Better code in bpftool as suggested by Edward Cree.
  v1 -> v2:
    . Added missing sign-off.
    . Limited the func_name/struct_member_name length for validity test.
    . Removed/changed several verifier messages.
    . Modified several commit messages to remove line_off reference.
====================
Acked-by: Edward Cree <ecree@solarflare.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents bbe5d311 254471e5
......@@ -316,6 +316,8 @@ struct bpf_prog_aux {
void *security;
#endif
struct bpf_prog_offload *offload;
struct btf *btf;
u32 type_id; /* type id for this prog/func */
union {
struct work_struct work;
struct rcu_head rcu;
......@@ -527,7 +529,8 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
}
/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr);
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr,
union bpf_attr __user *uattr);
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
/* Map specifics */
......
......@@ -204,6 +204,7 @@ static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
struct bpf_subprog_info {
u32 start; /* insn idx of function entry point */
u16 stack_depth; /* max. stack depth used by this function */
u32 type_id; /* btf type_id for this subprog */
};
/* single container for all structs
......
......@@ -46,5 +46,7 @@ void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
struct seq_file *m);
int btf_get_fd_by_id(u32 id);
u32 btf_id(const struct btf *btf);
const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id);
const char *btf_name_by_offset(const struct btf *btf, u32 offset);
#endif
......@@ -338,6 +338,10 @@ union bpf_attr {
* (context accesses, allowed helpers, etc).
*/
__u32 expected_attach_type;
__u32 prog_btf_fd; /* fd pointing to BTF type data */
__u32 func_info_rec_size; /* userspace bpf_func_info size */
__aligned_u64 func_info; /* func info */
__u32 func_info_cnt; /* number of bpf_func_info records */
};
struct { /* anonymous struct used by BPF_OBJ_* commands */
......@@ -2638,6 +2642,10 @@ struct bpf_prog_info {
__u32 nr_jited_func_lens;
__aligned_u64 jited_ksyms;
__aligned_u64 jited_func_lens;
__u32 btf_id;
__u32 func_info_rec_size;
__aligned_u64 func_info;
__u32 func_info_cnt;
} __attribute__((aligned(8)));
struct bpf_map_info {
......@@ -2949,4 +2957,9 @@ struct bpf_flow_keys {
};
};
struct bpf_func_info {
__u32 insn_offset;
__u32 type_id;
};
#endif /* _UAPI__LINUX_BPF_H__ */
......@@ -40,7 +40,8 @@ struct btf_type {
/* "size" is used by INT, ENUM, STRUCT and UNION.
* "size" tells the size of the type it is describing.
*
* "type" is used by PTR, TYPEDEF, VOLATILE, CONST and RESTRICT.
* "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
* FUNC and FUNC_PROTO.
* "type" is a type_id referring to another type.
*/
union {
......@@ -64,8 +65,10 @@ struct btf_type {
#define BTF_KIND_VOLATILE 9 /* Volatile */
#define BTF_KIND_CONST 10 /* Const */
#define BTF_KIND_RESTRICT 11 /* Restrict */
#define BTF_KIND_MAX 11
#define NR_BTF_KINDS 12
#define BTF_KIND_FUNC 12 /* Function */
#define BTF_KIND_FUNC_PROTO 13 /* Function Proto */
#define BTF_KIND_MAX 13
#define NR_BTF_KINDS 14
/* For some specific BTF_KIND, "struct btf_type" is immediately
* followed by extra data.
......@@ -110,4 +113,13 @@ struct btf_member {
__u32 offset; /* offset in bits */
};
/* BTF_KIND_FUNC_PROTO is followed by multiple "struct btf_param".
* The exact number of btf_param is stored in the vlen (of the
* info in "struct btf_type").
*/
struct btf_param {
__u32 name_off;
__u32 type;
};
#endif /* _UAPI__LINUX_BTF_H__ */
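
As a quick illustration of the two new kinds above (and of the v4 -> v5
rule that FUNC_PROTO must be anonymous while FUNC must be named and point
at a FUNC_PROTO), here is a hedged sketch of how int foo(int a, int b)
could be encoded. The type ids, string offsets and the bit-packing
comment are assumptions for illustration, not code from the series.

  /* Illustrative only: describe "int foo(int a, int b)".
   * Assume type id 1 is an existing BTF_KIND_INT for "int", type id 2
   * is the FUNC_PROTO below, and the name_off values point at "a",
   * "b" and "foo" in the BTF string section.
   */
  #include <linux/btf.h>

  /* kind lives in bits 24-27 of "info", vlen in bits 0-15 */
  static const struct btf_type func_proto = {
          .name_off = 0,                               /* must be anonymous */
          .info     = (BTF_KIND_FUNC_PROTO << 24) | 2, /* vlen = 2 params */
          .type     = 1,                               /* return type: int */
  };
  static const struct btf_param func_proto_params[2] = { /* follow the proto */
          { .name_off = 10 /* "a" */,  .type = 1 },
          { .name_off = 12 /* "b" */,  .type = 1 },
  };
  static const struct btf_type func = {
          .name_off = 14,                              /* "foo", must be != 0 */
          .info     = BTF_KIND_FUNC << 24,             /* vlen not used by FUNC */
          .type     = 2,                               /* the FUNC_PROTO above */
  };
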
This diff is collapsed.
......@@ -21,12 +21,14 @@
* Kris Katterjohn - Added many additional checks in bpf_check_classic()
*/
#include <uapi/linux/btf.h>
#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/moduleloader.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/frame.h>
#include <linux/rbtree_latch.h>
#include <linux/kallsyms.h>
......@@ -390,6 +392,8 @@ bpf_get_prog_addr_region(const struct bpf_prog *prog,
static void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
{
const char *end = sym + KSYM_NAME_LEN;
const struct btf_type *type;
const char *func_name;
BUILD_BUG_ON(sizeof("bpf_prog_") +
sizeof(prog->tag) * 2 +
......@@ -404,6 +408,15 @@ static void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
sym = bin2hex(sym, prog->tag, sizeof(prog->tag));
/* prog->aux->name will be ignored if full btf name is available */
if (prog->aux->btf) {
type = btf_type_by_id(prog->aux->btf, prog->aux->type_id);
func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
snprintf(sym, (size_t)(end - sym), "_%s", func_name);
return;
}
if (prog->aux->name[0])
snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
else
......
......@@ -1213,6 +1213,7 @@ static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
/* bpf_prog_free_id() must be called first */
bpf_prog_free_id(prog, do_idr_lock);
bpf_prog_kallsyms_del_all(prog);
btf_put(prog->aux->btf);
call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
}
......@@ -1437,9 +1438,9 @@ bpf_prog_load_check_attach_type(enum bpf_prog_type prog_type,
}
/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD expected_attach_type
#define BPF_PROG_LOAD_LAST_FIELD func_info_cnt
static int bpf_prog_load(union bpf_attr *attr)
static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
{
enum bpf_prog_type type = attr->prog_type;
struct bpf_prog *prog;
......@@ -1525,7 +1526,7 @@ static int bpf_prog_load(union bpf_attr *attr)
goto free_prog;
/* run eBPF verifier */
err = bpf_check(&prog, attr);
err = bpf_check(&prog, attr, uattr);
if (err < 0)
goto free_used_maps;
......@@ -2079,6 +2080,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
info.xlated_prog_len = 0;
info.nr_jited_ksyms = 0;
info.nr_jited_func_lens = 0;
info.func_info_cnt = 0;
goto done;
}
......@@ -2216,6 +2218,55 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
}
}
if (prog->aux->btf) {
u32 ucnt, urec_size;
info.btf_id = btf_id(prog->aux->btf);
ucnt = info.func_info_cnt;
info.func_info_cnt = prog->aux->func_cnt ? : 1;
urec_size = info.func_info_rec_size;
info.func_info_rec_size = sizeof(struct bpf_func_info);
if (ucnt) {
/* expect passed-in urec_size is what the kernel expects */
if (urec_size != info.func_info_rec_size)
return -EINVAL;
if (bpf_dump_raw_ok()) {
struct bpf_func_info kern_finfo;
char __user *user_finfo;
u32 i, insn_offset;
user_finfo = u64_to_user_ptr(info.func_info);
if (prog->aux->func_cnt) {
ucnt = min_t(u32, info.func_info_cnt, ucnt);
insn_offset = 0;
for (i = 0; i < ucnt; i++) {
kern_finfo.insn_offset = insn_offset;
kern_finfo.type_id = prog->aux->func[i]->aux->type_id;
if (copy_to_user(user_finfo, &kern_finfo,
sizeof(kern_finfo)))
return -EFAULT;
/* func[i]->len holds the prog len */
insn_offset += prog->aux->func[i]->len;
user_finfo += urec_size;
}
} else {
kern_finfo.insn_offset = 0;
kern_finfo.type_id = prog->aux->type_id;
if (copy_to_user(user_finfo, &kern_finfo,
sizeof(kern_finfo)))
return -EFAULT;
}
} else {
info.func_info_cnt = 0;
}
}
} else {
info.func_info_cnt = 0;
}
done:
if (copy_to_user(uinfo, &info, info_len) ||
put_user(info_len, &uattr->info.info_len))
......@@ -2501,7 +2552,7 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
err = map_get_next_key(&attr);
break;
case BPF_PROG_LOAD:
err = bpf_prog_load(&attr);
err = bpf_prog_load(&attr, uattr);
break;
case BPF_OBJ_PIN:
err = bpf_obj_pin(&attr);
......
......@@ -11,10 +11,12 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <uapi/linux/btf.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#include <net/netlink.h>
......@@ -4639,6 +4641,114 @@ static int check_cfg(struct bpf_verifier_env *env)
return ret;
}
/* The minimum supported BTF func info size */
#define MIN_BPF_FUNCINFO_SIZE 8
#define MAX_FUNCINFO_REC_SIZE 252
static int check_btf_func(struct bpf_prog *prog, struct bpf_verifier_env *env,
union bpf_attr *attr, union bpf_attr __user *uattr)
{
u32 i, nfuncs, urec_size, min_size, prev_offset;
u32 krec_size = sizeof(struct bpf_func_info);
struct bpf_func_info krecord = {};
const struct btf_type *type;
void __user *urecord;
struct btf *btf;
int ret = 0;
nfuncs = attr->func_info_cnt;
if (!nfuncs)
return 0;
if (nfuncs != env->subprog_cnt) {
verbose(env, "number of funcs in func_info doesn't match number of subprogs\n");
return -EINVAL;
}
urec_size = attr->func_info_rec_size;
if (urec_size < MIN_BPF_FUNCINFO_SIZE ||
urec_size > MAX_FUNCINFO_REC_SIZE ||
urec_size % sizeof(u32)) {
verbose(env, "invalid func info rec size %u\n", urec_size);
return -EINVAL;
}
btf = btf_get_by_fd(attr->prog_btf_fd);
if (IS_ERR(btf)) {
verbose(env, "unable to get btf from fd\n");
return PTR_ERR(btf);
}
urecord = u64_to_user_ptr(attr->func_info);
min_size = min_t(u32, krec_size, urec_size);
for (i = 0; i < nfuncs; i++) {
ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size);
if (ret) {
if (ret == -E2BIG) {
verbose(env, "nonzero tailing record in func info");
/* set the size kernel expects so loader can zero
* out the rest of the record.
*/
if (put_user(min_size, &uattr->func_info_rec_size))
ret = -EFAULT;
}
goto free_btf;
}
if (copy_from_user(&krecord, urecord, min_size)) {
ret = -EFAULT;
goto free_btf;
}
/* check insn_offset */
if (i == 0) {
if (krecord.insn_offset) {
verbose(env,
"nonzero insn_offset %u for the first func info record",
krecord.insn_offset);
ret = -EINVAL;
goto free_btf;
}
} else if (krecord.insn_offset <= prev_offset) {
verbose(env,
"same or smaller insn offset (%u) than previous func info record (%u)",
krecord.insn_offset, prev_offset);
ret = -EINVAL;
goto free_btf;
}
if (env->subprog_info[i].start != krecord.insn_offset) {
verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n");
ret = -EINVAL;
goto free_btf;
}
/* check type_id */
type = btf_type_by_id(btf, krecord.type_id);
if (!type || BTF_INFO_KIND(type->info) != BTF_KIND_FUNC) {
verbose(env, "invalid type id %d in func info",
krecord.type_id);
ret = -EINVAL;
goto free_btf;
}
if (i == 0)
prog->aux->type_id = krecord.type_id;
env->subprog_info[i].type_id = krecord.type_id;
prev_offset = krecord.insn_offset;
urecord += urec_size;
}
prog->aux->btf = btf;
return 0;
free_btf:
btf_put(btf);
return ret;
}
/* check %cur's range satisfies %old's */
static bool range_within(struct bpf_reg_state *old,
struct bpf_reg_state *cur)
......@@ -5939,6 +6049,9 @@ static int jit_subprogs(struct bpf_verifier_env *env)
func[i]->aux->name[0] = 'F';
func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
func[i]->jit_requested = 1;
/* the btf will be freed only at prog->aux */
func[i]->aux->btf = prog->aux->btf;
func[i]->aux->type_id = env->subprog_info[i].type_id;
func[i] = bpf_int_jit_compile(func[i]);
if (!func[i]->jited) {
err = -ENOTSUPP;
......@@ -6325,7 +6438,8 @@ static void free_states(struct bpf_verifier_env *env)
kfree(env->explored_states);
}
int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
union bpf_attr __user *uattr)
{
struct bpf_verifier_env *env;
struct bpf_verifier_log *log;
......@@ -6397,6 +6511,10 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
if (ret < 0)
goto skip_full_check;
ret = check_btf_func(env->prog, env, attr, uattr);
if (ret < 0)
goto skip_full_check;
ret = do_check(env);
if (env->cur_state) {
free_verifier_state(env->cur_state, true);
......
......@@ -208,12 +208,20 @@ endif
BTF_LLC_PROBE := $(shell $(LLC) -march=bpf -mattr=help 2>&1 | grep dwarfris)
BTF_PAHOLE_PROBE := $(shell $(BTF_PAHOLE) --help 2>&1 | grep BTF)
BTF_OBJCOPY_PROBE := $(shell $(LLVM_OBJCOPY) --help 2>&1 | grep -i 'usage.*llvm')
BTF_LLVM_PROBE := $(shell echo "int main() { return 0; }" | \
$(CLANG) -target bpf -O2 -g -c -x c - -o ./llvm_btf_verify.o; \
readelf -S ./llvm_btf_verify.o | grep BTF; \
/bin/rm -f ./llvm_btf_verify.o)
ifneq ($(BTF_LLVM_PROBE),)
EXTRA_CFLAGS += -g
else
ifneq ($(and $(BTF_LLC_PROBE),$(BTF_PAHOLE_PROBE),$(BTF_OBJCOPY_PROBE)),)
EXTRA_CFLAGS += -g
LLC_FLAGS += -mattr=dwarfris
DWARF2BTF = y
endif
endif
# Trick to allow make to be run from this directory
all:
......
......@@ -249,3 +249,139 @@ int btf_dumper_type(const struct btf_dumper *d, __u32 type_id,
{
return btf_dumper_do_type(d, type_id, 0, data);
}
#define BTF_PRINT_ARG(...) \
do { \
pos += snprintf(func_sig + pos, size - pos, \
__VA_ARGS__); \
if (pos >= size) \
return -1; \
} while (0)
#define BTF_PRINT_TYPE(type) \
do { \
pos = __btf_dumper_type_only(btf, type, func_sig, \
pos, size); \
if (pos == -1) \
return -1; \
} while (0)
static int btf_dump_func(const struct btf *btf, char *func_sig,
const struct btf_type *func_proto,
const struct btf_type *func, int pos, int size);
static int __btf_dumper_type_only(const struct btf *btf, __u32 type_id,
char *func_sig, int pos, int size)
{
const struct btf_type *proto_type;
const struct btf_array *array;
const struct btf_type *t;
if (!type_id) {
BTF_PRINT_ARG("void ");
return pos;
}
t = btf__type_by_id(btf, type_id);
switch (BTF_INFO_KIND(t->info)) {
case BTF_KIND_INT:
BTF_PRINT_ARG("%s ", btf__name_by_offset(btf, t->name_off));
break;
case BTF_KIND_STRUCT:
BTF_PRINT_ARG("struct %s ",
btf__name_by_offset(btf, t->name_off));
break;
case BTF_KIND_UNION:
BTF_PRINT_ARG("union %s ",
btf__name_by_offset(btf, t->name_off));
break;
case BTF_KIND_ENUM:
BTF_PRINT_ARG("enum %s ",
btf__name_by_offset(btf, t->name_off));
break;
case BTF_KIND_ARRAY:
array = (struct btf_array *)(t + 1);
BTF_PRINT_TYPE(array->type);
BTF_PRINT_ARG("[%d]", array->nelems);
break;
case BTF_KIND_PTR:
BTF_PRINT_TYPE(t->type);
BTF_PRINT_ARG("* ");
break;
case BTF_KIND_UNKN:
case BTF_KIND_FWD:
case BTF_KIND_TYPEDEF:
return -1;
case BTF_KIND_VOLATILE:
BTF_PRINT_ARG("volatile ");
BTF_PRINT_TYPE(t->type);
break;
case BTF_KIND_CONST:
BTF_PRINT_ARG("const ");
BTF_PRINT_TYPE(t->type);
break;
case BTF_KIND_RESTRICT:
BTF_PRINT_ARG("restrict ");
BTF_PRINT_TYPE(t->type);
break;
case BTF_KIND_FUNC_PROTO:
pos = btf_dump_func(btf, func_sig, t, NULL, pos, size);
if (pos == -1)
return -1;
break;
case BTF_KIND_FUNC:
proto_type = btf__type_by_id(btf, t->type);
pos = btf_dump_func(btf, func_sig, proto_type, t, pos, size);
if (pos == -1)
return -1;
break;
default:
return -1;
}
return pos;
}
static int btf_dump_func(const struct btf *btf, char *func_sig,
const struct btf_type *func_proto,
const struct btf_type *func, int pos, int size)
{
int i, vlen;
BTF_PRINT_TYPE(func_proto->type);
if (func)
BTF_PRINT_ARG("%s(", btf__name_by_offset(btf, func->name_off));
else
BTF_PRINT_ARG("(");
vlen = BTF_INFO_VLEN(func_proto->info);
for (i = 0; i < vlen; i++) {
struct btf_param *arg = &((struct btf_param *)(func_proto + 1))[i];
if (i)
BTF_PRINT_ARG(", ");
if (arg->type) {
BTF_PRINT_TYPE(arg->type);
BTF_PRINT_ARG("%s",
btf__name_by_offset(btf, arg->name_off));
} else {
BTF_PRINT_ARG("...");
}
}
BTF_PRINT_ARG(")");
return pos;
}
void btf_dumper_type_only(const struct btf *btf, __u32 type_id, char *func_sig,
int size)
{
int err;
func_sig[0] = '\0';
if (!btf)
return;
err = __btf_dumper_type_only(btf, type_id, func_sig, 0, size);
if (err < 0)
func_sig[0] = '\0';
}
......@@ -187,6 +187,8 @@ struct btf_dumper {
*/
int btf_dumper_type(const struct btf_dumper *d, __u32 type_id,
const void *data);
void btf_dumper_type_only(const struct btf *btf, __u32 func_type_id,
char *func_only, int size);
struct nlattr;
struct ifinfomsg;
......
......@@ -215,70 +215,6 @@ static int do_dump_btf(const struct btf_dumper *d,
return ret;
}
static int get_btf(struct bpf_map_info *map_info, struct btf **btf)
{
struct bpf_btf_info btf_info = { 0 };
__u32 len = sizeof(btf_info);
__u32 last_size;
int btf_fd;
void *ptr;
int err;
err = 0;
*btf = NULL;
btf_fd = bpf_btf_get_fd_by_id(map_info->btf_id);
if (btf_fd < 0)
return 0;
/* we won't know btf_size until we call bpf_obj_get_info_by_fd(). so
* let's start with a sane default - 4KiB here - and resize it only if
* bpf_obj_get_info_by_fd() needs a bigger buffer.
*/
btf_info.btf_size = 4096;
last_size = btf_info.btf_size;
ptr = malloc(last_size);
if (!ptr) {
err = -ENOMEM;
goto exit_free;
}
bzero(ptr, last_size);
btf_info.btf = ptr_to_u64(ptr);
err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
if (!err && btf_info.btf_size > last_size) {
void *temp_ptr;
last_size = btf_info.btf_size;
temp_ptr = realloc(ptr, last_size);
if (!temp_ptr) {
err = -ENOMEM;
goto exit_free;
}
ptr = temp_ptr;
bzero(ptr, last_size);
btf_info.btf = ptr_to_u64(ptr);
err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
}
if (err || btf_info.btf_size > last_size) {
err = errno;
goto exit_free;
}
*btf = btf__new((__u8 *)btf_info.btf, btf_info.btf_size, NULL);
if (IS_ERR(*btf)) {
err = PTR_ERR(*btf);
*btf = NULL;
}
exit_free:
close(btf_fd);
free(ptr);
return err;
}
static json_writer_t *get_btf_writer(void)
{
json_writer_t *jw = jsonw_new(stdout);
......@@ -775,7 +711,7 @@ static int do_dump(int argc, char **argv)
prev_key = NULL;
err = get_btf(&info, &btf);
err = btf_get_from_id(info.btf_id, &btf);
if (err) {
p_err("failed to get btf");
goto exit_free;
......@@ -919,7 +855,7 @@ static int do_lookup(int argc, char **argv)
}
/* here means bpf_map_lookup_elem() succeeded */
err = get_btf(&info, &btf);
err = btf_get_from_id(info.btf_id, &btf);
if (err) {
p_err("failed to get btf");
goto exit_free;
......
......@@ -47,6 +47,7 @@
#include <linux/err.h>
#include <bpf.h>
#include <btf.h>
#include <libbpf.h>
#include "cfg.h"
......@@ -451,14 +452,19 @@ static int do_dump(int argc, char **argv)
struct bpf_prog_info info = {};
unsigned int *func_lens = NULL;
const char *disasm_opt = NULL;
unsigned int finfo_rec_size;
unsigned int nr_func_ksyms;
unsigned int nr_func_lens;
struct dump_data dd = {};
__u32 len = sizeof(info);
struct btf *btf = NULL;
void *func_info = NULL;
unsigned int finfo_cnt;
unsigned int buf_size;
char *filepath = NULL;
bool opcodes = false;
bool visual = false;
char func_sig[1024];
unsigned char *buf;
__u32 *member_len;
__u64 *member_ptr;
......@@ -551,6 +557,17 @@ static int do_dump(int argc, char **argv)
}
}
finfo_cnt = info.func_info_cnt;
finfo_rec_size = info.func_info_rec_size;
if (finfo_cnt && finfo_rec_size) {
func_info = malloc(finfo_cnt * finfo_rec_size);
if (!func_info) {
p_err("mem alloc failed");
close(fd);
goto err_free;
}
}
memset(&info, 0, sizeof(info));
*member_ptr = ptr_to_u64(buf);
......@@ -559,6 +576,9 @@ static int do_dump(int argc, char **argv)
info.nr_jited_ksyms = nr_func_ksyms;
info.jited_func_lens = ptr_to_u64(func_lens);
info.nr_jited_func_lens = nr_func_lens;
info.func_info_cnt = finfo_cnt;
info.func_info_rec_size = finfo_rec_size;
info.func_info = ptr_to_u64(func_info);
err = bpf_obj_get_info_by_fd(fd, &info, &len);
close(fd);
......@@ -582,6 +602,18 @@ static int do_dump(int argc, char **argv)
goto err_free;
}
if (info.func_info_cnt != finfo_cnt) {
p_err("incorrect func_info_cnt %d vs. expected %d",
info.func_info_cnt, finfo_cnt);
goto err_free;
}
if (info.func_info_rec_size != finfo_rec_size) {
p_err("incorrect func_info_rec_size %d vs. expected %d",
info.func_info_rec_size, finfo_rec_size);
goto err_free;
}
if ((member_len == &info.jited_prog_len &&
info.jited_prog_insns == 0) ||
(member_len == &info.xlated_prog_len &&
......@@ -590,6 +622,11 @@ static int do_dump(int argc, char **argv)
goto err_free;
}
if (info.btf_id && btf_get_from_id(info.btf_id, &btf)) {
p_err("failed to get btf");
goto err_free;
}
if (filepath) {
fd = open(filepath, O_WRONLY | O_CREAT | O_TRUNC, 0600);
if (fd < 0) {
......@@ -622,6 +659,7 @@ static int do_dump(int argc, char **argv)
if (info.nr_jited_func_lens && info.jited_func_lens) {
struct kernel_sym *sym = NULL;
struct bpf_func_info *record;
char sym_name[SYM_MAX_NAME];
unsigned char *img = buf;
__u64 *ksyms = NULL;
......@@ -648,12 +686,25 @@ static int do_dump(int argc, char **argv)
strcpy(sym_name, "unknown");
}
if (func_info) {
record = func_info + i * finfo_rec_size;
btf_dumper_type_only(btf, record->type_id,
func_sig,
sizeof(func_sig));
}
if (json_output) {
jsonw_start_object(json_wtr);
if (func_info && func_sig[0] != '\0') {
jsonw_name(json_wtr, "proto");
jsonw_string(json_wtr, func_sig);
}
jsonw_name(json_wtr, "name");
jsonw_string(json_wtr, sym_name);
jsonw_name(json_wtr, "insns");
} else {
if (func_info && func_sig[0] != '\0')
printf("%s:\n", func_sig);
printf("%s:\n", sym_name);
}
......@@ -682,6 +733,9 @@ static int do_dump(int argc, char **argv)
kernel_syms_load(&dd);
dd.nr_jited_ksyms = info.nr_jited_ksyms;
dd.jited_ksyms = (__u64 *) info.jited_ksyms;
dd.btf = btf;
dd.func_info = func_info;
dd.finfo_rec_size = finfo_rec_size;
if (json_output)
dump_xlated_json(&dd, buf, *member_len, opcodes);
......@@ -693,12 +747,14 @@ static int do_dump(int argc, char **argv)
free(buf);
free(func_ksyms);
free(func_lens);
free(func_info);
return 0;
err_free:
free(buf);
free(func_ksyms);
free(func_lens);
free(func_info);
return -1;
}
......
......@@ -242,11 +242,15 @@ void dump_xlated_json(struct dump_data *dd, void *buf, unsigned int len,
.cb_imm = print_imm,
.private_data = dd,
};
struct bpf_func_info *record;
struct bpf_insn *insn = buf;
struct btf *btf = dd->btf;
bool double_insn = false;
char func_sig[1024];
unsigned int i;
jsonw_start_array(json_wtr);
record = dd->func_info;
for (i = 0; i < len / sizeof(*insn); i++) {
if (double_insn) {
double_insn = false;
......@@ -255,6 +259,20 @@ void dump_xlated_json(struct dump_data *dd, void *buf, unsigned int len,
double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW);
jsonw_start_object(json_wtr);
if (btf && record) {
if (record->insn_offset == i) {
btf_dumper_type_only(btf, record->type_id,
func_sig,
sizeof(func_sig));
if (func_sig[0] != '\0') {
jsonw_name(json_wtr, "proto");
jsonw_string(json_wtr, func_sig);
}
record = (void *)record + dd->finfo_rec_size;
}
}
jsonw_name(json_wtr, "disasm");
print_bpf_insn(&cbs, insn + i, true);
......@@ -297,16 +315,31 @@ void dump_xlated_plain(struct dump_data *dd, void *buf, unsigned int len,
.cb_imm = print_imm,
.private_data = dd,
};
struct bpf_func_info *record;
struct bpf_insn *insn = buf;
struct btf *btf = dd->btf;
bool double_insn = false;
char func_sig[1024];
unsigned int i;
record = dd->func_info;
for (i = 0; i < len / sizeof(*insn); i++) {
if (double_insn) {
double_insn = false;
continue;
}
if (btf && record) {
if (record->insn_offset == i) {
btf_dumper_type_only(btf, record->type_id,
func_sig,
sizeof(func_sig));
if (func_sig[0] != '\0')
printf("%s:\n", func_sig);
record = (void *)record + dd->finfo_rec_size;
}
}
double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW);
printf("% 4d: ", i);
......
......@@ -51,6 +51,9 @@ struct dump_data {
__u32 sym_count;
__u64 *jited_ksyms;
__u32 nr_jited_ksyms;
struct btf *btf;
void *func_info;
__u32 finfo_rec_size;
char scratch_buff[SYM_MAX_NAME + 8];
};
......
......@@ -338,6 +338,10 @@ union bpf_attr {
* (context accesses, allowed helpers, etc).
*/
__u32 expected_attach_type;
__u32 prog_btf_fd; /* fd pointing to BTF type data */
__u32 func_info_rec_size; /* userspace bpf_func_info size */
__aligned_u64 func_info; /* func info */
__u32 func_info_cnt; /* number of bpf_func_info records */
};
struct { /* anonymous struct used by BPF_OBJ_* commands */
......@@ -2638,6 +2642,10 @@ struct bpf_prog_info {
__u32 nr_jited_func_lens;
__aligned_u64 jited_ksyms;
__aligned_u64 jited_func_lens;
__u32 btf_id;
__u32 func_info_rec_size;
__aligned_u64 func_info;
__u32 func_info_cnt;
} __attribute__((aligned(8)));
struct bpf_map_info {
......@@ -2949,4 +2957,9 @@ struct bpf_flow_keys {
};
};
struct bpf_func_info {
__u32 insn_offset;
__u32 type_id;
};
#endif /* _UAPI__LINUX_BPF_H__ */
......@@ -40,7 +40,8 @@ struct btf_type {
/* "size" is used by INT, ENUM, STRUCT and UNION.
* "size" tells the size of the type it is describing.
*
* "type" is used by PTR, TYPEDEF, VOLATILE, CONST and RESTRICT.
* "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
* FUNC and FUNC_PROTO.
* "type" is a type_id referring to another type.
*/
union {
......@@ -64,8 +65,10 @@ struct btf_type {
#define BTF_KIND_VOLATILE 9 /* Volatile */
#define BTF_KIND_CONST 10 /* Const */
#define BTF_KIND_RESTRICT 11 /* Restrict */
#define BTF_KIND_MAX 11
#define NR_BTF_KINDS 12
#define BTF_KIND_FUNC 12 /* Function */
#define BTF_KIND_FUNC_PROTO 13 /* Function Proto */
#define BTF_KIND_MAX 13
#define NR_BTF_KINDS 14
/* For some specific BTF_KIND, "struct btf_type" is immediately
* followed by extra data.
......@@ -110,4 +113,13 @@ struct btf_member {
__u32 offset; /* offset in bits */
};
/* BTF_KIND_FUNC_PROTO is followed by multiple "struct btf_param".
* The exact number of btf_param is stored in the vlen (of the
* info in "struct btf_type").
*/
struct btf_param {
__u32 name_off;
__u32 type;
};
#endif /* _UAPI__LINUX_BTF_H__ */
......@@ -186,6 +186,7 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
char *log_buf, size_t log_buf_sz)
{
union bpf_attr attr;
void *finfo = NULL;
__u32 name_len;
int fd;
......@@ -205,6 +206,10 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
attr.log_level = 0;
attr.kern_version = load_attr->kern_version;
attr.prog_ifindex = load_attr->prog_ifindex;
attr.prog_btf_fd = load_attr->prog_btf_fd;
attr.func_info_rec_size = load_attr->func_info_rec_size;
attr.func_info_cnt = load_attr->func_info_cnt;
attr.func_info = ptr_to_u64(load_attr->func_info);
memcpy(attr.prog_name, load_attr->name,
min(name_len, BPF_OBJ_NAME_LEN - 1));
......@@ -212,12 +217,55 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
if (fd >= 0 || !log_buf || !log_buf_sz)
return fd;
/* After bpf_prog_load, the kernel may modify certain attributes
* to give user space a hint how to deal with loading failure.
* Check to see whether we can make some changes and load again.
*/
if (errno == E2BIG && attr.func_info_cnt &&
attr.func_info_rec_size < load_attr->func_info_rec_size) {
__u32 actual_rec_size = load_attr->func_info_rec_size;
__u32 expected_rec_size = attr.func_info_rec_size;
__u32 finfo_cnt = load_attr->func_info_cnt;
__u64 finfo_len = actual_rec_size * finfo_cnt;
const void *orecord;
void *nrecord;
int i;
finfo = malloc(finfo_len);
if (!finfo)
/* further try with log buffer won't help */
return fd;
/* zero out bytes kernel does not understand */
orecord = load_attr->func_info;
nrecord = finfo;
for (i = 0; i < load_attr->func_info_cnt; i++) {
memcpy(nrecord, orecord, expected_rec_size);
memset(nrecord + expected_rec_size, 0,
actual_rec_size - expected_rec_size);
orecord += actual_rec_size;
nrecord += actual_rec_size;
}
/* try with corrected func info records */
attr.func_info = ptr_to_u64(finfo);
attr.func_info_rec_size = load_attr->func_info_rec_size;
fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
if (fd >= 0 || !log_buf || !log_buf_sz)
goto done;
}
/* Try again with log */
attr.log_buf = ptr_to_u64(log_buf);
attr.log_size = log_buf_sz;
attr.log_level = 1;
log_buf[0] = 0;
return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
done:
free(finfo);
return fd;
}
int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
......
......@@ -74,6 +74,10 @@ struct bpf_load_program_attr {
const char *license;
__u32 kern_version;
__u32 prog_ifindex;
__u32 prog_btf_fd;
__u32 func_info_rec_size;
const void *func_info;
__u32 func_info_cnt;
};
/* Flags to direct loading requirements */
......
......@@ -37,6 +37,23 @@ struct btf {
int fd;
};
struct btf_ext {
void *func_info;
__u32 func_info_rec_size;
__u32 func_info_len;
};
/* The minimum bpf_func_info checked by the loader */
struct bpf_func_info_min {
__u32 insn_offset;
__u32 type_id;
};
static inline __u64 ptr_to_u64(const void *ptr)
{
return (__u64) (unsigned long) ptr;
}
static int btf_add_type(struct btf *btf, struct btf_type *t)
{
if (btf->types_size - btf->nr_types < 2) {
......@@ -165,6 +182,10 @@ static int btf_parse_type_sec(struct btf *btf, btf_print_fn_t err_log)
case BTF_KIND_ENUM:
next_type += vlen * sizeof(struct btf_enum);
break;
case BTF_KIND_FUNC_PROTO:
next_type += vlen * sizeof(struct btf_param);
break;
case BTF_KIND_FUNC:
case BTF_KIND_TYPEDEF:
case BTF_KIND_PTR:
case BTF_KIND_FWD:
......@@ -393,3 +414,329 @@ const char *btf__name_by_offset(const struct btf *btf, __u32 offset)
else
return NULL;
}
int btf_get_from_id(__u32 id, struct btf **btf)
{
struct bpf_btf_info btf_info = { 0 };
__u32 len = sizeof(btf_info);
__u32 last_size;
int btf_fd;
void *ptr;
int err;
err = 0;
*btf = NULL;
btf_fd = bpf_btf_get_fd_by_id(id);
if (btf_fd < 0)
return 0;
/* we won't know btf_size until we call bpf_obj_get_info_by_fd(). so
* let's start with a sane default - 4KiB here - and resize it only if
* bpf_obj_get_info_by_fd() needs a bigger buffer.
*/
btf_info.btf_size = 4096;
last_size = btf_info.btf_size;
ptr = malloc(last_size);
if (!ptr) {
err = -ENOMEM;
goto exit_free;
}
bzero(ptr, last_size);
btf_info.btf = ptr_to_u64(ptr);
err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
if (!err && btf_info.btf_size > last_size) {
void *temp_ptr;
last_size = btf_info.btf_size;
temp_ptr = realloc(ptr, last_size);
if (!temp_ptr) {
err = -ENOMEM;
goto exit_free;
}
ptr = temp_ptr;
bzero(ptr, last_size);
btf_info.btf = ptr_to_u64(ptr);
err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
}
if (err || btf_info.btf_size > last_size) {
err = errno;
goto exit_free;
}
*btf = btf__new((__u8 *)btf_info.btf, btf_info.btf_size, NULL);
if (IS_ERR(*btf)) {
err = PTR_ERR(*btf);
*btf = NULL;
}
exit_free:
close(btf_fd);
free(ptr);
return err;
}
static int btf_ext_validate_func_info(const void *finfo, __u32 size,
btf_print_fn_t err_log)
{
int sec_hdrlen = sizeof(struct btf_sec_func_info);
__u32 size_left, num_records, record_size;
const struct btf_sec_func_info *sinfo;
__u64 total_record_size;
/* At least a func_info record size */
if (size < sizeof(__u32)) {
elog("BTF.ext func_info record size not found");
return -EINVAL;
}
/* The record size needs to meet below minimum standard */
record_size = *(__u32 *)finfo;
if (record_size < sizeof(struct bpf_func_info_min) ||
record_size % sizeof(__u32)) {
elog("BTF.ext func_info invalid record size");
return -EINVAL;
}
sinfo = finfo + sizeof(__u32);
size_left = size - sizeof(__u32);
/* If no func_info records, return failure now so .BTF.ext
* won't be used.
*/
if (!size_left) {
elog("BTF.ext no func info records");
return -EINVAL;
}
while (size_left) {
if (size_left < sec_hdrlen) {
elog("BTF.ext func_info header not found");
return -EINVAL;
}
num_records = sinfo->num_func_info;
if (num_records == 0) {
elog("incorrect BTF.ext num_func_info");
return -EINVAL;
}
total_record_size = sec_hdrlen +
(__u64)num_records * record_size;
if (size_left < total_record_size) {
elog("incorrect BTF.ext num_func_info");
return -EINVAL;
}
size_left -= total_record_size;
sinfo = (void *)sinfo + total_record_size;
}
return 0;
}
static int btf_ext_parse_hdr(__u8 *data, __u32 data_size,
btf_print_fn_t err_log)
{
const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
__u32 meta_left, last_func_info_pos;
void *finfo;
if (data_size < offsetof(struct btf_ext_header, func_info_off) ||
data_size < hdr->hdr_len) {
elog("BTF.ext header not found");
return -EINVAL;
}
if (hdr->magic != BTF_MAGIC) {
elog("Invalid BTF.ext magic:%x\n", hdr->magic);
return -EINVAL;
}
if (hdr->version != BTF_VERSION) {
elog("Unsupported BTF.ext version:%u\n", hdr->version);
return -ENOTSUP;
}
if (hdr->flags) {
elog("Unsupported BTF.ext flags:%x\n", hdr->flags);
return -ENOTSUP;
}
meta_left = data_size - hdr->hdr_len;
if (!meta_left) {
elog("BTF.ext has no data\n");
return -EINVAL;
}
if (meta_left < hdr->func_info_off) {
elog("Invalid BTF.ext func_info section offset:%u\n",
hdr->func_info_off);
return -EINVAL;
}
if (hdr->func_info_off & 0x03) {
elog("BTF.ext func_info section is not aligned to 4 bytes\n");
return -EINVAL;
}
last_func_info_pos = hdr->hdr_len + hdr->func_info_off +
hdr->func_info_len;
if (last_func_info_pos > data_size) {
elog("Invalid BTF.ext func_info section size:%u\n",
hdr->func_info_len);
return -EINVAL;
}
finfo = data + hdr->hdr_len + hdr->func_info_off;
return btf_ext_validate_func_info(finfo, hdr->func_info_len,
err_log);
}
void btf_ext__free(struct btf_ext *btf_ext)
{
if (!btf_ext)
return;
free(btf_ext->func_info);
free(btf_ext);
}
struct btf_ext *btf_ext__new(__u8 *data, __u32 size, btf_print_fn_t err_log)
{
const struct btf_ext_header *hdr;
struct btf_ext *btf_ext;
void *org_fdata, *fdata;
__u32 hdrlen, size_u32;
int err;
err = btf_ext_parse_hdr(data, size, err_log);
if (err)
return ERR_PTR(err);
btf_ext = calloc(1, sizeof(struct btf_ext));
if (!btf_ext)
return ERR_PTR(-ENOMEM);
hdr = (const struct btf_ext_header *)data;
hdrlen = hdr->hdr_len;
size_u32 = sizeof(__u32);
fdata = malloc(hdr->func_info_len - size_u32);
if (!fdata) {
free(btf_ext);
return ERR_PTR(-ENOMEM);
}
/* remember record size and copy rest of func_info data */
org_fdata = data + hdrlen + hdr->func_info_off;
btf_ext->func_info_rec_size = *(__u32 *)org_fdata;
memcpy(fdata, org_fdata + size_u32, hdr->func_info_len - size_u32);
btf_ext->func_info = fdata;
btf_ext->func_info_len = hdr->func_info_len - size_u32;
return btf_ext;
}
int btf_ext__reloc_init(struct btf *btf, struct btf_ext *btf_ext,
const char *sec_name, void **func_info,
__u32 *func_info_rec_size, __u32 *func_info_len)
{
__u32 sec_hdrlen = sizeof(struct btf_sec_func_info);
__u32 i, record_size, records_len;
struct btf_sec_func_info *sinfo;
const char *info_sec_name;
__s64 remain_len;
void *data;
record_size = btf_ext->func_info_rec_size;
sinfo = btf_ext->func_info;
remain_len = btf_ext->func_info_len;
while (remain_len > 0) {
records_len = sinfo->num_func_info * record_size;
info_sec_name = btf__name_by_offset(btf, sinfo->sec_name_off);
if (strcmp(info_sec_name, sec_name)) {
remain_len -= sec_hdrlen + records_len;
sinfo = (void *)sinfo + sec_hdrlen + records_len;
continue;
}
data = malloc(records_len);
if (!data)
return -ENOMEM;
memcpy(data, sinfo->data, records_len);
/* adjust the insn_offset, the data in .BTF.ext is
* the actual byte offset, and the kernel expects
* the offset in term of bpf_insn.
*
* adjust the insn offset only, the rest data will
* be passed to kernel.
*/
for (i = 0; i < sinfo->num_func_info; i++) {
struct bpf_func_info_min *record;
record = data + i * record_size;
record->insn_offset /= sizeof(struct bpf_insn);
}
*func_info = data;
*func_info_len = records_len;
*func_info_rec_size = record_size;
return 0;
}
return -EINVAL;
}
int btf_ext__reloc(struct btf *btf, struct btf_ext *btf_ext,
const char *sec_name, __u32 insns_cnt,
void **func_info, __u32 *func_info_len)
{
__u32 sec_hdrlen = sizeof(struct btf_sec_func_info);
__u32 i, record_size, existing_flen, records_len;
struct btf_sec_func_info *sinfo;
const char *info_sec_name;
__u64 remain_len;
void *data;
record_size = btf_ext->func_info_rec_size;
sinfo = btf_ext->func_info;
remain_len = btf_ext->func_info_len;
while (remain_len > 0) {
records_len = sinfo->num_func_info * record_size;
info_sec_name = btf__name_by_offset(btf, sinfo->sec_name_off);
if (strcmp(info_sec_name, sec_name)) {
remain_len -= sec_hdrlen + records_len;
sinfo = (void *)sinfo + sec_hdrlen + records_len;
continue;
}
existing_flen = *func_info_len;
data = realloc(*func_info, existing_flen + records_len);
if (!data)
return -ENOMEM;
memcpy(data + existing_flen, sinfo->data, records_len);
/* adjust insn_offset only, the rest data will be passed
* to the kernel.
*/
for (i = 0; i < sinfo->num_func_info; i++) {
struct bpf_func_info_min *record;
record = data + existing_flen + i * record_size;
record->insn_offset =
record->insn_offset / sizeof(struct bpf_insn) +
insns_cnt;
}
*func_info = data;
*func_info_len = existing_flen + records_len;
return 0;
}
return -EINVAL;
}
......@@ -11,10 +11,51 @@
#endif
#define BTF_ELF_SEC ".BTF"
#define BTF_EXT_ELF_SEC ".BTF.ext"
struct btf;
struct btf_ext;
struct btf_type;
/*
* The .BTF.ext ELF section layout defined as
* struct btf_ext_header
* func_info subsection
*
* The func_info subsection layout:
* record size for struct bpf_func_info in the func_info subsection
* struct btf_sec_func_info for section #1
* a list of bpf_func_info records for section #1
* where struct bpf_func_info mimics one in include/uapi/linux/bpf.h
* but may not be identical
* struct btf_sec_func_info for section #2
* a list of bpf_func_info records for section #2
* ......
*
* Note that the bpf_func_info record size in .BTF.ext may not
* be the same as the one defined in include/uapi/linux/bpf.h.
* The loader should ensure that record_size meets minimum
* requirement and pass the record as is to the kernel. The
* kernel will handle the func_info properly based on its contents.
*/
struct btf_ext_header {
__u16 magic;
__u8 version;
__u8 flags;
__u32 hdr_len;
/* All offsets are in bytes relative to the end of this header */
__u32 func_info_off;
__u32 func_info_len;
};
struct btf_sec_func_info {
__u32 sec_name_off;
__u32 num_func_info;
/* Followed by num_func_info number of bpf func_info records */
__u8 data[0];
};
typedef int (*btf_print_fn_t)(const char *, ...)
__attribute__((format(printf, 1, 2)));
......@@ -28,5 +69,15 @@ LIBBPF_API __s64 btf__resolve_size(const struct btf *btf, __u32 type_id);
LIBBPF_API int btf__resolve_type(const struct btf *btf, __u32 type_id);
LIBBPF_API int btf__fd(const struct btf *btf);
LIBBPF_API const char *btf__name_by_offset(const struct btf *btf, __u32 offset);
LIBBPF_API int btf_get_from_id(__u32 id, struct btf **btf);
struct btf_ext *btf_ext__new(__u8 *data, __u32 size, btf_print_fn_t err_log);
void btf_ext__free(struct btf_ext *btf_ext);
int btf_ext__reloc_init(struct btf *btf, struct btf_ext *btf_ext,
const char *sec_name, void **func_info,
__u32 *func_info_rec_size, __u32 *func_info_len);
int btf_ext__reloc(struct btf *btf, struct btf_ext *btf_ext,
const char *sec_name, __u32 insns_cnt, void **func_info,
__u32 *func_info_len);
#endif /* __LIBBPF_BTF_H */
......@@ -156,6 +156,10 @@ struct bpf_program {
bpf_program_clear_priv_t clear_priv;
enum bpf_attach_type expected_attach_type;
int btf_fd;
void *func_info;
__u32 func_info_rec_size;
__u32 func_info_len;
};
struct bpf_map {
......@@ -212,6 +216,7 @@ struct bpf_object {
struct list_head list;
struct btf *btf;
struct btf_ext *btf_ext;
void *priv;
bpf_object_clear_priv_t clear_priv;
......@@ -241,6 +246,9 @@ void bpf_program__unload(struct bpf_program *prog)
prog->instances.nr = -1;
zfree(&prog->instances.fds);
zclose(prog->btf_fd);
zfree(&prog->func_info);
}
static void bpf_program__exit(struct bpf_program *prog)
......@@ -315,6 +323,7 @@ bpf_program__init(void *data, size_t size, char *section_name, int idx,
prog->instances.fds = NULL;
prog->instances.nr = -1;
prog->type = BPF_PROG_TYPE_KPROBE;
prog->btf_fd = -1;
return 0;
errout:
......@@ -807,6 +816,15 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
BTF_ELF_SEC, PTR_ERR(obj->btf));
obj->btf = NULL;
}
} else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
obj->btf_ext = btf_ext__new(data->d_buf, data->d_size,
__pr_debug);
if (IS_ERR(obj->btf_ext)) {
pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
BTF_EXT_ELF_SEC,
PTR_ERR(obj->btf_ext));
obj->btf_ext = NULL;
}
} else if (sh.sh_type == SHT_SYMTAB) {
if (obj->efile.symbols) {
pr_warning("bpf: multiple SYMTAB in %s\n",
......@@ -1190,6 +1208,7 @@ bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
struct bpf_insn *insn, *new_insn;
struct bpf_program *text;
size_t new_cnt;
int err;
if (relo->type != RELO_CALL)
return -LIBBPF_ERRNO__RELOC;
......@@ -1212,6 +1231,20 @@ bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
pr_warning("oom in prog realloc\n");
return -ENOMEM;
}
if (obj->btf && obj->btf_ext) {
err = btf_ext__reloc(obj->btf, obj->btf_ext,
text->section_name,
prog->insns_cnt,
&prog->func_info,
&prog->func_info_len);
if (err) {
pr_warning("error in btf_ext__reloc for sec %s\n",
text->section_name);
return err;
}
}
memcpy(new_insn + prog->insns_cnt, text->insns,
text->insns_cnt * sizeof(*insn));
prog->insns = new_insn;
......@@ -1231,7 +1264,24 @@ bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
{
int i, err;
if (!prog || !prog->reloc_desc)
if (!prog)
return 0;
if (obj->btf && obj->btf_ext) {
err = btf_ext__reloc_init(obj->btf, obj->btf_ext,
prog->section_name,
&prog->func_info,
&prog->func_info_rec_size,
&prog->func_info_len);
if (err) {
pr_warning("err in btf_ext__reloc_init for sec %s\n",
prog->section_name);
return err;
}
prog->btf_fd = btf__fd(obj->btf);
}
if (!prog->reloc_desc)
return 0;
for (i = 0; i < prog->nr_reloc; i++) {
......@@ -1319,9 +1369,9 @@ static int bpf_object__collect_reloc(struct bpf_object *obj)
}
static int
load_program(enum bpf_prog_type type, enum bpf_attach_type expected_attach_type,
const char *name, struct bpf_insn *insns, int insns_cnt,
char *license, __u32 kern_version, int *pfd, int prog_ifindex)
load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
char *license, __u32 kern_version, int *pfd,
__u32 func_info_cnt)
{
struct bpf_load_program_attr load_attr;
char *cp, errmsg[STRERR_BUFSIZE];
......@@ -1329,14 +1379,18 @@ load_program(enum bpf_prog_type type, enum bpf_attach_type expected_attach_type,
int ret;
memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
load_attr.prog_type = type;
load_attr.expected_attach_type = expected_attach_type;
load_attr.name = name;
load_attr.prog_type = prog->type;
load_attr.expected_attach_type = prog->expected_attach_type;
load_attr.name = prog->name;
load_attr.insns = insns;
load_attr.insns_cnt = insns_cnt;
load_attr.license = license;
load_attr.kern_version = kern_version;
load_attr.prog_ifindex = prog_ifindex;
load_attr.prog_ifindex = prog->prog_ifindex;
load_attr.prog_btf_fd = prog->btf_fd;
load_attr.func_info = prog->func_info;
load_attr.func_info_rec_size = prog->func_info_rec_size;
load_attr.func_info_cnt = func_info_cnt;
if (!load_attr.insns || !load_attr.insns_cnt)
return -EINVAL;
......@@ -1394,8 +1448,14 @@ int
bpf_program__load(struct bpf_program *prog,
char *license, __u32 kern_version)
{
__u32 func_info_cnt;
int err = 0, fd, i;
if (prog->func_info_len == 0)
func_info_cnt = 0;
else
func_info_cnt = prog->func_info_len / prog->func_info_rec_size;
if (prog->instances.nr < 0 || !prog->instances.fds) {
if (prog->preprocessor) {
pr_warning("Internal error: can't load program '%s'\n",
......@@ -1417,10 +1477,9 @@ bpf_program__load(struct bpf_program *prog,
pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
prog->section_name, prog->instances.nr);
}
err = load_program(prog->type, prog->expected_attach_type,
prog->name, prog->insns, prog->insns_cnt,
err = load_program(prog, prog->insns, prog->insns_cnt,
license, kern_version, &fd,
prog->prog_ifindex);
func_info_cnt);
if (!err)
prog->instances.fds[0] = fd;
goto out;
......@@ -1448,11 +1507,10 @@ bpf_program__load(struct bpf_program *prog,
continue;
}
err = load_program(prog->type, prog->expected_attach_type,
prog->name, result.new_insn_ptr,
err = load_program(prog, result.new_insn_ptr,
result.new_insn_cnt,
license, kern_version, &fd,
prog->prog_ifindex);
func_info_cnt);
if (err) {
pr_warning("Loading the %dth instance of program '%s' failed\n",
......@@ -2120,6 +2178,7 @@ void bpf_object__close(struct bpf_object *obj)
bpf_object__elf_finish(obj);
bpf_object__unload(obj);
btf__free(obj->btf);
btf_ext__free(obj->btf_ext);
for (i = 0; i < obj->nr_maps; i++) {
zfree(&obj->maps[i].name);
......
......@@ -126,7 +126,14 @@ $(OUTPUT)/test_stack_map.o: test_queue_stack_map.h
BTF_LLC_PROBE := $(shell $(LLC) -march=bpf -mattr=help 2>&1 | grep dwarfris)
BTF_PAHOLE_PROBE := $(shell $(BTF_PAHOLE) --help 2>&1 | grep BTF)
BTF_OBJCOPY_PROBE := $(shell $(LLVM_OBJCOPY) --help 2>&1 | grep -i 'usage.*llvm')
BTF_LLVM_PROBE := $(shell echo "int main() { return 0; }" | \
$(CLANG) -target bpf -O2 -g -c -x c - -o ./llvm_btf_verify.o; \
readelf -S ./llvm_btf_verify.o | grep BTF; \
/bin/rm -f ./llvm_btf_verify.o)
ifneq ($(BTF_LLVM_PROBE),)
CLANG_FLAGS += -g
else
ifneq ($(BTF_LLC_PROBE),)
ifneq ($(BTF_PAHOLE_PROBE),)
ifneq ($(BTF_OBJCOPY_PROBE),)
......@@ -136,6 +143,7 @@ ifneq ($(BTF_OBJCOPY_PROBE),)
endif
endif
endif
endif
# Have one program compiled without "-target bpf" to test whether libbpf loads
# it successfully
......
This diff is collapsed.
......@@ -24,8 +24,8 @@ struct dummy_tracepoint_args {
struct sock *sock;
};
SEC("dummy_tracepoint")
int _dummy_tracepoint(struct dummy_tracepoint_args *arg)
__attribute__((noinline))
static int test_long_fname_2(struct dummy_tracepoint_args *arg)
{
struct ipv_counts *counts;
int key = 0;
......@@ -42,4 +42,16 @@ int _dummy_tracepoint(struct dummy_tracepoint_args *arg)
return 0;
}
__attribute__((noinline))
static int test_long_fname_1(struct dummy_tracepoint_args *arg)
{
return test_long_fname_2(arg);
}
SEC("dummy_tracepoint")
int _dummy_tracepoint(struct dummy_tracepoint_args *arg)
{
return test_long_fname_1(arg);
}
char _license[] SEC("license") = "GPL";
......@@ -22,8 +22,8 @@ struct dummy_tracepoint_args {
struct sock *sock;
};
SEC("dummy_tracepoint")
int _dummy_tracepoint(struct dummy_tracepoint_args *arg)
__attribute__((noinline))
static int test_long_fname_2(struct dummy_tracepoint_args *arg)
{
struct ipv_counts *counts;
int key = 0;
......@@ -40,4 +40,16 @@ int _dummy_tracepoint(struct dummy_tracepoint_args *arg)
return 0;
}
__attribute__((noinline))
static int test_long_fname_1(struct dummy_tracepoint_args *arg)
{
return test_long_fname_2(arg);
}
SEC("dummy_tracepoint")
int _dummy_tracepoint(struct dummy_tracepoint_args *arg)
{
return test_long_fname_1(arg);
}
char _license[] SEC("license") = "GPL";