Commit 32a16f6b authored by Alexei Starovoitov's avatar Alexei Starovoitov

Merge branch 'Support kernel module function calls from eBPF'

Kumar Kartikeya says:

====================

This set enables kernel module function calls, and also modifies verifier logic
to permit invalid kernel function calls as long as they are pruned as part of
dead code elimination. This is done to provide better runtime portability for
BPF objects, which can conditionally disable parts of code that are pruned later
by the verifier (e.g. const volatile vars, kconfig options). libbpf
modifications are made along with kernel changes to support module function
calls.

It also converts TCP congestion control objects to use the module kfunc support
instead of relying on IS_BUILTIN ifdef.

Changelog:
----------
v6 -> v7
v6: https://lore.kernel.org/bpf/20210930062948.1843919-1-memxor@gmail.com

 * Let __bpf_check_kfunc_call take kfunc_btf_id_list instead of generating
   callbacks (Andrii)
 * Rename it to bpf_check_mod_kfunc_call to reflect usage
 * Remove OOM checks (Alexei)
 * Remove resolve_btfids invocation for bpf_testmod (Andrii)
 * Move fd_array_cnt initialization near fd_array alloc (Andrii)
 * Rename helper to btf_find_by_name_kind and pass start_id (Andrii)
 * memset when data is NULL in add_data (Alexei)
 * Fix other nits

v5 -> v6
v5: https://lore.kernel.org/bpf/20210927145941.1383001-1-memxor@gmail.com

 * Rework gen_loader relocation emits
   * Only emit bpf_btf_find_by_name_kind call when required (Alexei)
 * Refactor code to emit ksym var and func relo into separate helpers; this
   will make it easier to add future weak/typeless ksym support (for my followup)
   * Count references for both ksym var and funcs, and avoid calling helpers
     unless required for both of them. This also means we share fds between
     ksym vars for the module BTFs. Also be careful with this when closing
     BTF fd so that we only close one instance of the fd for each ksym

v4 -> v5
v4: https://lore.kernel.org/bpf/20210920141526.3940002-1-memxor@gmail.com

 * Address comments from Alexei
   * Use reserved fd_array area in loader map instead of creating a new map
   * Drop selftest testing the 256 kfunc limit, however selftest testing reuse
     of BTF fd for same kfunc in gen_loader and libbpf is kept
 * Address comments from Andrii
 * Make --no-fail the default for resolve_btfids, i.e. only fail if we find a
   BTF section and cannot process it
   * Use obj->btf_modules array to store index in the fd_array, so that we don't
     have to do any searching to reuse the index, instead only set it the first
     time a module BTF's fd is used
   * Make find_ksym_btf_id to return struct module_btf * in last parameter
   * Improve logging when index becomes bigger than INT16_MAX
   * Add btf__find_by_name_kind_own internal helper to only start searching for
     kfunc ID in module BTF, since find_ksym_btf_id already checks vmlinux BTF
     before iterating over module BTFs.
   * Fix various other nits
 * Fixes for failing selftests on BPF CI
 * Rearrange/cleanup selftests
   * Avoid testing kfunc limit (Alexei)
   * Do test gen_loader and libbpf BTF fd index dedup with 256 calls
   * Move invalid kfunc failure test to verifier selftest
   * Minimize duplication
 * Use consistent bpf_<type>_check_kfunc_call naming for module kfunc callback
 * Since we try to add fd using add_data while we can, cherry pick Alexei's
   patch from CO-RE RFC series to align gen_loader data.

v3 -> v4
v3: https://lore.kernel.org/bpf/20210915050943.679062-1-memxor@gmail.com

 * Address comments from Alexei
   * Drop MAX_BPF_STACK change, instead move map_fd and BTF fd to BPF array map
     and pass fd_array using BPF_PSEUDO_MAP_IDX_VALUE
 * Address comments from Andrii
   * Fix selftest to store to variable for observing function call instead of
     printk and polluting CI logs
 * Drop use of raw_tp for testing, instead reuse classifier based prog_test_run
 * Drop index + 1 based insn->off convention for kfunc module calls
 * Expand selftests to cover more corner cases
 * Misc cleanups

v2 -> v3
v2: https://lore.kernel.org/bpf/20210914123750.460750-1-memxor@gmail.com

 * Fix issues pointed out by Kernel Test Robot
 * Fix find_kfunc_desc to also take offset into consideration when comparing

RFC v1 -> v2
v1: https://lore.kernel.org/bpf/20210830173424.1385796-1-memxor@gmail.com

 * Address comments from Alexei
   * Reuse fd_array instead of introducing kfunc_btf_fds array
   * Take btf and module reference as needed, instead of preloading
   * Add BTF_KIND_FUNC relocation support to gen_loader infrastructure
 * Address comments from Andrii
   * Drop hashmap in libbpf for finding index of existing BTF in fd_array
   * Preserve invalid kfunc calls only when the symbol is weak
 * Adjust verifier selftests
====================
Signed-off-by: default avatarAlexei Starovoitov <ast@kernel.org>
parents d0f1c248 c48e51c8
...@@ -513,7 +513,7 @@ struct bpf_verifier_ops { ...@@ -513,7 +513,7 @@ struct bpf_verifier_ops {
const struct btf_type *t, int off, int size, const struct btf_type *t, int off, int size,
enum bpf_access_type atype, enum bpf_access_type atype,
u32 *next_btf_id); u32 *next_btf_id);
bool (*check_kfunc_call)(u32 kfunc_btf_id); bool (*check_kfunc_call)(u32 kfunc_btf_id, struct module *owner);
}; };
struct bpf_prog_offload_ops { struct bpf_prog_offload_ops {
...@@ -877,6 +877,7 @@ struct bpf_prog_aux { ...@@ -877,6 +877,7 @@ struct bpf_prog_aux {
void *jit_data; /* JIT specific data. arch dependent */ void *jit_data; /* JIT specific data. arch dependent */
struct bpf_jit_poke_descriptor *poke_tab; struct bpf_jit_poke_descriptor *poke_tab;
struct bpf_kfunc_desc_tab *kfunc_tab; struct bpf_kfunc_desc_tab *kfunc_tab;
struct bpf_kfunc_btf_tab *kfunc_btf_tab;
u32 size_poke_tab; u32 size_poke_tab;
struct bpf_ksym ksym; struct bpf_ksym ksym;
const struct bpf_prog_ops *ops; const struct bpf_prog_ops *ops;
...@@ -1639,7 +1640,7 @@ int bpf_prog_test_run_raw_tp(struct bpf_prog *prog, ...@@ -1639,7 +1640,7 @@ int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
const union bpf_attr *kattr, const union bpf_attr *kattr,
union bpf_attr __user *uattr); union bpf_attr __user *uattr);
bool bpf_prog_test_check_kfunc_call(u32 kfunc_id); bool bpf_prog_test_check_kfunc_call(u32 kfunc_id, struct module *owner);
bool btf_ctx_access(int off, int size, enum bpf_access_type type, bool btf_ctx_access(int off, int size, enum bpf_access_type type,
const struct bpf_prog *prog, const struct bpf_prog *prog,
struct bpf_insn_access_aux *info); struct bpf_insn_access_aux *info);
...@@ -1860,7 +1861,8 @@ static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, ...@@ -1860,7 +1861,8 @@ static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
return -ENOTSUPP; return -ENOTSUPP;
} }
static inline bool bpf_prog_test_check_kfunc_call(u32 kfunc_id) static inline bool bpf_prog_test_check_kfunc_call(u32 kfunc_id,
struct module *owner)
{ {
return false; return false;
} }
......
...@@ -527,5 +527,7 @@ int bpf_check_attach_target(struct bpf_verifier_log *log, ...@@ -527,5 +527,7 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
const struct bpf_prog *tgt_prog, const struct bpf_prog *tgt_prog,
u32 btf_id, u32 btf_id,
struct bpf_attach_target_info *tgt_info); struct bpf_attach_target_info *tgt_info);
void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab);
#endif /* _LINUX_BPF_VERIFIER_H */ #endif /* _LINUX_BPF_VERIFIER_H */
...@@ -3,6 +3,7 @@ ...@@ -3,6 +3,7 @@
#ifndef _LINUX_BPFPTR_H #ifndef _LINUX_BPFPTR_H
#define _LINUX_BPFPTR_H #define _LINUX_BPFPTR_H
#include <linux/mm.h>
#include <linux/sockptr.h> #include <linux/sockptr.h>
typedef sockptr_t bpfptr_t; typedef sockptr_t bpfptr_t;
......
...@@ -5,6 +5,7 @@ ...@@ -5,6 +5,7 @@
#define _LINUX_BTF_H 1 #define _LINUX_BTF_H 1
#include <linux/types.h> #include <linux/types.h>
#include <linux/bpfptr.h>
#include <uapi/linux/btf.h> #include <uapi/linux/btf.h>
#include <uapi/linux/bpf.h> #include <uapi/linux/bpf.h>
...@@ -238,4 +239,42 @@ static inline const char *btf_name_by_offset(const struct btf *btf, ...@@ -238,4 +239,42 @@ static inline const char *btf_name_by_offset(const struct btf *btf,
} }
#endif #endif
struct kfunc_btf_id_set {
struct list_head list;
struct btf_id_set *set;
struct module *owner;
};
struct kfunc_btf_id_list;
#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
void register_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
struct kfunc_btf_id_set *s);
void unregister_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
struct kfunc_btf_id_set *s);
bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, u32 kfunc_id,
struct module *owner);
#else
static inline void register_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
struct kfunc_btf_id_set *s)
{
}
static inline void unregister_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
struct kfunc_btf_id_set *s)
{
}
static inline bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist,
u32 kfunc_id, struct module *owner)
{
return false;
}
#endif
#define DEFINE_KFUNC_BTF_ID_SET(set, name) \
struct kfunc_btf_id_set name = { LIST_HEAD_INIT(name.list), (set), \
THIS_MODULE }
extern struct kfunc_btf_id_list bpf_tcp_ca_kfunc_list;
extern struct kfunc_btf_id_list prog_test_kfunc_list;
#endif #endif
...@@ -6343,3 +6343,58 @@ const struct bpf_func_proto bpf_btf_find_by_name_kind_proto = { ...@@ -6343,3 +6343,58 @@ const struct bpf_func_proto bpf_btf_find_by_name_kind_proto = {
}; };
BTF_ID_LIST_GLOBAL_SINGLE(btf_task_struct_ids, struct, task_struct) BTF_ID_LIST_GLOBAL_SINGLE(btf_task_struct_ids, struct, task_struct)
/* BTF ID set registration API for modules */
struct kfunc_btf_id_list {
struct list_head list;
struct mutex mutex;
};
#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
void register_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
struct kfunc_btf_id_set *s)
{
mutex_lock(&l->mutex);
list_add(&s->list, &l->list);
mutex_unlock(&l->mutex);
}
EXPORT_SYMBOL_GPL(register_kfunc_btf_id_set);
void unregister_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
struct kfunc_btf_id_set *s)
{
mutex_lock(&l->mutex);
list_del_init(&s->list);
mutex_unlock(&l->mutex);
}
EXPORT_SYMBOL_GPL(unregister_kfunc_btf_id_set);
bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, u32 kfunc_id,
struct module *owner)
{
struct kfunc_btf_id_set *s;
if (!owner)
return false;
mutex_lock(&klist->mutex);
list_for_each_entry(s, &klist->list, list) {
if (s->owner == owner && btf_id_set_contains(s->set, kfunc_id)) {
mutex_unlock(&klist->mutex);
return true;
}
}
mutex_unlock(&klist->mutex);
return false;
}
#endif
#define DEFINE_KFUNC_BTF_ID_LIST(name) \
struct kfunc_btf_id_list name = { LIST_HEAD_INIT(name.list), \
__MUTEX_INITIALIZER(name.mutex) }; \
EXPORT_SYMBOL_GPL(name)
DEFINE_KFUNC_BTF_ID_LIST(bpf_tcp_ca_kfunc_list);
DEFINE_KFUNC_BTF_ID_LIST(prog_test_kfunc_list);
...@@ -32,6 +32,7 @@ ...@@ -32,6 +32,7 @@
#include <linux/perf_event.h> #include <linux/perf_event.h>
#include <linux/extable.h> #include <linux/extable.h>
#include <linux/log2.h> #include <linux/log2.h>
#include <linux/bpf_verifier.h>
#include <asm/barrier.h> #include <asm/barrier.h>
#include <asm/unaligned.h> #include <asm/unaligned.h>
...@@ -2255,6 +2256,9 @@ static void bpf_prog_free_deferred(struct work_struct *work) ...@@ -2255,6 +2256,9 @@ static void bpf_prog_free_deferred(struct work_struct *work)
int i; int i;
aux = container_of(work, struct bpf_prog_aux, work); aux = container_of(work, struct bpf_prog_aux, work);
#ifdef CONFIG_BPF_SYSCALL
bpf_free_kfunc_btf_tab(aux->kfunc_btf_tab);
#endif
bpf_free_used_maps(aux); bpf_free_used_maps(aux);
bpf_free_used_btfs(aux); bpf_free_used_btfs(aux);
if (bpf_prog_is_dev_bound(aux)) if (bpf_prog_is_dev_bound(aux))
......
This diff is collapsed.
...@@ -2,6 +2,7 @@ ...@@ -2,6 +2,7 @@
/* Copyright (c) 2017 Facebook /* Copyright (c) 2017 Facebook
*/ */
#include <linux/bpf.h> #include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h> #include <linux/btf_ids.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/vmalloc.h> #include <linux/vmalloc.h>
...@@ -241,9 +242,11 @@ BTF_ID(func, bpf_kfunc_call_test2) ...@@ -241,9 +242,11 @@ BTF_ID(func, bpf_kfunc_call_test2)
BTF_ID(func, bpf_kfunc_call_test3) BTF_ID(func, bpf_kfunc_call_test3)
BTF_SET_END(test_sk_kfunc_ids) BTF_SET_END(test_sk_kfunc_ids)
bool bpf_prog_test_check_kfunc_call(u32 kfunc_id) bool bpf_prog_test_check_kfunc_call(u32 kfunc_id, struct module *owner)
{ {
return btf_id_set_contains(&test_sk_kfunc_ids, kfunc_id); if (btf_id_set_contains(&test_sk_kfunc_ids, kfunc_id))
return true;
return bpf_check_mod_kfunc_call(&prog_test_kfunc_list, kfunc_id, owner);
} }
static void *bpf_test_init(const union bpf_attr *kattr, u32 size, static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
......
...@@ -223,41 +223,13 @@ BTF_ID(func, tcp_reno_cong_avoid) ...@@ -223,41 +223,13 @@ BTF_ID(func, tcp_reno_cong_avoid)
BTF_ID(func, tcp_reno_undo_cwnd) BTF_ID(func, tcp_reno_undo_cwnd)
BTF_ID(func, tcp_slow_start) BTF_ID(func, tcp_slow_start)
BTF_ID(func, tcp_cong_avoid_ai) BTF_ID(func, tcp_cong_avoid_ai)
#ifdef CONFIG_X86
#ifdef CONFIG_DYNAMIC_FTRACE
#if IS_BUILTIN(CONFIG_TCP_CONG_CUBIC)
BTF_ID(func, cubictcp_init)
BTF_ID(func, cubictcp_recalc_ssthresh)
BTF_ID(func, cubictcp_cong_avoid)
BTF_ID(func, cubictcp_state)
BTF_ID(func, cubictcp_cwnd_event)
BTF_ID(func, cubictcp_acked)
#endif
#if IS_BUILTIN(CONFIG_TCP_CONG_DCTCP)
BTF_ID(func, dctcp_init)
BTF_ID(func, dctcp_update_alpha)
BTF_ID(func, dctcp_cwnd_event)
BTF_ID(func, dctcp_ssthresh)
BTF_ID(func, dctcp_cwnd_undo)
BTF_ID(func, dctcp_state)
#endif
#if IS_BUILTIN(CONFIG_TCP_CONG_BBR)
BTF_ID(func, bbr_init)
BTF_ID(func, bbr_main)
BTF_ID(func, bbr_sndbuf_expand)
BTF_ID(func, bbr_undo_cwnd)
BTF_ID(func, bbr_cwnd_event)
BTF_ID(func, bbr_ssthresh)
BTF_ID(func, bbr_min_tso_segs)
BTF_ID(func, bbr_set_state)
#endif
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_X86 */
BTF_SET_END(bpf_tcp_ca_kfunc_ids) BTF_SET_END(bpf_tcp_ca_kfunc_ids)
static bool bpf_tcp_ca_check_kfunc_call(u32 kfunc_btf_id) static bool bpf_tcp_ca_check_kfunc_call(u32 kfunc_btf_id, struct module *owner)
{ {
return btf_id_set_contains(&bpf_tcp_ca_kfunc_ids, kfunc_btf_id); if (btf_id_set_contains(&bpf_tcp_ca_kfunc_ids, kfunc_btf_id))
return true;
return bpf_check_mod_kfunc_call(&bpf_tcp_ca_kfunc_list, kfunc_btf_id, owner);
} }
static const struct bpf_verifier_ops bpf_tcp_ca_verifier_ops = { static const struct bpf_verifier_ops bpf_tcp_ca_verifier_ops = {
......
...@@ -56,6 +56,8 @@ ...@@ -56,6 +56,8 @@
* otherwise TCP stack falls back to an internal pacing using one high * otherwise TCP stack falls back to an internal pacing using one high
* resolution timer per TCP socket and may use more resources. * resolution timer per TCP socket and may use more resources.
*/ */
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/module.h> #include <linux/module.h>
#include <net/tcp.h> #include <net/tcp.h>
#include <linux/inet_diag.h> #include <linux/inet_diag.h>
...@@ -1152,14 +1154,38 @@ static struct tcp_congestion_ops tcp_bbr_cong_ops __read_mostly = { ...@@ -1152,14 +1154,38 @@ static struct tcp_congestion_ops tcp_bbr_cong_ops __read_mostly = {
.set_state = bbr_set_state, .set_state = bbr_set_state,
}; };
BTF_SET_START(tcp_bbr_kfunc_ids)
#ifdef CONFIG_X86
#ifdef CONFIG_DYNAMIC_FTRACE
BTF_ID(func, bbr_init)
BTF_ID(func, bbr_main)
BTF_ID(func, bbr_sndbuf_expand)
BTF_ID(func, bbr_undo_cwnd)
BTF_ID(func, bbr_cwnd_event)
BTF_ID(func, bbr_ssthresh)
BTF_ID(func, bbr_min_tso_segs)
BTF_ID(func, bbr_set_state)
#endif
#endif
BTF_SET_END(tcp_bbr_kfunc_ids)
static DEFINE_KFUNC_BTF_ID_SET(&tcp_bbr_kfunc_ids, tcp_bbr_kfunc_btf_set);
static int __init bbr_register(void) static int __init bbr_register(void)
{ {
int ret;
BUILD_BUG_ON(sizeof(struct bbr) > ICSK_CA_PRIV_SIZE); BUILD_BUG_ON(sizeof(struct bbr) > ICSK_CA_PRIV_SIZE);
return tcp_register_congestion_control(&tcp_bbr_cong_ops); ret = tcp_register_congestion_control(&tcp_bbr_cong_ops);
if (ret)
return ret;
register_kfunc_btf_id_set(&bpf_tcp_ca_kfunc_list, &tcp_bbr_kfunc_btf_set);
return 0;
} }
static void __exit bbr_unregister(void) static void __exit bbr_unregister(void)
{ {
unregister_kfunc_btf_id_set(&bpf_tcp_ca_kfunc_list, &tcp_bbr_kfunc_btf_set);
tcp_unregister_congestion_control(&tcp_bbr_cong_ops); tcp_unregister_congestion_control(&tcp_bbr_cong_ops);
} }
......
...@@ -25,6 +25,8 @@ ...@@ -25,6 +25,8 @@
*/ */
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/math64.h> #include <linux/math64.h>
#include <net/tcp.h> #include <net/tcp.h>
...@@ -482,8 +484,25 @@ static struct tcp_congestion_ops cubictcp __read_mostly = { ...@@ -482,8 +484,25 @@ static struct tcp_congestion_ops cubictcp __read_mostly = {
.name = "cubic", .name = "cubic",
}; };
BTF_SET_START(tcp_cubic_kfunc_ids)
#ifdef CONFIG_X86
#ifdef CONFIG_DYNAMIC_FTRACE
BTF_ID(func, cubictcp_init)
BTF_ID(func, cubictcp_recalc_ssthresh)
BTF_ID(func, cubictcp_cong_avoid)
BTF_ID(func, cubictcp_state)
BTF_ID(func, cubictcp_cwnd_event)
BTF_ID(func, cubictcp_acked)
#endif
#endif
BTF_SET_END(tcp_cubic_kfunc_ids)
static DEFINE_KFUNC_BTF_ID_SET(&tcp_cubic_kfunc_ids, tcp_cubic_kfunc_btf_set);
static int __init cubictcp_register(void) static int __init cubictcp_register(void)
{ {
int ret;
BUILD_BUG_ON(sizeof(struct bictcp) > ICSK_CA_PRIV_SIZE); BUILD_BUG_ON(sizeof(struct bictcp) > ICSK_CA_PRIV_SIZE);
/* Precompute a bunch of the scaling factors that are used per-packet /* Precompute a bunch of the scaling factors that are used per-packet
...@@ -514,11 +533,16 @@ static int __init cubictcp_register(void) ...@@ -514,11 +533,16 @@ static int __init cubictcp_register(void)
/* divide by bic_scale and by constant Srtt (100ms) */ /* divide by bic_scale and by constant Srtt (100ms) */
do_div(cube_factor, bic_scale * 10); do_div(cube_factor, bic_scale * 10);
return tcp_register_congestion_control(&cubictcp); ret = tcp_register_congestion_control(&cubictcp);
if (ret)
return ret;
register_kfunc_btf_id_set(&bpf_tcp_ca_kfunc_list, &tcp_cubic_kfunc_btf_set);
return 0;
} }
static void __exit cubictcp_unregister(void) static void __exit cubictcp_unregister(void)
{ {
unregister_kfunc_btf_id_set(&bpf_tcp_ca_kfunc_list, &tcp_cubic_kfunc_btf_set);
tcp_unregister_congestion_control(&cubictcp); tcp_unregister_congestion_control(&cubictcp);
} }
......
...@@ -36,6 +36,8 @@ ...@@ -36,6 +36,8 @@
* Glenn Judd <glenn.judd@morganstanley.com> * Glenn Judd <glenn.judd@morganstanley.com>
*/ */
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <net/tcp.h> #include <net/tcp.h>
...@@ -236,14 +238,36 @@ static struct tcp_congestion_ops dctcp_reno __read_mostly = { ...@@ -236,14 +238,36 @@ static struct tcp_congestion_ops dctcp_reno __read_mostly = {
.name = "dctcp-reno", .name = "dctcp-reno",
}; };
BTF_SET_START(tcp_dctcp_kfunc_ids)
#ifdef CONFIG_X86
#ifdef CONFIG_DYNAMIC_FTRACE
BTF_ID(func, dctcp_init)
BTF_ID(func, dctcp_update_alpha)
BTF_ID(func, dctcp_cwnd_event)
BTF_ID(func, dctcp_ssthresh)
BTF_ID(func, dctcp_cwnd_undo)
BTF_ID(func, dctcp_state)
#endif
#endif
BTF_SET_END(tcp_dctcp_kfunc_ids)
static DEFINE_KFUNC_BTF_ID_SET(&tcp_dctcp_kfunc_ids, tcp_dctcp_kfunc_btf_set);
static int __init dctcp_register(void) static int __init dctcp_register(void)
{ {
int ret;
BUILD_BUG_ON(sizeof(struct dctcp) > ICSK_CA_PRIV_SIZE); BUILD_BUG_ON(sizeof(struct dctcp) > ICSK_CA_PRIV_SIZE);
return tcp_register_congestion_control(&dctcp); ret = tcp_register_congestion_control(&dctcp);
if (ret)
return ret;
register_kfunc_btf_id_set(&bpf_tcp_ca_kfunc_list, &tcp_dctcp_kfunc_btf_set);
return 0;
} }
static void __exit dctcp_unregister(void) static void __exit dctcp_unregister(void)
{ {
unregister_kfunc_btf_id_set(&bpf_tcp_ca_kfunc_list, &tcp_dctcp_kfunc_btf_set);
tcp_unregister_congestion_control(&dctcp); tcp_unregister_congestion_control(&dctcp);
} }
......
...@@ -41,6 +41,7 @@ quiet_cmd_btf_ko = BTF [M] $@ ...@@ -41,6 +41,7 @@ quiet_cmd_btf_ko = BTF [M] $@
cmd_btf_ko = \ cmd_btf_ko = \
if [ -f vmlinux ]; then \ if [ -f vmlinux ]; then \
LLVM_OBJCOPY="$(OBJCOPY)" $(PAHOLE) -J --btf_base vmlinux $@; \ LLVM_OBJCOPY="$(OBJCOPY)" $(PAHOLE) -J --btf_base vmlinux $@; \
$(RESOLVE_BTFIDS) -b vmlinux $@; \
else \ else \
printf "Skipping BTF generation for %s due to unavailability of vmlinux\n" $@ 1>&2; \ printf "Skipping BTF generation for %s due to unavailability of vmlinux\n" $@ 1>&2; \
fi; fi;
......
...@@ -89,6 +89,7 @@ struct btf_id { ...@@ -89,6 +89,7 @@ struct btf_id {
struct object { struct object {
const char *path; const char *path;
const char *btf; const char *btf;
const char *base_btf_path;
struct { struct {
int fd; int fd;
...@@ -477,16 +478,27 @@ static int symbols_resolve(struct object *obj) ...@@ -477,16 +478,27 @@ static int symbols_resolve(struct object *obj)
int nr_structs = obj->nr_structs; int nr_structs = obj->nr_structs;
int nr_unions = obj->nr_unions; int nr_unions = obj->nr_unions;
int nr_funcs = obj->nr_funcs; int nr_funcs = obj->nr_funcs;
struct btf *base_btf = NULL;
int err, type_id; int err, type_id;
struct btf *btf; struct btf *btf;
__u32 nr_types; __u32 nr_types;
btf = btf__parse(obj->btf ?: obj->path, NULL); if (obj->base_btf_path) {
base_btf = btf__parse(obj->base_btf_path, NULL);
err = libbpf_get_error(base_btf);
if (err) {
pr_err("FAILED: load base BTF from %s: %s\n",
obj->base_btf_path, strerror(-err));
return -1;
}
}
btf = btf__parse_split(obj->btf ?: obj->path, base_btf);
err = libbpf_get_error(btf); err = libbpf_get_error(btf);
if (err) { if (err) {
pr_err("FAILED: load BTF from %s: %s\n", pr_err("FAILED: load BTF from %s: %s\n",
obj->btf ?: obj->path, strerror(-err)); obj->btf ?: obj->path, strerror(-err));
return -1; goto out;
} }
err = -1; err = -1;
...@@ -545,6 +557,7 @@ static int symbols_resolve(struct object *obj) ...@@ -545,6 +557,7 @@ static int symbols_resolve(struct object *obj)
err = 0; err = 0;
out: out:
btf__free(base_btf);
btf__free(btf); btf__free(btf);
return err; return err;
} }
...@@ -678,7 +691,6 @@ static const char * const resolve_btfids_usage[] = { ...@@ -678,7 +691,6 @@ static const char * const resolve_btfids_usage[] = {
int main(int argc, const char **argv) int main(int argc, const char **argv)
{ {
bool no_fail = false;
struct object obj = { struct object obj = {
.efile = { .efile = {
.idlist_shndx = -1, .idlist_shndx = -1,
...@@ -695,8 +707,8 @@ int main(int argc, const char **argv) ...@@ -695,8 +707,8 @@ int main(int argc, const char **argv)
"be more verbose (show errors, etc)"), "be more verbose (show errors, etc)"),
OPT_STRING(0, "btf", &obj.btf, "BTF data", OPT_STRING(0, "btf", &obj.btf, "BTF data",
"BTF data"), "BTF data"),
OPT_BOOLEAN(0, "no-fail", &no_fail, OPT_STRING('b', "btf_base", &obj.base_btf_path, "file",
"do not fail if " BTF_IDS_SECTION " section is not found"), "path of file providing base BTF"),
OPT_END() OPT_END()
}; };
int err = -1; int err = -1;
...@@ -717,10 +729,8 @@ int main(int argc, const char **argv) ...@@ -717,10 +729,8 @@ int main(int argc, const char **argv)
*/ */
if (obj.efile.idlist_shndx == -1 || if (obj.efile.idlist_shndx == -1 ||
obj.efile.symbols_shndx == -1) { obj.efile.symbols_shndx == -1) {
if (no_fail) pr_debug("Cannot find .BTF_ids or symbols sections, nothing to do\n");
return 0; return 0;
pr_err("FAILED to find needed sections\n");
return -1;
} }
if (symbols_collect(&obj)) if (symbols_collect(&obj))
......
...@@ -264,6 +264,7 @@ int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr) ...@@ -264,6 +264,7 @@ int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr)
attr.line_info_rec_size = load_attr->line_info_rec_size; attr.line_info_rec_size = load_attr->line_info_rec_size;
attr.line_info_cnt = load_attr->line_info_cnt; attr.line_info_cnt = load_attr->line_info_cnt;
attr.line_info = ptr_to_u64(load_attr->line_info); attr.line_info = ptr_to_u64(load_attr->line_info);
attr.fd_array = ptr_to_u64(load_attr->fd_array);
if (load_attr->name) if (load_attr->name)
memcpy(attr.prog_name, load_attr->name, memcpy(attr.prog_name, load_attr->name,
......
...@@ -7,6 +7,15 @@ struct ksym_relo_desc { ...@@ -7,6 +7,15 @@ struct ksym_relo_desc {
const char *name; const char *name;
int kind; int kind;
int insn_idx; int insn_idx;
bool is_weak;
};
struct ksym_desc {
const char *name;
int ref;
int kind;
int off;
int insn;
}; };
struct bpf_gen { struct bpf_gen {
...@@ -24,6 +33,10 @@ struct bpf_gen { ...@@ -24,6 +33,10 @@ struct bpf_gen {
int relo_cnt; int relo_cnt;
char attach_target[128]; char attach_target[128];
int attach_kind; int attach_kind;
struct ksym_desc *ksyms;
__u32 nr_ksyms;
int fd_array;
int nr_fd_array;
}; };
void bpf_gen__init(struct bpf_gen *gen, int log_level); void bpf_gen__init(struct bpf_gen *gen, int log_level);
...@@ -36,6 +49,7 @@ void bpf_gen__prog_load(struct bpf_gen *gen, struct bpf_prog_load_params *load_a ...@@ -36,6 +49,7 @@ void bpf_gen__prog_load(struct bpf_gen *gen, struct bpf_prog_load_params *load_a
void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *value, __u32 value_size); void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *value, __u32 value_size);
void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx); void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx);
void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *name, enum bpf_attach_type type); void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *name, enum bpf_attach_type type);
void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, int kind, int insn_idx); void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak, int kind,
int insn_idx);
#endif #endif
...@@ -695,15 +695,15 @@ __s32 btf__find_by_name(const struct btf *btf, const char *type_name) ...@@ -695,15 +695,15 @@ __s32 btf__find_by_name(const struct btf *btf, const char *type_name)
return libbpf_err(-ENOENT); return libbpf_err(-ENOENT);
} }
__s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name, static __s32 btf_find_by_name_kind(const struct btf *btf, int start_id,
__u32 kind) const char *type_name, __u32 kind)
{ {
__u32 i, nr_types = btf__get_nr_types(btf); __u32 i, nr_types = btf__get_nr_types(btf);
if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void")) if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void"))
return 0; return 0;
for (i = 1; i <= nr_types; i++) { for (i = start_id; i <= nr_types; i++) {
const struct btf_type *t = btf__type_by_id(btf, i); const struct btf_type *t = btf__type_by_id(btf, i);
const char *name; const char *name;
...@@ -717,6 +717,18 @@ __s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name, ...@@ -717,6 +717,18 @@ __s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
return libbpf_err(-ENOENT); return libbpf_err(-ENOENT);
} }
__s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name,
__u32 kind)
{
return btf_find_by_name_kind(btf, btf->start_id, type_name, kind);
}
__s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
__u32 kind)
{
return btf_find_by_name_kind(btf, 1, type_name, kind);
}
static bool btf_is_modifiable(const struct btf *btf) static bool btf_is_modifiable(const struct btf *btf)
{ {
return (void *)btf->hdr != btf->raw_data; return (void *)btf->hdr != btf->raw_data;
......
This diff is collapsed.
...@@ -443,6 +443,11 @@ struct extern_desc { ...@@ -443,6 +443,11 @@ struct extern_desc {
/* local btf_id of the ksym extern's type. */ /* local btf_id of the ksym extern's type. */
__u32 type_id; __u32 type_id;
/* BTF fd index to be patched in for insn->off, this is
* 0 for vmlinux BTF, index in obj->fd_array for module
* BTF
*/
__s16 btf_fd_idx;
} ksym; } ksym;
}; };
}; };
...@@ -454,6 +459,7 @@ struct module_btf { ...@@ -454,6 +459,7 @@ struct module_btf {
char *name; char *name;
__u32 id; __u32 id;
int fd; int fd;
int fd_array_idx;
}; };
struct bpf_object { struct bpf_object {
...@@ -539,6 +545,10 @@ struct bpf_object { ...@@ -539,6 +545,10 @@ struct bpf_object {
void *priv; void *priv;
bpf_object_clear_priv_t clear_priv; bpf_object_clear_priv_t clear_priv;
int *fd_array;
size_t fd_array_cap;
size_t fd_array_cnt;
char path[]; char path[];
}; };
#define obj_elf_valid(o) ((o)->efile.elf) #define obj_elf_valid(o) ((o)->efile.elf)
...@@ -3429,11 +3439,6 @@ static int bpf_object__collect_externs(struct bpf_object *obj) ...@@ -3429,11 +3439,6 @@ static int bpf_object__collect_externs(struct bpf_object *obj)
return -ENOTSUP; return -ENOTSUP;
} }
} else if (strcmp(sec_name, KSYMS_SEC) == 0) { } else if (strcmp(sec_name, KSYMS_SEC) == 0) {
if (btf_is_func(t) && ext->is_weak) {
pr_warn("extern weak function %s is unsupported\n",
ext->name);
return -ENOTSUP;
}
ksym_sec = sec; ksym_sec = sec;
ext->type = EXT_KSYM; ext->type = EXT_KSYM;
skip_mods_and_typedefs(obj->btf, t->type, skip_mods_and_typedefs(obj->btf, t->type,
...@@ -5406,7 +5411,13 @@ bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog) ...@@ -5406,7 +5411,13 @@ bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
case RELO_EXTERN_FUNC: case RELO_EXTERN_FUNC:
ext = &obj->externs[relo->sym_off]; ext = &obj->externs[relo->sym_off];
insn[0].src_reg = BPF_PSEUDO_KFUNC_CALL; insn[0].src_reg = BPF_PSEUDO_KFUNC_CALL;
if (ext->is_set) {
insn[0].imm = ext->ksym.kernel_btf_id; insn[0].imm = ext->ksym.kernel_btf_id;
insn[0].off = ext->ksym.btf_fd_idx;
} else { /* unresolved weak kfunc */
insn[0].imm = 0;
insn[0].off = 0;
}
break; break;
case RELO_SUBPROG_ADDR: case RELO_SUBPROG_ADDR:
if (insn[0].src_reg != BPF_PSEUDO_FUNC) { if (insn[0].src_reg != BPF_PSEUDO_FUNC) {
...@@ -6236,6 +6247,7 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt, ...@@ -6236,6 +6247,7 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
} }
load_attr.log_level = prog->log_level; load_attr.log_level = prog->log_level;
load_attr.prog_flags = prog->prog_flags; load_attr.prog_flags = prog->prog_flags;
load_attr.fd_array = prog->obj->fd_array;
/* adjust load_attr if sec_def provides custom preload callback */ /* adjust load_attr if sec_def provides custom preload callback */
if (prog->sec_def && prog->sec_def->preload_fn) { if (prog->sec_def && prog->sec_def->preload_fn) {
...@@ -6348,12 +6360,12 @@ static int bpf_program__record_externs(struct bpf_program *prog) ...@@ -6348,12 +6360,12 @@ static int bpf_program__record_externs(struct bpf_program *prog)
ext->name); ext->name);
return -ENOTSUP; return -ENOTSUP;
} }
bpf_gen__record_extern(obj->gen_loader, ext->name, BTF_KIND_VAR, bpf_gen__record_extern(obj->gen_loader, ext->name, ext->is_weak,
relo->insn_idx); BTF_KIND_VAR, relo->insn_idx);
break; break;
case RELO_EXTERN_FUNC: case RELO_EXTERN_FUNC:
bpf_gen__record_extern(obj->gen_loader, ext->name, BTF_KIND_FUNC, bpf_gen__record_extern(obj->gen_loader, ext->name, ext->is_weak,
relo->insn_idx); BTF_KIND_FUNC, relo->insn_idx);
break; break;
default: default:
continue; continue;
...@@ -6752,13 +6764,14 @@ static int bpf_object__read_kallsyms_file(struct bpf_object *obj) ...@@ -6752,13 +6764,14 @@ static int bpf_object__read_kallsyms_file(struct bpf_object *obj)
static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name, static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
__u16 kind, struct btf **res_btf, __u16 kind, struct btf **res_btf,
int *res_btf_fd) struct module_btf **res_mod_btf)
{ {
int i, id, btf_fd, err; struct module_btf *mod_btf;
struct btf *btf; struct btf *btf;
int i, id, err;
btf = obj->btf_vmlinux; btf = obj->btf_vmlinux;
btf_fd = 0; mod_btf = NULL;
id = btf__find_by_name_kind(btf, ksym_name, kind); id = btf__find_by_name_kind(btf, ksym_name, kind);
if (id == -ENOENT) { if (id == -ENOENT) {
...@@ -6767,10 +6780,10 @@ static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name, ...@@ -6767,10 +6780,10 @@ static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
return err; return err;
for (i = 0; i < obj->btf_module_cnt; i++) { for (i = 0; i < obj->btf_module_cnt; i++) {
btf = obj->btf_modules[i].btf; /* we assume module_btf's BTF FD is always >0 */
/* we assume module BTF FD is always >0 */ mod_btf = &obj->btf_modules[i];
btf_fd = obj->btf_modules[i].fd; btf = mod_btf->btf;
id = btf__find_by_name_kind(btf, ksym_name, kind); id = btf__find_by_name_kind_own(btf, ksym_name, kind);
if (id != -ENOENT) if (id != -ENOENT)
break; break;
} }
...@@ -6779,7 +6792,7 @@ static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name, ...@@ -6779,7 +6792,7 @@ static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
return -ESRCH; return -ESRCH;
*res_btf = btf; *res_btf = btf;
*res_btf_fd = btf_fd; *res_mod_btf = mod_btf;
return id; return id;
} }
...@@ -6788,14 +6801,15 @@ static int bpf_object__resolve_ksym_var_btf_id(struct bpf_object *obj, ...@@ -6788,14 +6801,15 @@ static int bpf_object__resolve_ksym_var_btf_id(struct bpf_object *obj,
{ {
const struct btf_type *targ_var, *targ_type; const struct btf_type *targ_var, *targ_type;
__u32 targ_type_id, local_type_id; __u32 targ_type_id, local_type_id;
struct module_btf *mod_btf = NULL;
const char *targ_var_name; const char *targ_var_name;
int id, btf_fd = 0, err;
struct btf *btf = NULL; struct btf *btf = NULL;
int id, err;
id = find_ksym_btf_id(obj, ext->name, BTF_KIND_VAR, &btf, &btf_fd); id = find_ksym_btf_id(obj, ext->name, BTF_KIND_VAR, &btf, &mod_btf);
if (id == -ESRCH && ext->is_weak) { if (id < 0) {
if (id == -ESRCH && ext->is_weak)
return 0; return 0;
} else if (id < 0) {
pr_warn("extern (var ksym) '%s': not found in kernel BTF\n", pr_warn("extern (var ksym) '%s': not found in kernel BTF\n",
ext->name); ext->name);
return id; return id;
...@@ -6827,7 +6841,7 @@ static int bpf_object__resolve_ksym_var_btf_id(struct bpf_object *obj, ...@@ -6827,7 +6841,7 @@ static int bpf_object__resolve_ksym_var_btf_id(struct bpf_object *obj,
} }
ext->is_set = true; ext->is_set = true;
ext->ksym.kernel_btf_obj_fd = btf_fd; ext->ksym.kernel_btf_obj_fd = mod_btf ? mod_btf->fd : 0;
ext->ksym.kernel_btf_id = id; ext->ksym.kernel_btf_id = id;
pr_debug("extern (var ksym) '%s': resolved to [%d] %s %s\n", pr_debug("extern (var ksym) '%s': resolved to [%d] %s %s\n",
ext->name, id, btf_kind_str(targ_var), targ_var_name); ext->name, id, btf_kind_str(targ_var), targ_var_name);
...@@ -6839,26 +6853,22 @@ static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj, ...@@ -6839,26 +6853,22 @@ static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj,
struct extern_desc *ext) struct extern_desc *ext)
{ {
int local_func_proto_id, kfunc_proto_id, kfunc_id; int local_func_proto_id, kfunc_proto_id, kfunc_id;
struct module_btf *mod_btf = NULL;
const struct btf_type *kern_func; const struct btf_type *kern_func;
struct btf *kern_btf = NULL; struct btf *kern_btf = NULL;
int ret, kern_btf_fd = 0; int ret;
local_func_proto_id = ext->ksym.type_id; local_func_proto_id = ext->ksym.type_id;
kfunc_id = find_ksym_btf_id(obj, ext->name, BTF_KIND_FUNC, kfunc_id = find_ksym_btf_id(obj, ext->name, BTF_KIND_FUNC, &kern_btf, &mod_btf);
&kern_btf, &kern_btf_fd);
if (kfunc_id < 0) { if (kfunc_id < 0) {
pr_warn("extern (func ksym) '%s': not found in kernel BTF\n", if (kfunc_id == -ESRCH && ext->is_weak)
return 0;
pr_warn("extern (func ksym) '%s': not found in kernel or module BTFs\n",
ext->name); ext->name);
return kfunc_id; return kfunc_id;
} }
if (kern_btf != obj->btf_vmlinux) {
pr_warn("extern (func ksym) '%s': function in kernel module is not supported\n",
ext->name);
return -ENOTSUP;
}
kern_func = btf__type_by_id(kern_btf, kfunc_id); kern_func = btf__type_by_id(kern_btf, kfunc_id);
kfunc_proto_id = kern_func->type; kfunc_proto_id = kern_func->type;
...@@ -6870,9 +6880,30 @@ static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj, ...@@ -6870,9 +6880,30 @@ static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj,
return -EINVAL; return -EINVAL;
} }
/* set index for module BTF fd in fd_array, if unset */
if (mod_btf && !mod_btf->fd_array_idx) {
/* insn->off is s16 */
if (obj->fd_array_cnt == INT16_MAX) {
pr_warn("extern (func ksym) '%s': module BTF fd index %d too big to fit in bpf_insn offset\n",
ext->name, mod_btf->fd_array_idx);
return -E2BIG;
}
/* Cannot use index 0 for module BTF fd */
if (!obj->fd_array_cnt)
obj->fd_array_cnt = 1;
ret = libbpf_ensure_mem((void **)&obj->fd_array, &obj->fd_array_cap, sizeof(int),
obj->fd_array_cnt + 1);
if (ret)
return ret;
mod_btf->fd_array_idx = obj->fd_array_cnt;
/* we assume module BTF FD is always >0 */
obj->fd_array[obj->fd_array_cnt++] = mod_btf->fd;
}
ext->is_set = true; ext->is_set = true;
ext->ksym.kernel_btf_obj_fd = kern_btf_fd;
ext->ksym.kernel_btf_id = kfunc_id; ext->ksym.kernel_btf_id = kfunc_id;
ext->ksym.btf_fd_idx = mod_btf ? mod_btf->fd_array_idx : 0;
pr_debug("extern (func ksym) '%s': resolved to kernel [%d]\n", pr_debug("extern (func ksym) '%s': resolved to kernel [%d]\n",
ext->name, kfunc_id); ext->name, kfunc_id);
...@@ -7031,6 +7062,9 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr) ...@@ -7031,6 +7062,9 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
err = bpf_gen__finish(obj->gen_loader); err = bpf_gen__finish(obj->gen_loader);
} }
/* clean up fd_array */
zfree(&obj->fd_array);
/* clean up module BTFs */ /* clean up module BTFs */
for (i = 0; i < obj->btf_module_cnt; i++) { for (i = 0; i < obj->btf_module_cnt; i++) {
close(obj->btf_modules[i].fd); close(obj->btf_modules[i].fd);
......
...@@ -298,6 +298,7 @@ struct bpf_prog_load_params { ...@@ -298,6 +298,7 @@ struct bpf_prog_load_params {
__u32 log_level; __u32 log_level;
char *log_buf; char *log_buf;
size_t log_buf_sz; size_t log_buf_sz;
int *fd_array;
}; };
int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr); int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr);
...@@ -408,6 +409,8 @@ int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ct ...@@ -408,6 +409,8 @@ int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ct
int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx); int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx);
int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx); int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx);
int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx); int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx);
__s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name,
__u32 kind);
extern enum libbpf_strict_mode libbpf_mode; extern enum libbpf_strict_mode libbpf_mode;
......
...@@ -315,8 +315,9 @@ LINKED_SKELS := test_static_linked.skel.h linked_funcs.skel.h \ ...@@ -315,8 +315,9 @@ LINKED_SKELS := test_static_linked.skel.h linked_funcs.skel.h \
linked_vars.skel.h linked_maps.skel.h linked_vars.skel.h linked_maps.skel.h
LSKELS := kfunc_call_test.c fentry_test.c fexit_test.c fexit_sleep.c \ LSKELS := kfunc_call_test.c fentry_test.c fexit_test.c fexit_sleep.c \
test_ksyms_module.c test_ringbuf.c atomics.c trace_printk.c \ test_ringbuf.c atomics.c trace_printk.c trace_vprintk.c
trace_vprintk.c # Generate both light skeleton and libbpf skeleton for these
LSKELS_EXTRA := test_ksyms_module.c
SKEL_BLACKLIST += $$(LSKELS) SKEL_BLACKLIST += $$(LSKELS)
test_static_linked.skel.h-deps := test_static_linked1.o test_static_linked2.o test_static_linked.skel.h-deps := test_static_linked1.o test_static_linked2.o
...@@ -346,7 +347,7 @@ TRUNNER_BPF_OBJS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.o, $$(TRUNNER_BPF_SRCS) ...@@ -346,7 +347,7 @@ TRUNNER_BPF_OBJS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.o, $$(TRUNNER_BPF_SRCS)
TRUNNER_BPF_SKELS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.skel.h, \ TRUNNER_BPF_SKELS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.skel.h, \
$$(filter-out $(SKEL_BLACKLIST) $(LINKED_BPF_SRCS),\ $$(filter-out $(SKEL_BLACKLIST) $(LINKED_BPF_SRCS),\
$$(TRUNNER_BPF_SRCS))) $$(TRUNNER_BPF_SRCS)))
TRUNNER_BPF_LSKELS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.lskel.h, $$(LSKELS)) TRUNNER_BPF_LSKELS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.lskel.h, $$(LSKELS) $$(LSKELS_EXTRA))
TRUNNER_BPF_SKELS_LINKED := $$(addprefix $$(TRUNNER_OUTPUT)/,$(LINKED_SKELS)) TRUNNER_BPF_SKELS_LINKED := $$(addprefix $$(TRUNNER_OUTPUT)/,$(LINKED_SKELS))
TEST_GEN_FILES += $$(TRUNNER_BPF_OBJS) TEST_GEN_FILES += $$(TRUNNER_BPF_OBJS)
...@@ -454,7 +455,7 @@ $(OUTPUT)/$(TRUNNER_BINARY): $(TRUNNER_TEST_OBJS) \ ...@@ -454,7 +455,7 @@ $(OUTPUT)/$(TRUNNER_BINARY): $(TRUNNER_TEST_OBJS) \
| $(TRUNNER_BINARY)-extras | $(TRUNNER_BINARY)-extras
$$(call msg,BINARY,,$$@) $$(call msg,BINARY,,$$@)
$(Q)$$(CC) $$(CFLAGS) $$(filter %.a %.o,$$^) $$(LDLIBS) -o $$@ $(Q)$$(CC) $$(CFLAGS) $$(filter %.a %.o,$$^) $$(LDLIBS) -o $$@
$(Q)$(RESOLVE_BTFIDS) --no-fail --btf $(TRUNNER_OUTPUT)/btf_data.o $$@ $(Q)$(RESOLVE_BTFIDS) --btf $(TRUNNER_OUTPUT)/btf_data.o $$@
endef endef
......
// SPDX-License-Identifier: GPL-2.0 // SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */ /* Copyright (c) 2020 Facebook */
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/error-injection.h> #include <linux/error-injection.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/module.h> #include <linux/module.h>
...@@ -13,6 +15,12 @@ ...@@ -13,6 +15,12 @@
DEFINE_PER_CPU(int, bpf_testmod_ksym_percpu) = 123; DEFINE_PER_CPU(int, bpf_testmod_ksym_percpu) = 123;
noinline void
bpf_testmod_test_mod_kfunc(int i)
{
*(int *)this_cpu_ptr(&bpf_testmod_ksym_percpu) = i;
}
noinline int bpf_testmod_loop_test(int n) noinline int bpf_testmod_loop_test(int n)
{ {
int i, sum = 0; int i, sum = 0;
...@@ -71,13 +79,26 @@ static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = { ...@@ -71,13 +79,26 @@ static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = {
.write = bpf_testmod_test_write, .write = bpf_testmod_test_write,
}; };
BTF_SET_START(bpf_testmod_kfunc_ids)
BTF_ID(func, bpf_testmod_test_mod_kfunc)
BTF_SET_END(bpf_testmod_kfunc_ids)
static DEFINE_KFUNC_BTF_ID_SET(&bpf_testmod_kfunc_ids, bpf_testmod_kfunc_btf_set);
static int bpf_testmod_init(void) static int bpf_testmod_init(void)
{ {
return sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file); int ret;
ret = sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
if (ret)
return ret;
register_kfunc_btf_id_set(&prog_test_kfunc_list, &bpf_testmod_kfunc_btf_set);
return 0;
} }
static void bpf_testmod_exit(void) static void bpf_testmod_exit(void)
{ {
unregister_kfunc_btf_id_set(&prog_test_kfunc_list, &bpf_testmod_kfunc_btf_set);
return sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file); return sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
} }
......
...@@ -2,30 +2,29 @@ ...@@ -2,30 +2,29 @@
/* Copyright (c) 2021 Facebook */ /* Copyright (c) 2021 Facebook */
#include <test_progs.h> #include <test_progs.h>
#include <bpf/libbpf.h> #include <network_helpers.h>
#include <bpf/btf.h>
#include "test_ksyms_module.lskel.h" #include "test_ksyms_module.lskel.h"
static int duration;
void test_ksyms_module(void) void test_ksyms_module(void)
{ {
struct test_ksyms_module* skel; struct test_ksyms_module *skel;
int retval;
int err; int err;
skel = test_ksyms_module__open_and_load(); if (!env.has_testmod) {
if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) test__skip();
return; return;
}
err = test_ksyms_module__attach(skel); skel = test_ksyms_module__open_and_load();
if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err)) if (!ASSERT_OK_PTR(skel, "test_ksyms_module__open_and_load"))
return;
err = bpf_prog_test_run(skel->progs.load.prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
NULL, NULL, (__u32 *)&retval, NULL);
if (!ASSERT_OK(err, "bpf_prog_test_run"))
goto cleanup; goto cleanup;
ASSERT_EQ(retval, 0, "retval");
usleep(1); ASSERT_EQ(skel->bss->out_bpf_testmod_ksym, 42, "bpf_testmod_ksym");
ASSERT_EQ(skel->bss->triggered, true, "triggered");
ASSERT_EQ(skel->bss->out_mod_ksym_global, 123, "global_ksym_val");
cleanup: cleanup:
test_ksyms_module__destroy(skel); test_ksyms_module__destroy(skel);
} }
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>
#include "test_ksyms_module.skel.h"
void test_ksyms_module_libbpf(void)
{
struct test_ksyms_module *skel;
int retval, err;
if (!env.has_testmod) {
test__skip();
return;
}
skel = test_ksyms_module__open_and_load();
if (!ASSERT_OK_PTR(skel, "test_ksyms_module__open"))
return;
err = bpf_prog_test_run(bpf_program__fd(skel->progs.load), 1, &pkt_v4,
sizeof(pkt_v4), NULL, NULL, (__u32 *)&retval, NULL);
if (!ASSERT_OK(err, "bpf_prog_test_run"))
goto cleanup;
ASSERT_EQ(retval, 0, "retval");
ASSERT_EQ(skel->bss->out_bpf_testmod_ksym, 42, "bpf_testmod_ksym");
cleanup:
test_ksyms_module__destroy(skel);
}
...@@ -2,24 +2,48 @@ ...@@ -2,24 +2,48 @@
/* Copyright (c) 2021 Facebook */ /* Copyright (c) 2021 Facebook */
#include "vmlinux.h" #include "vmlinux.h"
#include <bpf/bpf_helpers.h> #include <bpf/bpf_helpers.h>
#define X_0(x)
#define X_1(x) x X_0(x)
#define X_2(x) x X_1(x)
#define X_3(x) x X_2(x)
#define X_4(x) x X_3(x)
#define X_5(x) x X_4(x)
#define X_6(x) x X_5(x)
#define X_7(x) x X_6(x)
#define X_8(x) x X_7(x)
#define X_9(x) x X_8(x)
#define X_10(x) x X_9(x)
#define REPEAT_256(Y) X_2(X_10(X_10(Y))) X_5(X_10(Y)) X_6(Y)
extern const int bpf_testmod_ksym_percpu __ksym; extern const int bpf_testmod_ksym_percpu __ksym;
extern void bpf_testmod_test_mod_kfunc(int i) __ksym;
extern void bpf_testmod_invalid_mod_kfunc(void) __ksym __weak;
int out_mod_ksym_global = 0; int out_bpf_testmod_ksym = 0;
bool triggered = false; const volatile int x = 0;
SEC("raw_tp/sys_enter") SEC("tc")
int handler(const void *ctx) int load(struct __sk_buff *skb)
{ {
int *val; /* This will be kept by clang, but removed by verifier. Since it is
__u32 cpu; * marked as __weak, libbpf and gen_loader don't error out if BTF ID
* is not found for it, instead imm and off is set to 0 for it.
val = (int *)bpf_this_cpu_ptr(&bpf_testmod_ksym_percpu); */
out_mod_ksym_global = *val; if (x)
triggered = true; bpf_testmod_invalid_mod_kfunc();
bpf_testmod_test_mod_kfunc(42);
out_bpf_testmod_ksym = *(int *)bpf_this_cpu_ptr(&bpf_testmod_ksym_percpu);
return 0;
}
SEC("tc")
int load_256(struct __sk_buff *skb)
{
/* this will fail if kfunc doesn't reuse its own btf fd index */
REPEAT_256(bpf_testmod_test_mod_kfunc(42););
bpf_testmod_test_mod_kfunc(42);
return 0; return 0;
} }
......
{
"calls: invalid kfunc call not eliminated",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.result = REJECT,
.errstr = "invalid kernel function call not eliminated in verifier pass",
},
{
"calls: invalid kfunc call unreachable",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 0, 2),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
.result = ACCEPT,
},
{ {
"calls: basic sanity", "calls: basic sanity",
.insns = { .insns = {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment