Commit 07cbce2e authored by Jakub Kicinski

Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next

Daniel Borkmann says:

====================
pull-request: bpf-next 2020-11-14

1) Add BTF generation for kernel modules and extend BTF infra in the kernel,
   e.g. support for split BTF loading and validation, from Andrii Nakryiko.

2) Support for pointers beyond pkt_end to recognize LLVM generated patterns
   on inlined branch conditions, from Alexei Starovoitov.

3) Implement bpf_local_storage for task_struct for BPF LSM (a usage sketch
   follows this list), from KP Singh.

4) Enable FENTRY/FEXIT/RAW_TP tracing programs to use the bpf_sk_storage
   infra, from Martin KaFai Lau.

5) Add XDP bulk APIs that introduce a defer/flush mechanism to optimize the
   XDP_REDIRECT path (see the second sketch below this list), from Lorenzo
   Bianconi.

6) Fix a potential (although rather theoretical) deadlock of hashtab in NMI
   context, from Song Liu.

7) Fixes for cross and out-of-tree builds of bpftool and runqslower, allowing
   builds for different target archs from the same source tree, from
   Jean-Philippe Brucker.

8) Fix error path in htab_map_alloc() triggered from syzbot, from Eric Dumazet.

9) Move functionality from test_tcpbpf_user into the test_progs framework so it
   can run in BPF CI, from Alexander Duyck.

10) Lift hashtab key_size limit to be larger than MAX_BPF_STACK, from Florian Lehner.
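
As a quick illustration of (3), here is a minimal, hedged sketch of how a BPF
LSM program might use the new BPF_MAP_TYPE_TASK_STORAGE map together with the
bpf_task_storage_get() and bpf_get_current_task_btf() helpers added in this
series. The hook choice, map value layout and counter are illustrative
assumptions, not code from this pull request:

  /* Sketch only: per-task file-open counter kept in task local storage. */
  #include "vmlinux.h"
  #include <bpf/bpf_helpers.h>
  #include <bpf/bpf_tracing.h>

  char _license[] SEC("license") = "GPL";

  struct task_state {
          __u64 file_opens;
  };

  struct {
          __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
          __uint(map_flags, BPF_F_NO_PREALLOC);
          __type(key, int);
          __type(value, struct task_state);
  } task_map SEC(".maps");

  SEC("lsm/file_open")
  int BPF_PROG(count_open, struct file *file)
  {
          struct task_struct *task = bpf_get_current_task_btf();
          struct task_state *st;

          /* Create zero-initialized storage for this task on first use. */
          st = bpf_task_storage_get(&task_map, task, NULL,
                                    BPF_LOCAL_STORAGE_GET_F_CREATE);
          if (st)
                  __sync_fetch_and_add(&st->file_opens, 1);
          return 0;
  }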

Note that for the fix from Song we have seen a sparse report on context
imbalance which requires changes in sparse itself for proper annotation
detection; this is currently being discussed on linux-sparse among
developers [0]. Once we have more clarification/guidance after their fix,
Song will follow up.

  [0] https://lore.kernel.org/linux-sparse/CAHk-=wh4bx8A8dHnX612MsDO13st6uzAz1mJ1PaHHVevJx_ZCw@mail.gmail.com/T/
      https://lore.kernel.org/linux-sparse/20201109221345.uklbp3lzgq6g42zb@ltop.local/T/
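
For (5), the defer/flush pattern is visible in the mvneta, mvpp2 and mlx5
driver diffs below; a condensed, hedged sketch of a TX completion path using
the new API is shown here, with my_ring/my_buf and my_ring_next_completed()
as purely illustrative driver types and helpers:

  /* Sketch only: bulk-return XDP frames on TX completion. */
  #include <net/xdp.h>

  static void my_tx_complete(struct my_ring *ring, int done)
  {
          struct xdp_frame_bulk bq;
          int i;

          xdp_frame_bulk_init(&bq);
          rcu_read_lock();        /* needed around xdp_return_frame_bulk() */

          for (i = 0; i < done; i++) {
                  struct my_buf *buf = my_ring_next_completed(ring);

                  if (!buf)
                          break;
                  /* Frames sharing the same mem.id are queued and handed to
                   * page_pool_put_page_bulk() when the queue fills up or the
                   * allocator changes.
                   */
                  xdp_return_frame_bulk(buf->xdpf, &bq);
          }

          xdp_flush_frame_bulk(&bq);      /* release anything still queued */
          rcu_read_unlock();
  }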

* git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (66 commits)
  net: mlx5: Add xdp tx return bulking support
  net: mvpp2: Add xdp tx return bulking support
  net: mvneta: Add xdp tx return bulking support
  net: page_pool: Add bulk support for ptr_ring
  net: xdp: Introduce bulking for xdp tx return path
  bpf: Expose bpf_d_path helper to sleepable LSM hooks
  bpf: Augment the set of sleepable LSM hooks
  bpf: selftest: Use bpf_sk_storage in FENTRY/FEXIT/RAW_TP
  bpf: Allow using bpf_sk_storage in FENTRY/FEXIT/RAW_TP
  bpf: Rename some functions in bpf_sk_storage
  bpf: Folding omem_charge() into sk_storage_charge()
  selftests/bpf: Add asm tests for pkt vs pkt_end comparison.
  selftests/bpf: Add skb_pkt_end test
  bpf: Support for pointers beyond pkt_end.
  tools/bpf: Always run the *-clean recipes
  tools/bpf: Add bootstrap/ to .gitignore
  bpf: Fix NULL dereference in bpf_task_storage
  tools/bpftool: Fix build slowdown
  tools/runqslower: Build bpftool using HOSTCC
  tools/runqslower: Enable out-of-tree build
  ...
====================

Link: https://lore.kernel.org/r/20201114020819.29584-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 774626fa c14d61fc
......@@ -15,3 +15,11 @@ Description:
information with description of all internal kernel types. See
Documentation/bpf/btf.rst for detailed description of format
itself.
What: /sys/kernel/btf/<module-name>
Date: Nov 2020
KernelVersion: 5.11
Contact: bpf@vger.kernel.org
Description:
Read-only binary attribute exposing kernel module's BTF type
information as an add-on to the kernel's BTF (/sys/kernel/btf/vmlinux).
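
The new sysfs file described above can be consumed from userspace as split BTF
on top of the vmlinux base, much like the bpftool changes further below do via
btf__parse_split(). A hedged libbpf sketch (error handling abbreviated, helper
name dump_module_types() is illustrative and not part of this series):

  /* Sketch only: walk the types a module adds on top of vmlinux BTF. */
  #include <stdio.h>
  #include <bpf/btf.h>
  #include <bpf/libbpf.h>

  int dump_module_types(const char *module)
  {
          const struct btf_type *t;
          struct btf *base, *mod;
          char path[256];
          int i, start, n;

          base = btf__parse("/sys/kernel/btf/vmlinux", NULL);
          if (libbpf_get_error(base))
                  return -1;

          snprintf(path, sizeof(path), "/sys/kernel/btf/%s", module);
          mod = btf__parse_split(path, base);
          if (libbpf_get_error(mod)) {
                  btf__free(base);
                  return -1;
          }

          /* Module type IDs start right after the last vmlinux type. */
          start = btf__get_nr_types(base) + 1;
          n = btf__get_nr_types(mod);
          for (i = start; i <= n; i++) {
                  t = btf__type_by_id(mod, i);
                  printf("[%d] kind %d name %s\n", i, btf_kind(t),
                         btf__name_by_offset(mod, t->name_off));
          }

          btf__free(mod);
          btf__free(base);
          return 0;
  }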
......@@ -1834,8 +1834,13 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
struct netdev_queue *nq, bool napi)
{
unsigned int bytes_compl = 0, pkts_compl = 0;
struct xdp_frame_bulk bq;
int i;
xdp_frame_bulk_init(&bq);
rcu_read_lock(); /* need for xdp_return_frame_bulk */
for (i = 0; i < num; i++) {
struct mvneta_tx_buf *buf = &txq->buf[txq->txq_get_index];
struct mvneta_tx_desc *tx_desc = txq->descs +
......@@ -1857,9 +1862,12 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
if (napi && buf->type == MVNETA_TYPE_XDP_TX)
xdp_return_frame_rx_napi(buf->xdpf);
else
xdp_return_frame(buf->xdpf);
xdp_return_frame_bulk(buf->xdpf, &bq);
}
}
xdp_flush_frame_bulk(&bq);
rcu_read_unlock();
netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
}
......
......@@ -2440,8 +2440,13 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
struct mvpp2_tx_queue *txq,
struct mvpp2_txq_pcpu *txq_pcpu, int num)
{
struct xdp_frame_bulk bq;
int i;
xdp_frame_bulk_init(&bq);
rcu_read_lock(); /* need for xdp_return_frame_bulk */
for (i = 0; i < num; i++) {
struct mvpp2_txq_pcpu_buf *tx_buf =
txq_pcpu->buffs + txq_pcpu->txq_get_index;
......@@ -2454,10 +2459,13 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
dev_kfree_skb_any(tx_buf->skb);
else if (tx_buf->type == MVPP2_TYPE_XDP_TX ||
tx_buf->type == MVPP2_TYPE_XDP_NDO)
xdp_return_frame(tx_buf->xdpf);
xdp_return_frame_bulk(tx_buf->xdpf, &bq);
mvpp2_txq_inc_get(txq_pcpu);
}
xdp_flush_frame_bulk(&bq);
rcu_read_unlock();
}
static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
......
......@@ -366,7 +366,8 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
struct mlx5e_xdp_wqe_info *wi,
u32 *xsk_frames,
bool recycle)
bool recycle,
struct xdp_frame_bulk *bq)
{
struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
u16 i;
......@@ -379,7 +380,7 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
/* XDP_TX from the XSK RQ and XDP_REDIRECT */
dma_unmap_single(sq->pdev, xdpi.frame.dma_addr,
xdpi.frame.xdpf->len, DMA_TO_DEVICE);
xdp_return_frame(xdpi.frame.xdpf);
xdp_return_frame_bulk(xdpi.frame.xdpf, bq);
break;
case MLX5E_XDP_XMIT_MODE_PAGE:
/* XDP_TX from the regular RQ */
......@@ -397,12 +398,15 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
{
struct xdp_frame_bulk bq;
struct mlx5e_xdpsq *sq;
struct mlx5_cqe64 *cqe;
u32 xsk_frames = 0;
u16 sqcc;
int i;
xdp_frame_bulk_init(&bq);
sq = container_of(cq, struct mlx5e_xdpsq, cq);
if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
......@@ -434,7 +438,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
sqcc += wi->num_wqebbs;
mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, true);
mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, true, &bq);
} while (!last_wqe);
if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
......@@ -447,6 +451,8 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
}
} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
xdp_flush_frame_bulk(&bq);
if (xsk_frames)
xsk_tx_completed(sq->xsk_pool, xsk_frames);
......@@ -463,8 +469,13 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
{
struct xdp_frame_bulk bq;
u32 xsk_frames = 0;
xdp_frame_bulk_init(&bq);
rcu_read_lock(); /* need for xdp_return_frame_bulk */
while (sq->cc != sq->pc) {
struct mlx5e_xdp_wqe_info *wi;
u16 ci;
......@@ -474,9 +485,12 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
sq->cc += wi->num_wqebbs;
mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, false);
mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, false, &bq);
}
xdp_flush_frame_bulk(&bq);
rcu_read_unlock();
if (xsk_frames)
xsk_tx_completed(sq->xsk_pool, xsk_frames);
}
......
......@@ -36,9 +36,11 @@ struct seq_operations;
struct bpf_iter_aux_info;
struct bpf_local_storage;
struct bpf_local_storage_map;
struct kobject;
extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;
extern struct kobject *btf_kobj;
typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
struct bpf_iter_aux_info *aux);
......@@ -310,6 +312,7 @@ enum bpf_return_type {
RET_PTR_TO_BTF_ID_OR_NULL, /* returns a pointer to a btf_id or NULL */
RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL, /* returns a pointer to a valid memory or a btf_id or NULL */
RET_PTR_TO_MEM_OR_BTF_ID, /* returns a pointer to a valid memory or a btf_id */
RET_PTR_TO_BTF_ID, /* returns a pointer to a btf_id */
};
/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
......@@ -1294,6 +1297,10 @@ typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux,
typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux,
struct bpf_link_info *info);
enum bpf_iter_feature {
BPF_ITER_RESCHED = BIT(0),
};
#define BPF_ITER_CTX_ARG_MAX 2
struct bpf_iter_reg {
const char *target;
......@@ -1302,6 +1309,7 @@ struct bpf_iter_reg {
bpf_iter_show_fdinfo_t show_fdinfo;
bpf_iter_fill_link_info_t fill_link_info;
u32 ctx_arg_info_size;
u32 feature;
struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX];
const struct bpf_iter_seq_info *seq_info;
};
......
......@@ -7,6 +7,7 @@
#ifndef _LINUX_BPF_LSM_H
#define _LINUX_BPF_LSM_H
#include <linux/sched.h>
#include <linux/bpf.h>
#include <linux/lsm_hooks.h>
......@@ -26,6 +27,8 @@ extern struct lsm_blob_sizes bpf_lsm_blob_sizes;
int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog,
const struct bpf_prog *prog);
bool bpf_lsm_is_sleepable_hook(u32 btf_id);
static inline struct bpf_storage_blob *bpf_inode(
const struct inode *inode)
{
......@@ -35,12 +38,29 @@ static inline struct bpf_storage_blob *bpf_inode(
return inode->i_security + bpf_lsm_blob_sizes.lbs_inode;
}
static inline struct bpf_storage_blob *bpf_task(
const struct task_struct *task)
{
if (unlikely(!task->security))
return NULL;
return task->security + bpf_lsm_blob_sizes.lbs_task;
}
extern const struct bpf_func_proto bpf_inode_storage_get_proto;
extern const struct bpf_func_proto bpf_inode_storage_delete_proto;
extern const struct bpf_func_proto bpf_task_storage_get_proto;
extern const struct bpf_func_proto bpf_task_storage_delete_proto;
void bpf_inode_storage_free(struct inode *inode);
void bpf_task_storage_free(struct task_struct *task);
#else /* !CONFIG_BPF_LSM */
static inline bool bpf_lsm_is_sleepable_hook(u32 btf_id)
{
return false;
}
static inline int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog,
const struct bpf_prog *prog)
{
......@@ -53,10 +73,20 @@ static inline struct bpf_storage_blob *bpf_inode(
return NULL;
}
static inline struct bpf_storage_blob *bpf_task(
const struct task_struct *task)
{
return NULL;
}
static inline void bpf_inode_storage_free(struct inode *inode)
{
}
static inline void bpf_task_storage_free(struct task_struct *task)
{
}
#endif /* CONFIG_BPF_LSM */
#endif /* _LINUX_BPF_LSM_H */
......@@ -109,6 +109,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKHASH, sock_hash_ops)
#endif
#ifdef CONFIG_BPF_LSM
BPF_MAP_TYPE(BPF_MAP_TYPE_INODE_STORAGE, inode_storage_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_TASK_STORAGE, task_storage_map_ops)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_CPUMAP, cpu_map_ops)
#if defined(CONFIG_XDP_SOCKETS)
......
......@@ -45,7 +45,7 @@ struct bpf_reg_state {
enum bpf_reg_type type;
union {
/* valid when type == PTR_TO_PACKET */
u16 range;
int range;
/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
* PTR_TO_MAP_VALUE_OR_NULL
......
......@@ -475,6 +475,10 @@ struct module {
unsigned int num_bpf_raw_events;
struct bpf_raw_event_map *bpf_raw_events;
#endif
#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
unsigned int btf_data_size;
void *btf_data;
#endif
#ifdef CONFIG_JUMP_LABEL
struct jump_entry *jump_entries;
unsigned int num_jump_entries;
......
......@@ -20,6 +20,8 @@ void bpf_sk_storage_free(struct sock *sk);
extern const struct bpf_func_proto bpf_sk_storage_get_proto;
extern const struct bpf_func_proto bpf_sk_storage_delete_proto;
extern const struct bpf_func_proto bpf_sk_storage_get_tracing_proto;
extern const struct bpf_func_proto bpf_sk_storage_delete_tracing_proto;
struct bpf_local_storage_elem;
struct bpf_sk_storage_diag;
......
......@@ -152,6 +152,8 @@ struct page_pool *page_pool_create(const struct page_pool_params *params);
void page_pool_destroy(struct page_pool *pool);
void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *));
void page_pool_release_page(struct page_pool *pool, struct page *page);
void page_pool_put_page_bulk(struct page_pool *pool, void **data,
int count);
#else
static inline void page_pool_destroy(struct page_pool *pool)
{
......@@ -165,6 +167,11 @@ static inline void page_pool_release_page(struct page_pool *pool,
struct page *page)
{
}
static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
int count)
{
}
#endif
void page_pool_put_page(struct page_pool *pool, struct page *page,
......@@ -215,4 +222,23 @@ static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
if (unlikely(pool->p.nid != new_nid))
page_pool_update_nid(pool, new_nid);
}
static inline void page_pool_ring_lock(struct page_pool *pool)
__acquires(&pool->ring.producer_lock)
{
if (in_serving_softirq())
spin_lock(&pool->ring.producer_lock);
else
spin_lock_bh(&pool->ring.producer_lock);
}
static inline void page_pool_ring_unlock(struct page_pool *pool)
__releases(&pool->ring.producer_lock)
{
if (in_serving_softirq())
spin_unlock(&pool->ring.producer_lock);
else
spin_unlock_bh(&pool->ring.producer_lock);
}
#endif /* _NET_PAGE_POOL_H */
......@@ -104,6 +104,18 @@ struct xdp_frame {
struct net_device *dev_rx; /* used by cpumap */
};
#define XDP_BULK_QUEUE_SIZE 16
struct xdp_frame_bulk {
int count;
void *xa;
void *q[XDP_BULK_QUEUE_SIZE];
};
static __always_inline void xdp_frame_bulk_init(struct xdp_frame_bulk *bq)
{
/* bq->count will be zero'ed when bq->xa gets updated */
bq->xa = NULL;
}
static inline struct skb_shared_info *
xdp_get_shared_info_from_frame(struct xdp_frame *frame)
......@@ -194,6 +206,9 @@ struct xdp_frame *xdp_convert_buff_to_frame(struct xdp_buff *xdp)
void xdp_return_frame(struct xdp_frame *xdpf);
void xdp_return_frame_rx_napi(struct xdp_frame *xdpf);
void xdp_return_buff(struct xdp_buff *xdp);
void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq);
void xdp_return_frame_bulk(struct xdp_frame *xdpf,
struct xdp_frame_bulk *bq);
/* When sending xdp_frame into the network stack, then there is no
* return point callback, which is needed to release e.g. DMA-mapping
......@@ -245,6 +260,6 @@ bool xdp_attachment_flags_ok(struct xdp_attachment_info *info,
void xdp_attachment_setup(struct xdp_attachment_info *info,
struct netdev_bpf *bpf);
#define DEV_MAP_BULK_SIZE 16
#define DEV_MAP_BULK_SIZE XDP_BULK_QUEUE_SIZE
#endif /* __LINUX_NET_XDP_H__ */
......@@ -157,6 +157,7 @@ enum bpf_map_type {
BPF_MAP_TYPE_STRUCT_OPS,
BPF_MAP_TYPE_RINGBUF,
BPF_MAP_TYPE_INODE_STORAGE,
BPF_MAP_TYPE_TASK_STORAGE,
};
/* Note that tracing related programs such as
......@@ -3742,6 +3743,50 @@ union bpf_attr {
* Return
* The helper returns **TC_ACT_REDIRECT** on success or
* **TC_ACT_SHOT** on error.
*
* void *bpf_task_storage_get(struct bpf_map *map, struct task_struct *task, void *value, u64 flags)
* Description
* Get a bpf_local_storage from the *task*.
*
* Logically, it could be thought of as getting the value from
* a *map* with *task* as the **key**. From this
* perspective, the usage is not much different from
* **bpf_map_lookup_elem**\ (*map*, **&**\ *task*) except this
* helper enforces the key must be an task_struct and the map must also
* be a **BPF_MAP_TYPE_TASK_STORAGE**.
*
* Underneath, the value is stored locally at *task* instead of
* the *map*. The *map* is used as the bpf-local-storage
* "type". The bpf-local-storage "type" (i.e. the *map*) is
* searched against all bpf_local_storage residing at *task*.
*
* An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be
* used such that a new bpf_local_storage will be
* created if one does not exist. *value* can be used
* together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify
* the initial value of a bpf_local_storage. If *value* is
* **NULL**, the new bpf_local_storage will be zero initialized.
* Return
* A bpf_local_storage pointer is returned on success.
*
* **NULL** if not found or there was an error in adding
* a new bpf_local_storage.
*
* long bpf_task_storage_delete(struct bpf_map *map, struct task_struct *task)
* Description
* Delete a bpf_local_storage from a *task*.
* Return
* 0 on success.
*
* **-ENOENT** if the bpf_local_storage cannot be found.
*
* struct task_struct *bpf_get_current_task_btf(void)
* Description
* Return a BTF pointer to the "current" task.
* This pointer can also be used in helpers that accept an
* *ARG_PTR_TO_BTF_ID* of type *task_struct*.
* Return
* Pointer to the current task.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
......@@ -3900,6 +3945,9 @@ union bpf_attr {
FN(bpf_per_cpu_ptr), \
FN(bpf_this_cpu_ptr), \
FN(redirect_peer), \
FN(task_storage_get), \
FN(task_storage_delete), \
FN(get_current_task_btf), \
/* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
......@@ -4418,6 +4466,9 @@ struct bpf_btf_info {
__aligned_u64 btf;
__u32 btf_size;
__u32 id;
__aligned_u64 name;
__u32 name_len;
__u32 kernel_btf;
} __attribute__((aligned(8)));
struct bpf_link_info {
......
......@@ -10,6 +10,7 @@ obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o bpf_i
obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o
obj-$(CONFIG_BPF_SYSCALL) += local_storage.o queue_stack_maps.o ringbuf.o
obj-${CONFIG_BPF_LSM} += bpf_inode_storage.o
obj-${CONFIG_BPF_LSM} += bpf_task_storage.o
obj-$(CONFIG_BPF_SYSCALL) += disasm.o
obj-$(CONFIG_BPF_JIT) += trampoline.o
obj-$(CONFIG_BPF_SYSCALL) += btf.o
......
......@@ -67,6 +67,15 @@ static void bpf_iter_done_stop(struct seq_file *seq)
iter_priv->done_stop = true;
}
static bool bpf_iter_support_resched(struct seq_file *seq)
{
struct bpf_iter_priv_data *iter_priv;
iter_priv = container_of(seq->private, struct bpf_iter_priv_data,
target_private);
return iter_priv->tinfo->reg_info->feature & BPF_ITER_RESCHED;
}
/* maximum visited objects before bailing out */
#define MAX_ITER_OBJECTS 1000000
......@@ -83,6 +92,7 @@ static ssize_t bpf_seq_read(struct file *file, char __user *buf, size_t size,
struct seq_file *seq = file->private_data;
size_t n, offs, copied = 0;
int err = 0, num_objs = 0;
bool can_resched;
void *p;
mutex_lock(&seq->lock);
......@@ -135,6 +145,7 @@ static ssize_t bpf_seq_read(struct file *file, char __user *buf, size_t size,
goto done;
}
can_resched = bpf_iter_support_resched(seq);
while (1) {
loff_t pos = seq->index;
......@@ -180,6 +191,9 @@ static ssize_t bpf_seq_read(struct file *file, char __user *buf, size_t size,
}
break;
}
if (can_resched)
cond_resched();
}
stop:
offs = seq->count;
......
......@@ -63,11 +63,99 @@ bpf_lsm_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_sk_storage_get_proto;
case BPF_FUNC_sk_storage_delete:
return &bpf_sk_storage_delete_proto;
case BPF_FUNC_spin_lock:
return &bpf_spin_lock_proto;
case BPF_FUNC_spin_unlock:
return &bpf_spin_unlock_proto;
case BPF_FUNC_task_storage_get:
return &bpf_task_storage_get_proto;
case BPF_FUNC_task_storage_delete:
return &bpf_task_storage_delete_proto;
default:
return tracing_prog_func_proto(func_id, prog);
}
}
/* The set of hooks which are called without pagefaults disabled and are allowed
* to "sleep" and thus can be used for sleeable BPF programs.
*/
BTF_SET_START(sleepable_lsm_hooks)
BTF_ID(func, bpf_lsm_bpf)
BTF_ID(func, bpf_lsm_bpf_map)
BTF_ID(func, bpf_lsm_bpf_map_alloc_security)
BTF_ID(func, bpf_lsm_bpf_map_free_security)
BTF_ID(func, bpf_lsm_bpf_prog)
BTF_ID(func, bpf_lsm_bprm_check_security)
BTF_ID(func, bpf_lsm_bprm_committed_creds)
BTF_ID(func, bpf_lsm_bprm_committing_creds)
BTF_ID(func, bpf_lsm_bprm_creds_for_exec)
BTF_ID(func, bpf_lsm_bprm_creds_from_file)
BTF_ID(func, bpf_lsm_capget)
BTF_ID(func, bpf_lsm_capset)
BTF_ID(func, bpf_lsm_cred_prepare)
BTF_ID(func, bpf_lsm_file_ioctl)
BTF_ID(func, bpf_lsm_file_lock)
BTF_ID(func, bpf_lsm_file_open)
BTF_ID(func, bpf_lsm_file_receive)
BTF_ID(func, bpf_lsm_inet_conn_established)
BTF_ID(func, bpf_lsm_inode_create)
BTF_ID(func, bpf_lsm_inode_free_security)
BTF_ID(func, bpf_lsm_inode_getattr)
BTF_ID(func, bpf_lsm_inode_getxattr)
BTF_ID(func, bpf_lsm_inode_mknod)
BTF_ID(func, bpf_lsm_inode_need_killpriv)
BTF_ID(func, bpf_lsm_inode_post_setxattr)
BTF_ID(func, bpf_lsm_inode_readlink)
BTF_ID(func, bpf_lsm_inode_rename)
BTF_ID(func, bpf_lsm_inode_rmdir)
BTF_ID(func, bpf_lsm_inode_setattr)
BTF_ID(func, bpf_lsm_inode_setxattr)
BTF_ID(func, bpf_lsm_inode_symlink)
BTF_ID(func, bpf_lsm_inode_unlink)
BTF_ID(func, bpf_lsm_kernel_module_request)
BTF_ID(func, bpf_lsm_kernfs_init_security)
BTF_ID(func, bpf_lsm_key_free)
BTF_ID(func, bpf_lsm_mmap_file)
BTF_ID(func, bpf_lsm_netlink_send)
BTF_ID(func, bpf_lsm_path_notify)
BTF_ID(func, bpf_lsm_release_secctx)
BTF_ID(func, bpf_lsm_sb_alloc_security)
BTF_ID(func, bpf_lsm_sb_eat_lsm_opts)
BTF_ID(func, bpf_lsm_sb_kern_mount)
BTF_ID(func, bpf_lsm_sb_mount)
BTF_ID(func, bpf_lsm_sb_remount)
BTF_ID(func, bpf_lsm_sb_set_mnt_opts)
BTF_ID(func, bpf_lsm_sb_show_options)
BTF_ID(func, bpf_lsm_sb_statfs)
BTF_ID(func, bpf_lsm_sb_umount)
BTF_ID(func, bpf_lsm_settime)
BTF_ID(func, bpf_lsm_socket_accept)
BTF_ID(func, bpf_lsm_socket_bind)
BTF_ID(func, bpf_lsm_socket_connect)
BTF_ID(func, bpf_lsm_socket_create)
BTF_ID(func, bpf_lsm_socket_getpeername)
BTF_ID(func, bpf_lsm_socket_getpeersec_dgram)
BTF_ID(func, bpf_lsm_socket_getsockname)
BTF_ID(func, bpf_lsm_socket_getsockopt)
BTF_ID(func, bpf_lsm_socket_listen)
BTF_ID(func, bpf_lsm_socket_post_create)
BTF_ID(func, bpf_lsm_socket_recvmsg)
BTF_ID(func, bpf_lsm_socket_sendmsg)
BTF_ID(func, bpf_lsm_socket_shutdown)
BTF_ID(func, bpf_lsm_socket_socketpair)
BTF_ID(func, bpf_lsm_syslog)
BTF_ID(func, bpf_lsm_task_alloc)
BTF_ID(func, bpf_lsm_task_getsecid)
BTF_ID(func, bpf_lsm_task_prctl)
BTF_ID(func, bpf_lsm_task_setscheduler)
BTF_ID(func, bpf_lsm_task_to_inode)
BTF_SET_END(sleepable_lsm_hooks)
bool bpf_lsm_is_sleepable_hook(u32 btf_id)
{
return btf_id_set_contains(&sleepable_lsm_hooks, btf_id);
}
const struct bpf_prog_ops lsm_prog_ops = {
};
......
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2020 Facebook
* Copyright 2020 Google LLC.
*/
#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/bpf_local_storage.h>
#include <linux/filter.h>
#include <uapi/linux/btf.h>
#include <linux/bpf_lsm.h>
#include <linux/btf_ids.h>
#include <linux/fdtable.h>
DEFINE_BPF_STORAGE_CACHE(task_cache);
static struct bpf_local_storage __rcu **task_storage_ptr(void *owner)
{
struct task_struct *task = owner;
struct bpf_storage_blob *bsb;
bsb = bpf_task(task);
if (!bsb)
return NULL;
return &bsb->storage;
}
static struct bpf_local_storage_data *
task_storage_lookup(struct task_struct *task, struct bpf_map *map,
bool cacheit_lockit)
{
struct bpf_local_storage *task_storage;
struct bpf_local_storage_map *smap;
struct bpf_storage_blob *bsb;
bsb = bpf_task(task);
if (!bsb)
return NULL;
task_storage = rcu_dereference(bsb->storage);
if (!task_storage)
return NULL;
smap = (struct bpf_local_storage_map *)map;
return bpf_local_storage_lookup(task_storage, smap, cacheit_lockit);
}
void bpf_task_storage_free(struct task_struct *task)
{
struct bpf_local_storage_elem *selem;
struct bpf_local_storage *local_storage;
bool free_task_storage = false;
struct bpf_storage_blob *bsb;
struct hlist_node *n;
bsb = bpf_task(task);
if (!bsb)
return;
rcu_read_lock();
local_storage = rcu_dereference(bsb->storage);
if (!local_storage) {
rcu_read_unlock();
return;
}
/* Neither the bpf_prog nor the bpf-map's syscall
* could be modifying the local_storage->list now.
* Thus, no elem can be added-to or deleted-from the
* local_storage->list by the bpf_prog or by the bpf-map's syscall.
*
* It is racing with bpf_local_storage_map_free() alone
* when unlinking elem from the local_storage->list and
* the map's bucket->list.
*/
raw_spin_lock_bh(&local_storage->lock);
hlist_for_each_entry_safe(selem, n, &local_storage->list, snode) {
/* Always unlink from map before unlinking from
* local_storage.
*/
bpf_selem_unlink_map(selem);
free_task_storage = bpf_selem_unlink_storage_nolock(
local_storage, selem, false);
}
raw_spin_unlock_bh(&local_storage->lock);
rcu_read_unlock();
/* free_task_storage should always be true as long as
* local_storage->list was non-empty.
*/
if (free_task_storage)
kfree_rcu(local_storage, rcu);
}
static void *bpf_pid_task_storage_lookup_elem(struct bpf_map *map, void *key)
{
struct bpf_local_storage_data *sdata;
struct task_struct *task;
unsigned int f_flags;
struct pid *pid;
int fd, err;
fd = *(int *)key;
pid = pidfd_get_pid(fd, &f_flags);
if (IS_ERR(pid))
return ERR_CAST(pid);
/* We should be in an RCU read side critical section, it should be safe
* to call pid_task.
*/
WARN_ON_ONCE(!rcu_read_lock_held());
task = pid_task(pid, PIDTYPE_PID);
if (!task) {
err = -ENOENT;
goto out;
}
sdata = task_storage_lookup(task, map, true);
put_pid(pid);
return sdata ? sdata->data : NULL;
out:
put_pid(pid);
return ERR_PTR(err);
}
static int bpf_pid_task_storage_update_elem(struct bpf_map *map, void *key,
void *value, u64 map_flags)
{
struct bpf_local_storage_data *sdata;
struct task_struct *task;
unsigned int f_flags;
struct pid *pid;
int fd, err;
fd = *(int *)key;
pid = pidfd_get_pid(fd, &f_flags);
if (IS_ERR(pid))
return PTR_ERR(pid);
/* We should be in an RCU read side critical section, it should be safe
* to call pid_task.
*/
WARN_ON_ONCE(!rcu_read_lock_held());
task = pid_task(pid, PIDTYPE_PID);
if (!task || !task_storage_ptr(task)) {
err = -ENOENT;
goto out;
}
sdata = bpf_local_storage_update(
task, (struct bpf_local_storage_map *)map, value, map_flags);
err = PTR_ERR_OR_ZERO(sdata);
out:
put_pid(pid);
return err;
}
static int task_storage_delete(struct task_struct *task, struct bpf_map *map)
{
struct bpf_local_storage_data *sdata;
sdata = task_storage_lookup(task, map, false);
if (!sdata)
return -ENOENT;
bpf_selem_unlink(SELEM(sdata));
return 0;
}
static int bpf_pid_task_storage_delete_elem(struct bpf_map *map, void *key)
{
struct task_struct *task;
unsigned int f_flags;
struct pid *pid;
int fd, err;
fd = *(int *)key;
pid = pidfd_get_pid(fd, &f_flags);
if (IS_ERR(pid))
return PTR_ERR(pid);
/* We should be in an RCU read side critical section, it should be safe
* to call pid_task.
*/
WARN_ON_ONCE(!rcu_read_lock_held());
task = pid_task(pid, PIDTYPE_PID);
if (!task) {
err = -ENOENT;
goto out;
}
err = task_storage_delete(task, map);
out:
put_pid(pid);
return err;
}
BPF_CALL_4(bpf_task_storage_get, struct bpf_map *, map, struct task_struct *,
task, void *, value, u64, flags)
{
struct bpf_local_storage_data *sdata;
if (flags & ~(BPF_LOCAL_STORAGE_GET_F_CREATE))
return (unsigned long)NULL;
/* explicitly check that the task_storage_ptr is not
* NULL as task_storage_lookup returns NULL in this case and
* bpf_local_storage_update expects the owner to have a
* valid storage pointer.
*/
if (!task_storage_ptr(task))
return (unsigned long)NULL;
sdata = task_storage_lookup(task, map, true);
if (sdata)
return (unsigned long)sdata->data;
/* This helper must only be called from places where the lifetime of the task
* is guaranteed. Either by being refcounted or by being protected
* by an RCU read-side critical section.
*/
if (flags & BPF_LOCAL_STORAGE_GET_F_CREATE) {
sdata = bpf_local_storage_update(
task, (struct bpf_local_storage_map *)map, value,
BPF_NOEXIST);
return IS_ERR(sdata) ? (unsigned long)NULL :
(unsigned long)sdata->data;
}
return (unsigned long)NULL;
}
BPF_CALL_2(bpf_task_storage_delete, struct bpf_map *, map, struct task_struct *,
task)
{
/* This helper must only be called from places where the lifetime of the task
* is guaranteed. Either by being refcounted or by being protected
* by an RCU read-side critical section.
*/
return task_storage_delete(task, map);
}
static int notsupp_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
return -ENOTSUPP;
}
static struct bpf_map *task_storage_map_alloc(union bpf_attr *attr)
{
struct bpf_local_storage_map *smap;
smap = bpf_local_storage_map_alloc(attr);
if (IS_ERR(smap))
return ERR_CAST(smap);
smap->cache_idx = bpf_local_storage_cache_idx_get(&task_cache);
return &smap->map;
}
static void task_storage_map_free(struct bpf_map *map)
{
struct bpf_local_storage_map *smap;
smap = (struct bpf_local_storage_map *)map;
bpf_local_storage_cache_idx_free(&task_cache, smap->cache_idx);
bpf_local_storage_map_free(smap);
}
static int task_storage_map_btf_id;
const struct bpf_map_ops task_storage_map_ops = {
.map_meta_equal = bpf_map_meta_equal,
.map_alloc_check = bpf_local_storage_map_alloc_check,
.map_alloc = task_storage_map_alloc,
.map_free = task_storage_map_free,
.map_get_next_key = notsupp_get_next_key,
.map_lookup_elem = bpf_pid_task_storage_lookup_elem,
.map_update_elem = bpf_pid_task_storage_update_elem,
.map_delete_elem = bpf_pid_task_storage_delete_elem,
.map_check_btf = bpf_local_storage_map_check_btf,
.map_btf_name = "bpf_local_storage_map",
.map_btf_id = &task_storage_map_btf_id,
.map_owner_storage_ptr = task_storage_ptr,
};
BTF_ID_LIST_SINGLE(bpf_task_storage_btf_ids, struct, task_struct)
const struct bpf_func_proto bpf_task_storage_get_proto = {
.func = bpf_task_storage_get,
.gpl_only = false,
.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
.arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_PTR_TO_BTF_ID,
.arg2_btf_id = &bpf_task_storage_btf_ids[0],
.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
.arg4_type = ARG_ANYTHING,
};
const struct bpf_func_proto bpf_task_storage_delete_proto = {
.func = bpf_task_storage_delete,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_PTR_TO_BTF_ID,
.arg2_btf_id = &bpf_task_storage_btf_ids[0],
};
......@@ -773,7 +773,8 @@ static int map_check_btf(struct bpf_map *map, const struct btf *btf,
map->map_type != BPF_MAP_TYPE_ARRAY &&
map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
map->map_type != BPF_MAP_TYPE_INODE_STORAGE)
map->map_type != BPF_MAP_TYPE_INODE_STORAGE &&
map->map_type != BPF_MAP_TYPE_TASK_STORAGE)
return -ENOTSUPP;
if (map->spin_lock_off + sizeof(struct bpf_spin_lock) >
map->value_size) {
......
......@@ -26,7 +26,7 @@ static struct bin_attribute bin_attr_btf_vmlinux __ro_after_init = {
.read = btf_vmlinux_read,
};
static struct kobject *btf_kobj;
struct kobject *btf_kobj;
static int __init btf_vmlinux_init(void)
{
......
......@@ -337,6 +337,7 @@ static const struct bpf_iter_seq_info task_seq_info = {
static struct bpf_iter_reg task_reg_info = {
.target = "task",
.feature = BPF_ITER_RESCHED,
.ctx_arg_info_size = 1,
.ctx_arg_info = {
{ offsetof(struct bpf_iter__task, task),
......@@ -354,6 +355,7 @@ static const struct bpf_iter_seq_info task_file_seq_info = {
static struct bpf_iter_reg task_file_reg_info = {
.target = "task_file",
.feature = BPF_ITER_RESCHED,
.ctx_arg_info_size = 2,
.ctx_arg_info = {
{ offsetof(struct bpf_iter__task_file, task),
......
......@@ -380,6 +380,35 @@ static void *section_objs(const struct load_info *info,
return (void *)info->sechdrs[sec].sh_addr;
}
/* Find a module section: 0 means not found. Ignores SHF_ALLOC flag. */
static unsigned int find_any_sec(const struct load_info *info, const char *name)
{
unsigned int i;
for (i = 1; i < info->hdr->e_shnum; i++) {
Elf_Shdr *shdr = &info->sechdrs[i];
if (strcmp(info->secstrings + shdr->sh_name, name) == 0)
return i;
}
return 0;
}
/*
* Find a module section, or NULL. Fill in number of "objects" in section.
* Ignores SHF_ALLOC flag.
*/
static __maybe_unused void *any_section_objs(const struct load_info *info,
const char *name,
size_t object_size,
unsigned int *num)
{
unsigned int sec = find_any_sec(info, name);
/* Section 0 has sh_addr 0 and sh_size 0. */
*num = info->sechdrs[sec].sh_size / object_size;
return (void *)info->sechdrs[sec].sh_addr;
}
/* Provided by the linker */
extern const struct kernel_symbol __start___ksymtab[];
extern const struct kernel_symbol __stop___ksymtab[];
......@@ -3250,6 +3279,9 @@ static int find_module_sections(struct module *mod, struct load_info *info)
sizeof(*mod->bpf_raw_events),
&mod->num_bpf_raw_events);
#endif
#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
mod->btf_data = any_section_objs(info, ".BTF", 1, &mod->btf_data_size);
#endif
#ifdef CONFIG_JUMP_LABEL
mod->jump_entries = section_objs(info, "__jump_table",
sizeof(*mod->jump_entries),
......
......@@ -16,6 +16,9 @@
#include <linux/syscalls.h>
#include <linux/error-injection.h>
#include <linux/btf_ids.h>
#include <linux/bpf_lsm.h>
#include <net/bpf_sk_storage.h>
#include <uapi/linux/bpf.h>
#include <uapi/linux/btf.h>
......@@ -1022,6 +1025,20 @@ const struct bpf_func_proto bpf_get_current_task_proto = {
.ret_type = RET_INTEGER,
};
BPF_CALL_0(bpf_get_current_task_btf)
{
return (unsigned long) current;
}
BTF_ID_LIST_SINGLE(bpf_get_current_btf_ids, struct, task_struct)
static const struct bpf_func_proto bpf_get_current_task_btf_proto = {
.func = bpf_get_current_task_btf,
.gpl_only = true,
.ret_type = RET_PTR_TO_BTF_ID,
.ret_btf_id = &bpf_get_current_btf_ids[0],
};
BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
struct bpf_array *array = container_of(map, struct bpf_array, map);
......@@ -1164,7 +1181,11 @@ BTF_SET_END(btf_allowlist_d_path)
static bool bpf_d_path_allowed(const struct bpf_prog *prog)
{
return btf_id_set_contains(&btf_allowlist_d_path, prog->aux->attach_btf_id);
if (prog->type == BPF_PROG_TYPE_LSM)
return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id);
return btf_id_set_contains(&btf_allowlist_d_path,
prog->aux->attach_btf_id);
}
BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)
......@@ -1265,6 +1286,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_get_current_pid_tgid_proto;
case BPF_FUNC_get_current_task:
return &bpf_get_current_task_proto;
case BPF_FUNC_get_current_task_btf:
return &bpf_get_current_task_btf_proto;
case BPF_FUNC_get_current_uid_gid:
return &bpf_get_current_uid_gid_proto;
case BPF_FUNC_get_current_comm:
......@@ -1719,6 +1742,10 @@ tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_skc_to_tcp_request_sock_proto;
case BPF_FUNC_skc_to_udp6_sock:
return &bpf_skc_to_udp6_sock_proto;
case BPF_FUNC_sk_storage_get:
return &bpf_sk_storage_get_tracing_proto;
case BPF_FUNC_sk_storage_delete:
return &bpf_sk_storage_delete_tracing_proto;
#endif
case BPF_FUNC_seq_printf:
return prog->expected_attach_type == BPF_TRACE_ITER ?
......
......@@ -274,6 +274,15 @@ config DEBUG_INFO_BTF
Turning this on expects presence of pahole tool, which will convert
DWARF type info into equivalent deduplicated BTF type info.
config PAHOLE_HAS_SPLIT_BTF
def_bool $(success, test `$(PAHOLE) --version | sed -E 's/v([0-9]+)\.([0-9]+)/\1\2/'` -ge "119")
config DEBUG_INFO_BTF_MODULES
def_bool y
depends on DEBUG_INFO_BTF && MODULES && PAHOLE_HAS_SPLIT_BTF
help
Generate compact split BTF type information for kernel modules.
config GDB_SCRIPTS
bool "Provide GDB scripts for kernel debugging"
help
......
......@@ -6,6 +6,7 @@
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/bpf_local_storage.h>
#include <net/bpf_sk_storage.h>
......@@ -15,20 +16,8 @@
DEFINE_BPF_STORAGE_CACHE(sk_cache);
static int omem_charge(struct sock *sk, unsigned int size)
{
/* same check as in sock_kmalloc() */
if (size <= sysctl_optmem_max &&
atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
atomic_add(size, &sk->sk_omem_alloc);
return 0;
}
return -ENOMEM;
}
static struct bpf_local_storage_data *
sk_storage_lookup(struct sock *sk, struct bpf_map *map, bool cacheit_lockit)
bpf_sk_storage_lookup(struct sock *sk, struct bpf_map *map, bool cacheit_lockit)
{
struct bpf_local_storage *sk_storage;
struct bpf_local_storage_map *smap;
......@@ -41,11 +30,11 @@ sk_storage_lookup(struct sock *sk, struct bpf_map *map, bool cacheit_lockit)
return bpf_local_storage_lookup(sk_storage, smap, cacheit_lockit);
}
static int sk_storage_delete(struct sock *sk, struct bpf_map *map)
static int bpf_sk_storage_del(struct sock *sk, struct bpf_map *map)
{
struct bpf_local_storage_data *sdata;
sdata = sk_storage_lookup(sk, map, false);
sdata = bpf_sk_storage_lookup(sk, map, false);
if (!sdata)
return -ENOENT;
......@@ -94,7 +83,7 @@ void bpf_sk_storage_free(struct sock *sk)
kfree_rcu(sk_storage, rcu);
}
static void sk_storage_map_free(struct bpf_map *map)
static void bpf_sk_storage_map_free(struct bpf_map *map)
{
struct bpf_local_storage_map *smap;
......@@ -103,7 +92,7 @@ static void sk_storage_map_free(struct bpf_map *map)
bpf_local_storage_map_free(smap);
}
static struct bpf_map *sk_storage_map_alloc(union bpf_attr *attr)
static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
{
struct bpf_local_storage_map *smap;
......@@ -130,7 +119,7 @@ static void *bpf_fd_sk_storage_lookup_elem(struct bpf_map *map, void *key)
fd = *(int *)key;
sock = sockfd_lookup(fd, &err);
if (sock) {
sdata = sk_storage_lookup(sock->sk, map, true);
sdata = bpf_sk_storage_lookup(sock->sk, map, true);
sockfd_put(sock);
return sdata ? sdata->data : NULL;
}
......@@ -166,7 +155,7 @@ static int bpf_fd_sk_storage_delete_elem(struct bpf_map *map, void *key)
fd = *(int *)key;
sock = sockfd_lookup(fd, &err);
if (sock) {
err = sk_storage_delete(sock->sk, map);
err = bpf_sk_storage_del(sock->sk, map);
sockfd_put(sock);
return err;
}
......@@ -272,7 +261,7 @@ BPF_CALL_4(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk,
if (!sk || !sk_fullsock(sk) || flags > BPF_SK_STORAGE_GET_F_CREATE)
return (unsigned long)NULL;
sdata = sk_storage_lookup(sk, map, true);
sdata = bpf_sk_storage_lookup(sk, map, true);
if (sdata)
return (unsigned long)sdata->data;
......@@ -305,7 +294,7 @@ BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk)
if (refcount_inc_not_zero(&sk->sk_refcnt)) {
int err;
err = sk_storage_delete(sk, map);
err = bpf_sk_storage_del(sk, map);
sock_put(sk);
return err;
}
......@@ -313,14 +302,23 @@ BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk)
return -ENOENT;
}
static int sk_storage_charge(struct bpf_local_storage_map *smap,
void *owner, u32 size)
static int bpf_sk_storage_charge(struct bpf_local_storage_map *smap,
void *owner, u32 size)
{
return omem_charge(owner, size);
struct sock *sk = (struct sock *)owner;
/* same check as in sock_kmalloc() */
if (size <= sysctl_optmem_max &&
atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
atomic_add(size, &sk->sk_omem_alloc);
return 0;
}
return -ENOMEM;
}
static void sk_storage_uncharge(struct bpf_local_storage_map *smap,
void *owner, u32 size)
static void bpf_sk_storage_uncharge(struct bpf_local_storage_map *smap,
void *owner, u32 size)
{
struct sock *sk = owner;
......@@ -328,7 +326,7 @@ static void sk_storage_uncharge(struct bpf_local_storage_map *smap,
}
static struct bpf_local_storage __rcu **
sk_storage_ptr(void *owner)
bpf_sk_storage_ptr(void *owner)
{
struct sock *sk = owner;
......@@ -339,8 +337,8 @@ static int sk_storage_map_btf_id;
const struct bpf_map_ops sk_storage_map_ops = {
.map_meta_equal = bpf_map_meta_equal,
.map_alloc_check = bpf_local_storage_map_alloc_check,
.map_alloc = sk_storage_map_alloc,
.map_free = sk_storage_map_free,
.map_alloc = bpf_sk_storage_map_alloc,
.map_free = bpf_sk_storage_map_free,
.map_get_next_key = notsupp_get_next_key,
.map_lookup_elem = bpf_fd_sk_storage_lookup_elem,
.map_update_elem = bpf_fd_sk_storage_update_elem,
......@@ -348,9 +346,9 @@ const struct bpf_map_ops sk_storage_map_ops = {
.map_check_btf = bpf_local_storage_map_check_btf,
.map_btf_name = "bpf_local_storage_map",
.map_btf_id = &sk_storage_map_btf_id,
.map_local_storage_charge = sk_storage_charge,
.map_local_storage_uncharge = sk_storage_uncharge,
.map_owner_storage_ptr = sk_storage_ptr,
.map_local_storage_charge = bpf_sk_storage_charge,
.map_local_storage_uncharge = bpf_sk_storage_uncharge,
.map_owner_storage_ptr = bpf_sk_storage_ptr,
};
const struct bpf_func_proto bpf_sk_storage_get_proto = {
......@@ -381,6 +379,79 @@ const struct bpf_func_proto bpf_sk_storage_delete_proto = {
.arg2_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
};
static bool bpf_sk_storage_tracing_allowed(const struct bpf_prog *prog)
{
const struct btf *btf_vmlinux;
const struct btf_type *t;
const char *tname;
u32 btf_id;
if (prog->aux->dst_prog)
return false;
/* Ensure the tracing program is not tracing
* any bpf_sk_storage*() function and also
* use the bpf_sk_storage_(get|delete) helper.
*/
switch (prog->expected_attach_type) {
case BPF_TRACE_RAW_TP:
/* bpf_sk_storage has no trace point */
return true;
case BPF_TRACE_FENTRY:
case BPF_TRACE_FEXIT:
btf_vmlinux = bpf_get_btf_vmlinux();
btf_id = prog->aux->attach_btf_id;
t = btf_type_by_id(btf_vmlinux, btf_id);
tname = btf_name_by_offset(btf_vmlinux, t->name_off);
return !!strncmp(tname, "bpf_sk_storage",
strlen("bpf_sk_storage"));
default:
return false;
}
return false;
}
BPF_CALL_4(bpf_sk_storage_get_tracing, struct bpf_map *, map, struct sock *, sk,
void *, value, u64, flags)
{
if (!in_serving_softirq() && !in_task())
return (unsigned long)NULL;
return (unsigned long)____bpf_sk_storage_get(map, sk, value, flags);
}
BPF_CALL_2(bpf_sk_storage_delete_tracing, struct bpf_map *, map,
struct sock *, sk)
{
if (!in_serving_softirq() && !in_task())
return -EPERM;
return ____bpf_sk_storage_delete(map, sk);
}
const struct bpf_func_proto bpf_sk_storage_get_tracing_proto = {
.func = bpf_sk_storage_get_tracing,
.gpl_only = false,
.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
.arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_PTR_TO_BTF_ID,
.arg2_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
.arg4_type = ARG_ANYTHING,
.allowed = bpf_sk_storage_tracing_allowed,
};
const struct bpf_func_proto bpf_sk_storage_delete_tracing_proto = {
.func = bpf_sk_storage_delete_tracing,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_PTR_TO_BTF_ID,
.arg2_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
.allowed = bpf_sk_storage_tracing_allowed,
};
struct bpf_sk_storage_diag {
u32 nr_maps;
struct bpf_map *maps[];
......
......@@ -11,6 +11,8 @@
#include <linux/device.h>
#include <net/page_pool.h>
#include <net/xdp.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/page-flags.h>
......@@ -362,8 +364,9 @@ static bool pool_page_reusable(struct page_pool *pool, struct page *page)
* If the page refcnt != 1, then the page will be returned to memory
* subsystem.
*/
void page_pool_put_page(struct page_pool *pool, struct page *page,
unsigned int dma_sync_size, bool allow_direct)
static __always_inline struct page *
__page_pool_put_page(struct page_pool *pool, struct page *page,
unsigned int dma_sync_size, bool allow_direct)
{
/* This allocator is optimized for the XDP mode that uses
* one-frame-per-page, but have fallbacks that act like the
......@@ -379,15 +382,12 @@ void page_pool_put_page(struct page_pool *pool, struct page *page,
page_pool_dma_sync_for_device(pool, page,
dma_sync_size);
if (allow_direct && in_serving_softirq())
if (page_pool_recycle_in_cache(page, pool))
return;
if (allow_direct && in_serving_softirq() &&
page_pool_recycle_in_cache(page, pool))
return NULL;
if (!page_pool_recycle_in_ring(pool, page)) {
/* Cache full, fallback to free pages */
page_pool_return_page(pool, page);
}
return;
/* Page found as candidate for recycling */
return page;
}
/* Fallback/non-XDP mode: API user have elevated refcnt.
*
......@@ -405,9 +405,59 @@ void page_pool_put_page(struct page_pool *pool, struct page *page,
/* Do not replace this with page_pool_return_page() */
page_pool_release_page(pool, page);
put_page(page);
return NULL;
}
void page_pool_put_page(struct page_pool *pool, struct page *page,
unsigned int dma_sync_size, bool allow_direct)
{
page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct);
if (page && !page_pool_recycle_in_ring(pool, page)) {
/* Cache full, fallback to free pages */
page_pool_return_page(pool, page);
}
}
EXPORT_SYMBOL(page_pool_put_page);
/* Caller must not use data area after call, as this function overwrites it */
void page_pool_put_page_bulk(struct page_pool *pool, void **data,
int count)
{
int i, bulk_len = 0;
for (i = 0; i < count; i++) {
struct page *page = virt_to_head_page(data[i]);
page = __page_pool_put_page(pool, page, -1, false);
/* Approved for bulk recycling in ptr_ring cache */
if (page)
data[bulk_len++] = page;
}
if (unlikely(!bulk_len))
return;
/* Bulk producer into ptr_ring page_pool cache */
page_pool_ring_lock(pool);
for (i = 0; i < bulk_len; i++) {
if (__ptr_ring_produce(&pool->ring, data[i]))
break; /* ring full */
}
page_pool_ring_unlock(pool);
/* Hopefully all pages was return into ptr_ring */
if (likely(i == bulk_len))
return;
/* ptr_ring cache full, free remaining pages outside producer lock
* since put_page() with refcnt == 1 can be an expensive operation
*/
for (; i < bulk_len; i++)
page_pool_return_page(pool, data[i]);
}
EXPORT_SYMBOL(page_pool_put_page_bulk);
static void page_pool_empty_ring(struct page_pool *pool)
{
struct page *page;
......
......@@ -380,6 +380,60 @@ void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
}
EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);
/* XDP bulk APIs introduce a defer/flush mechanism to return
* pages belonging to the same xdp_mem_allocator object
* (identified via the mem.id field) in bulk to optimize
* I-cache and D-cache.
* The bulk queue size is set to 16 to be aligned to how
* XDP_REDIRECT bulking works. The bulk is flushed when
* it is full or when mem.id changes.
* xdp_frame_bulk is usually stored/allocated on the function
* call-stack to avoid locking penalties.
*/
void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq)
{
struct xdp_mem_allocator *xa = bq->xa;
if (unlikely(!xa || !bq->count))
return;
page_pool_put_page_bulk(xa->page_pool, bq->q, bq->count);
/* bq->xa is not cleared to save lookup, if mem.id same in next bulk */
bq->count = 0;
}
EXPORT_SYMBOL_GPL(xdp_flush_frame_bulk);
/* Must be called with rcu_read_lock held */
void xdp_return_frame_bulk(struct xdp_frame *xdpf,
struct xdp_frame_bulk *bq)
{
struct xdp_mem_info *mem = &xdpf->mem;
struct xdp_mem_allocator *xa;
if (mem->type != MEM_TYPE_PAGE_POOL) {
__xdp_return(xdpf->data, &xdpf->mem, false);
return;
}
xa = bq->xa;
if (unlikely(!xa)) {
xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
bq->count = 0;
bq->xa = xa;
}
if (bq->count == XDP_BULK_QUEUE_SIZE)
xdp_flush_frame_bulk(bq);
if (unlikely(mem->id != xa->mem.id)) {
xdp_flush_frame_bulk(bq);
bq->xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
}
bq->q[bq->count++] = xdpf->data;
}
EXPORT_SYMBOL_GPL(xdp_return_frame_bulk);
void xdp_return_buff(struct xdp_buff *xdp)
{
__xdp_return(xdp->data, &xdp->rxq->mem, true);
......
......@@ -51,7 +51,6 @@
#include "cgroup_helpers.h"
#include "hbm.h"
#include "bpf_util.h"
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
bool outFlag = true;
......
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
function config_device {
ip netns add at_ns0
ip netns add at_ns1
ip netns add at_ns2
ip link add veth0 type veth peer name veth0b
ip link add veth1 type veth peer name veth1b
ip link add veth2 type veth peer name veth2b
ip link set veth0b up
ip link set veth1b up
ip link set veth2b up
ip link set dev veth0b mtu 1500
ip link set dev veth1b mtu 1500
ip link set dev veth2b mtu 1500
ip link set veth0 netns at_ns0
ip link set veth1 netns at_ns1
ip link set veth2 netns at_ns2
ip netns exec at_ns0 ip addr add 172.16.1.100/24 dev veth0
ip netns exec at_ns0 ip addr add 2401:db00::1/64 dev veth0 nodad
ip netns exec at_ns0 ip link set dev veth0 up
ip netns exec at_ns1 ip addr add 172.16.1.101/24 dev veth1
ip netns exec at_ns1 ip addr add 2401:db00::2/64 dev veth1 nodad
ip netns exec at_ns1 ip link set dev veth1 up
ip netns exec at_ns2 ip addr add 172.16.1.200/24 dev veth2
ip netns exec at_ns2 ip addr add 2401:db00::3/64 dev veth2 nodad
ip netns exec at_ns2 ip link set dev veth2 up
ip link add br0 type bridge
ip link set br0 up
ip link set dev br0 mtu 1500
ip link set veth0b master br0
ip link set veth1b master br0
ip link set veth2b master br0
}
function add_ipip_tunnel {
ip netns exec at_ns0 \
ip link add dev $DEV_NS type ipip local 172.16.1.100 remote 172.16.1.200
ip netns exec at_ns0 ip link set dev $DEV_NS up
ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24
ip netns exec at_ns1 \
ip link add dev $DEV_NS type ipip local 172.16.1.101 remote 172.16.1.200
ip netns exec at_ns1 ip link set dev $DEV_NS up
# same inner IP address in at_ns0 and at_ns1
ip netns exec at_ns1 ip addr add dev $DEV_NS 10.1.1.100/24
ip netns exec at_ns2 ip link add dev $DEV type ipip external
ip netns exec at_ns2 ip link set dev $DEV up
ip netns exec at_ns2 ip addr add dev $DEV 10.1.1.200/24
}
function add_ipip6_tunnel {
ip netns exec at_ns0 \
ip link add dev $DEV_NS type ip6tnl mode ipip6 local 2401:db00::1/64 remote 2401:db00::3/64
ip netns exec at_ns0 ip link set dev $DEV_NS up
ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24
ip netns exec at_ns1 \
ip link add dev $DEV_NS type ip6tnl mode ipip6 local 2401:db00::2/64 remote 2401:db00::3/64
ip netns exec at_ns1 ip link set dev $DEV_NS up
# same inner IP address in at_ns0 and at_ns1
ip netns exec at_ns1 ip addr add dev $DEV_NS 10.1.1.100/24
ip netns exec at_ns2 ip link add dev $DEV type ip6tnl mode ipip6 external
ip netns exec at_ns2 ip link set dev $DEV up
ip netns exec at_ns2 ip addr add dev $DEV 10.1.1.200/24
}
function add_ip6ip6_tunnel {
ip netns exec at_ns0 \
ip link add dev $DEV_NS type ip6tnl mode ip6ip6 local 2401:db00::1/64 remote 2401:db00::3/64
ip netns exec at_ns0 ip link set dev $DEV_NS up
ip netns exec at_ns0 ip addr add dev $DEV_NS 2601:646::1/64
ip netns exec at_ns1 \
ip link add dev $DEV_NS type ip6tnl mode ip6ip6 local 2401:db00::2/64 remote 2401:db00::3/64
ip netns exec at_ns1 ip link set dev $DEV_NS up
# same inner IP address in at_ns0 and at_ns1
ip netns exec at_ns1 ip addr add dev $DEV_NS 2601:646::1/64
ip netns exec at_ns2 ip link add dev $DEV type ip6tnl mode ip6ip6 external
ip netns exec at_ns2 ip link set dev $DEV up
ip netns exec at_ns2 ip addr add dev $DEV 2601:646::2/64
}
function attach_bpf {
DEV=$1
SET_TUNNEL=$2
GET_TUNNEL=$3
ip netns exec at_ns2 tc qdisc add dev $DEV clsact
ip netns exec at_ns2 tc filter add dev $DEV egress bpf da obj tcbpf2_kern.o sec $SET_TUNNEL
ip netns exec at_ns2 tc filter add dev $DEV ingress bpf da obj tcbpf2_kern.o sec $GET_TUNNEL
}
function test_ipip {
DEV_NS=ipip_std
DEV=ipip_bpf
config_device
# tcpdump -nei br0 &
cat /sys/kernel/debug/tracing/trace_pipe &
add_ipip_tunnel
attach_bpf $DEV ipip_set_tunnel ipip_get_tunnel
ip netns exec at_ns0 ping -c 1 10.1.1.200
ip netns exec at_ns2 ping -c 1 10.1.1.100
ip netns exec at_ns0 iperf -sD -p 5200 > /dev/null
ip netns exec at_ns1 iperf -sD -p 5201 > /dev/null
sleep 0.2
# tcp check _same_ IP over different tunnels
ip netns exec at_ns2 iperf -c 10.1.1.100 -n 5k -p 5200
ip netns exec at_ns2 iperf -c 10.1.1.100 -n 5k -p 5201
cleanup
}
# IPv4 over IPv6 tunnel
function test_ipip6 {
DEV_NS=ipip_std
DEV=ipip_bpf
config_device
# tcpdump -nei br0 &
cat /sys/kernel/debug/tracing/trace_pipe &
add_ipip6_tunnel
attach_bpf $DEV ipip6_set_tunnel ipip6_get_tunnel
ip netns exec at_ns0 ping -c 1 10.1.1.200
ip netns exec at_ns2 ping -c 1 10.1.1.100
ip netns exec at_ns0 iperf -sD -p 5200 > /dev/null
ip netns exec at_ns1 iperf -sD -p 5201 > /dev/null
sleep 0.2
# tcp check _same_ IP over different tunnels
ip netns exec at_ns2 iperf -c 10.1.1.100 -n 5k -p 5200
ip netns exec at_ns2 iperf -c 10.1.1.100 -n 5k -p 5201
cleanup
}
# IPv6 over IPv6 tunnel
function test_ip6ip6 {
DEV_NS=ipip_std
DEV=ipip_bpf
config_device
# tcpdump -nei br0 &
cat /sys/kernel/debug/tracing/trace_pipe &
add_ip6ip6_tunnel
attach_bpf $DEV ip6ip6_set_tunnel ip6ip6_get_tunnel
ip netns exec at_ns0 ping -6 -c 1 2601:646::2
ip netns exec at_ns2 ping -6 -c 1 2601:646::1
ip netns exec at_ns0 iperf -6sD -p 5200 > /dev/null
ip netns exec at_ns1 iperf -6sD -p 5201 > /dev/null
sleep 0.2
# tcp check _same_ IP over different tunnels
ip netns exec at_ns2 iperf -6c 2601:646::1 -n 5k -p 5200
ip netns exec at_ns2 iperf -6c 2601:646::1 -n 5k -p 5201
cleanup
}
function cleanup {
set +ex
pkill iperf
ip netns delete at_ns0
ip netns delete at_ns1
ip netns delete at_ns2
ip link del veth0
ip link del veth1
ip link del veth2
ip link del br0
pkill tcpdump
pkill cat
set -ex
}
cleanup
echo "Testing IP tunnels..."
test_ipip
test_ipip6
test_ip6ip6
echo "*** PASS ***"
......@@ -6,6 +6,7 @@
PHONY := __modfinal
__modfinal:
include include/config/auto.conf
include $(srctree)/scripts/Kbuild.include
# for c_flags
......@@ -36,8 +37,23 @@ quiet_cmd_ld_ko_o = LD [M] $@
-T scripts/module.lds -o $@ $(filter %.o, $^); \
$(if $(ARCH_POSTLINK), $(MAKE) -f $(ARCH_POSTLINK) $@, true)
$(modules): %.ko: %.o %.mod.o scripts/module.lds FORCE
+$(call if_changed,ld_ko_o)
quiet_cmd_btf_ko = BTF [M] $@
cmd_btf_ko = LLVM_OBJCOPY=$(OBJCOPY) $(PAHOLE) -J --btf_base vmlinux $@
# Same as newer-prereqs, but allows to exclude specified extra dependencies
newer_prereqs_except = $(filter-out $(PHONY) $(1),$?)
# Same as if_changed, but allows to exclude specified extra dependencies
if_changed_except = $(if $(call newer_prereqs_except,$(2))$(cmd-check), \
$(cmd); \
printf '%s\n' 'cmd_$@ := $(make-cmd)' > $(dot-target).cmd, @:)
# Re-generate module BTFs if either module's .ko or vmlinux changed
$(modules): %.ko: %.o %.mod.o scripts/module.lds vmlinux FORCE
+$(call if_changed_except,ld_ko_o,vmlinux)
ifdef CONFIG_DEBUG_INFO_BTF_MODULES
+$(if $(newer-prereqs),$(call cmd,btf_ko))
endif
targets += $(modules) $(modules:.ko=.mod.o)
......
......@@ -12,6 +12,7 @@ static struct security_hook_list bpf_lsm_hooks[] __lsm_ro_after_init = {
#include <linux/lsm_hook_defs.h>
#undef LSM_HOOK
LSM_HOOK_INIT(inode_free_security, bpf_inode_storage_free),
LSM_HOOK_INIT(task_free, bpf_task_storage_free),
};
static int __init bpf_lsm_init(void)
......@@ -23,6 +24,7 @@ static int __init bpf_lsm_init(void)
struct lsm_blob_sizes bpf_lsm_blob_sizes __lsm_ro_after_init = {
.lbs_inode = sizeof(struct bpf_storage_blob),
.lbs_task = sizeof(struct bpf_storage_blob),
};
DEFINE_LSM(bpf) = {
......
# SPDX-License-Identifier: GPL-2.0-only
*.d
/bpftool-bootstrap
/bootstrap/
/bpftool
bpftool*.8
bpf-helpers.*
......
......@@ -50,7 +50,8 @@ MAP COMMANDS
| | **lru_percpu_hash** | **lpm_trie** | **array_of_maps** | **hash_of_maps**
| | **devmap** | **devmap_hash** | **sockmap** | **cpumap** | **xskmap** | **sockhash**
| | **cgroup_storage** | **reuseport_sockarray** | **percpu_cgroup_storage**
| | **queue** | **stack** | **sk_storage** | **struct_ops** | **ringbuf** | **inode_storage** }
| | **queue** | **stack** | **sk_storage** | **struct_ops** | **ringbuf** | **inode_storage**
| **task_storage** }
DESCRIPTION
===========
......
......@@ -19,22 +19,39 @@ BPF_DIR = $(srctree)/tools/lib/bpf/
ifneq ($(OUTPUT),)
LIBBPF_OUTPUT = $(OUTPUT)/libbpf/
LIBBPF_PATH = $(LIBBPF_OUTPUT)
BOOTSTRAP_OUTPUT = $(OUTPUT)/bootstrap/
else
LIBBPF_OUTPUT =
LIBBPF_PATH = $(BPF_DIR)
BOOTSTRAP_OUTPUT = $(CURDIR)/bootstrap/
endif
LIBBPF = $(LIBBPF_PATH)libbpf.a
LIBBPF_BOOTSTRAP_OUTPUT = $(BOOTSTRAP_OUTPUT)libbpf/
LIBBPF_BOOTSTRAP = $(LIBBPF_BOOTSTRAP_OUTPUT)libbpf.a
BPFTOOL_VERSION ?= $(shell make -rR --no-print-directory -sC ../../.. kernelversion)
ifeq ($(BPFTOOL_VERSION),)
BPFTOOL_VERSION := $(shell make -rR --no-print-directory -sC ../../.. kernelversion)
endif
$(LIBBPF_OUTPUT) $(BOOTSTRAP_OUTPUT) $(LIBBPF_BOOTSTRAP_OUTPUT):
$(QUIET_MKDIR)mkdir -p $@
$(LIBBPF): FORCE
$(if $(LIBBPF_OUTPUT),@mkdir -p $(LIBBPF_OUTPUT))
$(LIBBPF): FORCE | $(LIBBPF_OUTPUT)
$(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(LIBBPF_OUTPUT) $(LIBBPF_OUTPUT)libbpf.a
$(LIBBPF)-clean:
$(LIBBPF_BOOTSTRAP): FORCE | $(LIBBPF_BOOTSTRAP_OUTPUT)
$(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(LIBBPF_BOOTSTRAP_OUTPUT) \
ARCH= CC=$(HOSTCC) LD=$(HOSTLD) $@
$(LIBBPF)-clean: FORCE | $(LIBBPF_OUTPUT)
$(call QUIET_CLEAN, libbpf)
$(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(LIBBPF_OUTPUT) clean >/dev/null
$(LIBBPF_BOOTSTRAP)-clean: FORCE | $(LIBBPF_BOOTSTRAP_OUTPUT)
$(call QUIET_CLEAN, libbpf-bootstrap)
$(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(LIBBPF_BOOTSTRAP_OUTPUT) clean >/dev/null
prefix ?= /usr/local
bash_compdir ?= /usr/share/bash-completion/completions
......@@ -92,6 +109,7 @@ CFLAGS += -DCOMPAT_NEED_REALLOCARRAY
endif
LIBS = $(LIBBPF) -lelf -lz
LIBS_BOOTSTRAP = $(LIBBPF_BOOTSTRAP) -lelf -lz
ifeq ($(feature-libcap), 1)
CFLAGS += -DUSE_LIBCAP
LIBS += -lcap
......@@ -118,9 +136,9 @@ CFLAGS += -DHAVE_LIBBFD_SUPPORT
SRCS += $(BFD_SRCS)
endif
BPFTOOL_BOOTSTRAP := $(if $(OUTPUT),$(OUTPUT)bpftool-bootstrap,./bpftool-bootstrap)
BPFTOOL_BOOTSTRAP := $(BOOTSTRAP_OUTPUT)bpftool
BOOTSTRAP_OBJS = $(addprefix $(OUTPUT),main.o common.o json_writer.o gen.o btf.o)
BOOTSTRAP_OBJS = $(addprefix $(BOOTSTRAP_OUTPUT),main.o common.o json_writer.o gen.o btf.o)
OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o
VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux) \
......@@ -167,12 +185,16 @@ $(OUTPUT)disasm.o: $(srctree)/kernel/bpf/disasm.c
$(OUTPUT)feature.o: | zdep
$(BPFTOOL_BOOTSTRAP): $(BOOTSTRAP_OBJS) $(LIBBPF)
$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(BOOTSTRAP_OBJS) $(LIBS)
$(BPFTOOL_BOOTSTRAP): $(BOOTSTRAP_OBJS) $(LIBBPF_BOOTSTRAP)
$(QUIET_LINK)$(HOSTCC) $(CFLAGS) $(LDFLAGS) -o $@ $(BOOTSTRAP_OBJS) \
$(LIBS_BOOTSTRAP)
$(OUTPUT)bpftool: $(OBJS) $(LIBBPF)
$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(OBJS) $(LIBS)
$(BOOTSTRAP_OUTPUT)%.o: %.c | $(BOOTSTRAP_OUTPUT)
$(QUIET_CC)$(HOSTCC) $(CFLAGS) -c -MMD -o $@ $<
$(OUTPUT)%.o: %.c
$(QUIET_CC)$(CC) $(CFLAGS) -c -MMD -o $@ $<
......@@ -180,11 +202,11 @@ feature-detect-clean:
$(call QUIET_CLEAN, feature-detect)
$(Q)$(MAKE) -C $(srctree)/tools/build/feature/ clean >/dev/null
clean: $(LIBBPF)-clean feature-detect-clean
clean: $(LIBBPF)-clean $(LIBBPF_BOOTSTRAP)-clean feature-detect-clean
$(call QUIET_CLEAN, bpftool)
$(Q)$(RM) -- $(OUTPUT)bpftool $(OUTPUT)*.o $(OUTPUT)*.d
$(Q)$(RM) -- $(BPFTOOL_BOOTSTRAP) $(OUTPUT)*.skel.h $(OUTPUT)vmlinux.h
$(Q)$(RM) -r -- $(OUTPUT)libbpf/
$(Q)$(RM) -- $(OUTPUT)*.skel.h $(OUTPUT)vmlinux.h
$(Q)$(RM) -r -- $(LIBBPF_OUTPUT) $(BOOTSTRAP_OUTPUT)
$(call QUIET_CLEAN, core-gen)
$(Q)$(RM) -- $(OUTPUT)FEATURE-DUMP.bpftool
$(Q)$(RM) -r -- $(OUTPUT)feature/
......
......@@ -705,7 +705,7 @@ _bpftool()
hash_of_maps devmap devmap_hash sockmap cpumap \
xskmap sockhash cgroup_storage reuseport_sockarray \
percpu_cgroup_storage queue stack sk_storage \
struct_ops inode_storage' -- \
struct_ops inode_storage task_storage' -- \
"$cur" ) )
return 0
;;
......
......@@ -358,8 +358,12 @@ static int dump_btf_raw(const struct btf *btf,
}
} else {
int cnt = btf__get_nr_types(btf);
int start_id = 1;
for (i = 1; i <= cnt; i++) {
if (base_btf)
start_id = btf__get_nr_types(base_btf) + 1;
for (i = start_id; i <= cnt; i++) {
t = btf__type_by_id(btf, i);
dump_btf_type(btf, i, t);
}
......@@ -438,7 +442,6 @@ static int do_dump(int argc, char **argv)
return -1;
}
src = GET_ARG();
if (is_prefix(src, "map")) {
struct bpf_map_info info = {};
__u32 len = sizeof(info);
......@@ -499,7 +502,7 @@ static int do_dump(int argc, char **argv)
}
NEXT_ARG();
} else if (is_prefix(src, "file")) {
btf = btf__parse(*argv, NULL);
btf = btf__parse_split(*argv, base_btf);
if (IS_ERR(btf)) {
err = -PTR_ERR(btf);
btf = NULL;
......@@ -739,9 +742,14 @@ show_btf_plain(struct bpf_btf_info *info, int fd,
struct btf_attach_table *btf_map_table)
{
struct btf_attach_point *obj;
const char *name = u64_to_ptr(info->name);
int n;
printf("%u: ", info->id);
if (info->kernel_btf)
printf("name [%s] ", name);
else if (name && name[0])
printf("name %s ", name);
printf("size %uB", info->btf_size);
n = 0;
......@@ -768,6 +776,7 @@ show_btf_json(struct bpf_btf_info *info, int fd,
struct btf_attach_table *btf_map_table)
{
struct btf_attach_point *obj;
const char *name = u64_to_ptr(info->name);
jsonw_start_object(json_wtr); /* btf object */
jsonw_uint_field(json_wtr, "id", info->id);
......@@ -793,6 +802,11 @@ show_btf_json(struct bpf_btf_info *info, int fd,
emit_obj_refs_json(&refs_table, info->id, json_wtr); /* pids */
jsonw_bool_field(json_wtr, "kernel", info->kernel_btf);
if (name && name[0])
jsonw_string_field(json_wtr, "name", name);
jsonw_end_object(json_wtr); /* btf object */
}
......@@ -800,15 +814,30 @@ static int
show_btf(int fd, struct btf_attach_table *btf_prog_table,
struct btf_attach_table *btf_map_table)
{
struct bpf_btf_info info = {};
struct bpf_btf_info info;
__u32 len = sizeof(info);
char name[64];
int err;
memset(&info, 0, sizeof(info));
err = bpf_obj_get_info_by_fd(fd, &info, &len);
if (err) {
p_err("can't get BTF object info: %s", strerror(errno));
return -1;
}
/* if the kernel supports emitting the BTF object name, pass the name pointer */
if (info.name_len) {
memset(&info, 0, sizeof(info));
info.name_len = sizeof(name);
info.name = ptr_to_u64(name);
len = sizeof(info);
err = bpf_obj_get_info_by_fd(fd, &info, &len);
if (err) {
p_err("can't get BTF object info: %s", strerror(errno));
return -1;
}
}
if (json_output)
show_btf_json(&info, fd, btf_prog_table, btf_map_table);
......
......@@ -11,6 +11,7 @@
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <bpf/btf.h>
#include "main.h"
......@@ -28,6 +29,7 @@ bool show_pinned;
bool block_mount;
bool verifier_logs;
bool relaxed_maps;
struct btf *base_btf;
struct pinned_obj_table prog_table;
struct pinned_obj_table map_table;
struct pinned_obj_table link_table;
......@@ -391,6 +393,7 @@ int main(int argc, char **argv)
{ "mapcompat", no_argument, NULL, 'm' },
{ "nomount", no_argument, NULL, 'n' },
{ "debug", no_argument, NULL, 'd' },
{ "base-btf", required_argument, NULL, 'B' },
{ 0 }
};
int opt, ret;
......@@ -407,7 +410,7 @@ int main(int argc, char **argv)
hash_init(link_table.table);
opterr = 0;
while ((opt = getopt_long(argc, argv, "Vhpjfmnd",
while ((opt = getopt_long(argc, argv, "VhpjfmndB:",
options, NULL)) >= 0) {
switch (opt) {
case 'V':
......@@ -441,6 +444,15 @@ int main(int argc, char **argv)
libbpf_set_print(print_all_levels);
verifier_logs = true;
break;
case 'B':
base_btf = btf__parse(optarg, NULL);
if (libbpf_get_error(base_btf)) {
p_err("failed to parse base BTF at '%s': %ld\n",
optarg, libbpf_get_error(base_btf));
base_btf = NULL;
return -1;
}
break;
default:
p_err("unrecognized option '%s'", argv[optind - 1]);
if (json_output)
......@@ -465,6 +477,7 @@ int main(int argc, char **argv)
delete_pinned_obj_table(&map_table);
delete_pinned_obj_table(&link_table);
}
btf__free(base_btf);
return ret;
}
......@@ -90,6 +90,7 @@ extern bool show_pids;
extern bool block_mount;
extern bool verifier_logs;
extern bool relaxed_maps;
extern struct btf *base_btf;
extern struct pinned_obj_table prog_table;
extern struct pinned_obj_table map_table;
extern struct pinned_obj_table link_table;
......
......@@ -51,6 +51,7 @@ const char * const map_type_name[] = {
[BPF_MAP_TYPE_STRUCT_OPS] = "struct_ops",
[BPF_MAP_TYPE_RINGBUF] = "ringbuf",
[BPF_MAP_TYPE_INODE_STORAGE] = "inode_storage",
[BPF_MAP_TYPE_TASK_STORAGE] = "task_storage",
};
const size_t map_type_name_size = ARRAY_SIZE(map_type_name);
......@@ -1464,7 +1465,8 @@ static int do_help(int argc, char **argv)
" lru_percpu_hash | lpm_trie | array_of_maps | hash_of_maps |\n"
" devmap | devmap_hash | sockmap | cpumap | xskmap | sockhash |\n"
" cgroup_storage | reuseport_sockarray | percpu_cgroup_storage |\n"
" queue | stack | sk_storage | struct_ops | ringbuf | inode_storage }\n"
" queue | stack | sk_storage | struct_ops | ringbuf | inode_storage |\n"
" task_storage }\n"
" " HELP_SPEC_OPTIONS "\n"
"",
bin_name, argv[-2]);
......
......@@ -18,15 +18,6 @@ else
endif
# always use the host compiler
ifneq ($(LLVM),)
HOSTAR ?= llvm-ar
HOSTCC ?= clang
HOSTLD ?= ld.lld
else
HOSTAR ?= ar
HOSTCC ?= gcc
HOSTLD ?= ld
endif
AR = $(HOSTAR)
CC = $(HOSTCC)
LD = $(HOSTLD)
......
# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
OUTPUT := .output
include ../../scripts/Makefile.include
OUTPUT ?= $(abspath .output)/
CLANG ?= clang
LLC ?= llc
LLVM_STRIP ?= llvm-strip
DEFAULT_BPFTOOL := $(OUTPUT)/sbin/bpftool
BPFTOOL_OUTPUT := $(OUTPUT)bpftool/
DEFAULT_BPFTOOL := $(BPFTOOL_OUTPUT)bpftool
BPFTOOL ?= $(DEFAULT_BPFTOOL)
LIBBPF_SRC := $(abspath ../../lib/bpf)
BPFOBJ := $(OUTPUT)/libbpf.a
BPF_INCLUDE := $(OUTPUT)
BPFOBJ_OUTPUT := $(OUTPUT)libbpf/
BPFOBJ := $(BPFOBJ_OUTPUT)libbpf.a
BPF_INCLUDE := $(BPFOBJ_OUTPUT)
INCLUDES := -I$(OUTPUT) -I$(BPF_INCLUDE) -I$(abspath ../../lib) \
-I$(abspath ../../include/uapi)
CFLAGS := -g -Wall
......@@ -18,13 +23,10 @@ VMLINUX_BTF_PATHS := /sys/kernel/btf/vmlinux /boot/vmlinux-$(KERNEL_REL)
VMLINUX_BTF_PATH := $(or $(VMLINUX_BTF),$(firstword \
$(wildcard $(VMLINUX_BTF_PATHS))))
abs_out := $(abspath $(OUTPUT))
ifeq ($(V),1)
Q =
msg =
else
Q = @
msg = @printf ' %-8s %s%s\n' "$(1)" "$(notdir $(2))" "$(if $(3), $(3))";
MAKEFLAGS += --no-print-directory
submake_extras := feature_display=0
endif
......@@ -37,12 +39,15 @@ all: runqslower
runqslower: $(OUTPUT)/runqslower
clean:
$(call msg,CLEAN)
$(Q)rm -rf $(OUTPUT) runqslower
$(call QUIET_CLEAN, runqslower)
$(Q)$(RM) -r $(BPFOBJ_OUTPUT) $(BPFTOOL_OUTPUT)
$(Q)$(RM) $(OUTPUT)*.o $(OUTPUT)*.d
$(Q)$(RM) $(OUTPUT)*.skel.h $(OUTPUT)vmlinux.h
$(Q)$(RM) $(OUTPUT)runqslower
$(Q)$(RM) -r .output
$(OUTPUT)/runqslower: $(OUTPUT)/runqslower.o $(BPFOBJ)
$(call msg,BINARY,$@)
$(Q)$(CC) $(CFLAGS) $^ -lelf -lz -o $@
$(QUIET_LINK)$(CC) $(CFLAGS) $^ -lelf -lz -o $@
$(OUTPUT)/runqslower.o: runqslower.h $(OUTPUT)/runqslower.skel.h \
$(OUTPUT)/runqslower.bpf.o
......@@ -50,36 +55,30 @@ $(OUTPUT)/runqslower.o: runqslower.h $(OUTPUT)/runqslower.skel.h \
$(OUTPUT)/runqslower.bpf.o: $(OUTPUT)/vmlinux.h runqslower.h
$(OUTPUT)/%.skel.h: $(OUTPUT)/%.bpf.o | $(BPFTOOL)
$(call msg,GEN-SKEL,$@)
$(Q)$(BPFTOOL) gen skeleton $< > $@
$(QUIET_GEN)$(BPFTOOL) gen skeleton $< > $@
$(OUTPUT)/%.bpf.o: %.bpf.c $(BPFOBJ) | $(OUTPUT)
$(call msg,BPF,$@)
$(Q)$(CLANG) -g -O2 -target bpf $(INCLUDES) \
$(QUIET_GEN)$(CLANG) -g -O2 -target bpf $(INCLUDES) \
-c $(filter %.c,$^) -o $@ && \
$(LLVM_STRIP) -g $@
$(OUTPUT)/%.o: %.c | $(OUTPUT)
$(call msg,CC,$@)
$(Q)$(CC) $(CFLAGS) $(INCLUDES) -c $(filter %.c,$^) -o $@
$(QUIET_CC)$(CC) $(CFLAGS) $(INCLUDES) -c $(filter %.c,$^) -o $@
$(OUTPUT):
$(call msg,MKDIR,$@)
$(Q)mkdir -p $(OUTPUT)
$(OUTPUT) $(BPFOBJ_OUTPUT) $(BPFTOOL_OUTPUT):
$(QUIET_MKDIR)mkdir -p $@
$(OUTPUT)/vmlinux.h: $(VMLINUX_BTF_PATH) | $(OUTPUT) $(BPFTOOL)
$(call msg,GEN,$@)
$(Q)if [ ! -e "$(VMLINUX_BTF_PATH)" ] ; then \
echo "Couldn't find kernel BTF; set VMLINUX_BTF to" \
"specify its location." >&2; \
exit 1;\
fi
$(Q)$(BPFTOOL) btf dump file $(VMLINUX_BTF_PATH) format c > $@
$(QUIET_GEN)$(BPFTOOL) btf dump file $(VMLINUX_BTF_PATH) format c > $@
$(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(OUTPUT)
$(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) \
OUTPUT=$(abspath $(dir $@))/ $(abspath $@)
$(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(BPFOBJ_OUTPUT)
$(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) OUTPUT=$(BPFOBJ_OUTPUT) $@
$(DEFAULT_BPFTOOL):
$(Q)$(MAKE) $(submake_extras) -C ../bpftool \
prefix= OUTPUT=$(abs_out)/ DESTDIR=$(abs_out) install
$(DEFAULT_BPFTOOL): | $(BPFTOOL_OUTPUT)
$(Q)$(MAKE) $(submake_extras) -C ../bpftool OUTPUT=$(BPFTOOL_OUTPUT) \
CC=$(HOSTCC) LD=$(HOSTLD)
......@@ -15,10 +15,6 @@ endef
$(call allow-override,CC,$(CROSS_COMPILE)gcc)
$(call allow-override,LD,$(CROSS_COMPILE)ld)
HOSTCC ?= gcc
HOSTLD ?= ld
HOSTAR ?= ar
export HOSTCC HOSTLD HOSTAR
ifeq ($(V),1)
......
......@@ -157,6 +157,7 @@ enum bpf_map_type {
BPF_MAP_TYPE_STRUCT_OPS,
BPF_MAP_TYPE_RINGBUF,
BPF_MAP_TYPE_INODE_STORAGE,
BPF_MAP_TYPE_TASK_STORAGE,
};
/* Note that tracing related programs such as
......@@ -3742,6 +3743,50 @@ union bpf_attr {
* Return
* The helper returns **TC_ACT_REDIRECT** on success or
* **TC_ACT_SHOT** on error.
*
* void *bpf_task_storage_get(struct bpf_map *map, struct task_struct *task, void *value, u64 flags)
* Description
* Get a bpf_local_storage from the *task*.
*
* Logically, it could be thought of as getting the value from
* a *map* with *task* as the **key**. From this
* perspective, the usage is not much different from
* **bpf_map_lookup_elem**\ (*map*, **&**\ *task*) except this
* helper enforces that the key must be a task_struct and the map must also
* be a **BPF_MAP_TYPE_TASK_STORAGE**.
*
* Underneath, the value is stored locally at *task* instead of
* the *map*. The *map* is used as the bpf-local-storage
* "type". The bpf-local-storage "type" (i.e. the *map*) is
* searched against all bpf_local_storage residing at *task*.
*
* An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be
* used such that a new bpf_local_storage will be
* created if one does not exist. *value* can be used
* together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify
* the initial value of a bpf_local_storage. If *value* is
* **NULL**, the new bpf_local_storage will be zero initialized.
* Return
* A bpf_local_storage pointer is returned on success.
*
* **NULL** if not found or there was an error in adding
* a new bpf_local_storage.
*
* long bpf_task_storage_delete(struct bpf_map *map, struct task_struct *task)
* Description
* Delete a bpf_local_storage from a *task*.
* Return
* 0 on success.
*
* **-ENOENT** if the bpf_local_storage cannot be found.
*
* struct task_struct *bpf_get_current_task_btf(void)
* Description
* Return a BTF pointer to the "current" task.
* This pointer can also be used in helpers that accept an
* *ARG_PTR_TO_BTF_ID* of type *task_struct*.
* Return
* Pointer to the current task.
*/
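Taken together, the three helpers above are meant to be used from an LSM-type BPF program that owns a task-local storage map. A minimal sketch (map name, value layout and the file_open hook choice are illustrative, not taken from this series) could look like:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

/* Task-local storage maps must use BPF_F_NO_PREALLOC and an int key. */
struct {
	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, long);
} task_stg SEC(".maps");

SEC("lsm/file_open")
int BPF_PROG(count_file_open, struct file *file)
{
	struct task_struct *task = bpf_get_current_task_btf();
	long *cnt;

	/* Create the per-task value on first use; a 0 value pointer means
	 * the new storage is zero-initialized.
	 */
	cnt = bpf_task_storage_get(&task_stg, task, 0,
				   BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (cnt)
		*cnt += 1;
	return 0;
}

char LICENSE[] SEC("license") = "GPL";

Dropping the entry again from program context is then a single bpf_task_storage_delete(&task_stg, task) call, which returns -ENOENT if no storage was attached to that task.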
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
......@@ -3900,6 +3945,9 @@ union bpf_attr {
FN(bpf_per_cpu_ptr), \
FN(bpf_this_cpu_ptr), \
FN(redirect_peer), \
FN(task_storage_get), \
FN(task_storage_delete), \
FN(get_current_task_btf), \
/* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
......@@ -4418,6 +4466,9 @@ struct bpf_btf_info {
__aligned_u64 btf;
__u32 btf_size;
__u32 id;
__aligned_u64 name;
__u32 name_len;
__u32 kernel_btf;
} __attribute__((aligned(8)));
struct bpf_link_info {
......
......@@ -31,11 +31,19 @@ enum btf_endianness {
};
LIBBPF_API void btf__free(struct btf *btf);
LIBBPF_API struct btf *btf__new(const void *data, __u32 size);
LIBBPF_API struct btf *btf__new_split(const void *data, __u32 size, struct btf *base_btf);
LIBBPF_API struct btf *btf__new_empty(void);
LIBBPF_API struct btf *btf__new_empty_split(struct btf *base_btf);
LIBBPF_API struct btf *btf__parse(const char *path, struct btf_ext **btf_ext);
LIBBPF_API struct btf *btf__parse_split(const char *path, struct btf *base_btf);
LIBBPF_API struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext);
LIBBPF_API struct btf *btf__parse_elf_split(const char *path, struct btf *base_btf);
LIBBPF_API struct btf *btf__parse_raw(const char *path);
LIBBPF_API struct btf *btf__parse_raw_split(const char *path, struct btf *base_btf);
LIBBPF_API int btf__finalize_data(struct bpf_object *obj, struct btf *btf);
LIBBPF_API int btf__load(struct btf *btf);
LIBBPF_API __s32 btf__find_by_name(const struct btf *btf,
......
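The split BTF additions above mirror the existing parse APIs but take a base BTF whose type IDs the split object references. A minimal sketch of loading a module-style split BTF on top of vmlinux BTF and walking only the types it adds (paths and the function name are illustrative) might be:

#include <stdio.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>

int dump_split_types(const char *split_btf_path)
{
	struct btf *base, *split;
	int i, n, start;

	/* Base BTF, e.g. the running kernel's. */
	base = btf__parse("/sys/kernel/btf/vmlinux", NULL);
	if (libbpf_get_error(base))
		return -1;

	/* The split BTF resolves IDs below start against the base object. */
	split = btf__parse_split(split_btf_path, base);
	if (libbpf_get_error(split)) {
		btf__free(base);
		return -1;
	}

	start = btf__get_nr_types(base) + 1;
	n = btf__get_nr_types(split);
	for (i = start; i <= n; i++) {
		const struct btf_type *t = btf__type_by_id(split, i);

		printf("[%d] name '%s'\n", i,
		       btf__name_by_offset(split, t->name_off));
	}

	btf__free(split);
	btf__free(base);
	return 0;
}

This is the same pattern bpftool uses above: its new -B (--base-btf) option parses the base object and hands it to btf__parse_split() in btf dump file.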
......@@ -337,3 +337,12 @@ LIBBPF_0.2.0 {
perf_buffer__consume_buffer;
xsk_socket__create_shared;
} LIBBPF_0.1.0;
LIBBPF_0.3.0 {
global:
btf__parse_elf_split;
btf__parse_raw_split;
btf__parse_split;
btf__new_empty_split;
btf__new_split;
} LIBBPF_0.2.0;
......@@ -230,6 +230,7 @@ bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
break;
case BPF_MAP_TYPE_SK_STORAGE:
case BPF_MAP_TYPE_INODE_STORAGE:
case BPF_MAP_TYPE_TASK_STORAGE:
btf_key_type_id = 1;
btf_value_type_id = 3;
value_size = 8;
......
......@@ -3,15 +3,6 @@ include ../scripts/Makefile.include
include ../scripts/Makefile.arch
# always use the host compiler
ifneq ($(LLVM),)
HOSTAR ?= llvm-ar
HOSTCC ?= clang
HOSTLD ?= ld.lld
else
HOSTAR ?= ar
HOSTCC ?= gcc
HOSTLD ?= ld
endif
AR = $(HOSTAR)
CC = $(HOSTCC)
LD = $(HOSTLD)
......
......@@ -175,10 +175,6 @@ endef
LD += $(EXTRA_LDFLAGS)
HOSTCC ?= gcc
HOSTLD ?= ld
HOSTAR ?= ar
PKG_CONFIG = $(CROSS_COMPILE)pkg-config
LLVM_CONFIG ?= llvm-config
......
......@@ -54,7 +54,6 @@ INSTALL_SCRIPT = ${INSTALL_PROGRAM}
CROSS = #/usr/i386-linux-uclibc/usr/bin/i386-uclibc-
CROSS_COMPILE ?= $(CROSS)
LD = $(CC)
HOSTCC = gcc
# check if compiler option is supported
cc-supports = ${shell if $(CC) ${1} -S -o /dev/null -x c /dev/null > /dev/null 2>&1; then echo "$(1)"; fi;}
......
......@@ -59,6 +59,16 @@ $(call allow-override,LD,$(CROSS_COMPILE)ld)
$(call allow-override,CXX,$(CROSS_COMPILE)g++)
$(call allow-override,STRIP,$(CROSS_COMPILE)strip)
ifneq ($(LLVM),)
HOSTAR ?= llvm-ar
HOSTCC ?= clang
HOSTLD ?= ld.lld
else
HOSTAR ?= ar
HOSTCC ?= gcc
HOSTLD ?= ld
endif
ifeq ($(CC_NO_CLANG), 1)
EXTRA_WARNINGS += -Wstrict-aliasing=3
endif
......
......@@ -8,7 +8,6 @@ FEATURE-DUMP.libbpf
fixdep
test_dev_cgroup
/test_progs*
test_tcpbpf_user
test_verifier_log
feature
test_sock
......
......@@ -32,7 +32,7 @@ LDLIBS += -lcap -lelf -lz -lrt -lpthread
# Order corresponds to 'make run_tests' order
TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
test_verifier_log test_dev_cgroup test_tcpbpf_user \
test_verifier_log test_dev_cgroup \
test_sock test_sockmap get_cgroup_id_user test_socket_cookie \
test_cgroup_storage \
test_netcnt test_tcpnotify_user test_sysctl \
......@@ -163,7 +163,6 @@ $(OUTPUT)/test_sock: cgroup_helpers.c
$(OUTPUT)/test_sock_addr: cgroup_helpers.c
$(OUTPUT)/test_socket_cookie: cgroup_helpers.c
$(OUTPUT)/test_sockmap: cgroup_helpers.c
$(OUTPUT)/test_tcpbpf_user: cgroup_helpers.c
$(OUTPUT)/test_tcpnotify_user: cgroup_helpers.c trace_helpers.c
$(OUTPUT)/get_cgroup_id_user: cgroup_helpers.c
$(OUTPUT)/test_cgroup_storage: cgroup_helpers.c
......@@ -387,7 +386,7 @@ TRUNNER_TESTS_DIR := prog_tests
TRUNNER_BPF_PROGS_DIR := progs
TRUNNER_EXTRA_SOURCES := test_progs.c cgroup_helpers.c trace_helpers.c \
network_helpers.c testing_helpers.c \
flow_dissector_load.h
btf_helpers.c flow_dissector_load.h
TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read \
$(wildcard progs/btf_dump_test_case_*.c)
TRUNNER_BPF_BUILD_RULE := CLANG_BPF_BUILD_RULE
......
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2020 Facebook */
#ifndef __BTF_HELPERS_H
#define __BTF_HELPERS_H
#include <stdio.h>
#include <bpf/btf.h>
int fprintf_btf_type_raw(FILE *out, const struct btf *btf, __u32 id);
const char *btf_type_raw_dump(const struct btf *btf, int type_id);
int btf_validate_raw(struct btf *btf, int nr_types, const char *exp_types[]);
#define VALIDATE_RAW_BTF(btf, raw_types...) \
btf_validate_raw(btf, \
sizeof((const char *[]){raw_types})/sizeof(void *),\
(const char *[]){raw_types})
const char *btf_type_c_dump(const struct btf *btf);
#endif
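The VALIDATE_RAW_BTF() wrapper above only counts its string arguments and forwards them as an array to btf_validate_raw(). A hypothetical call from a test (the expected string is illustrative and has to match what btf_type_raw_dump() emits for the types actually in the object) would be:

#include "btf_helpers.h"

void check_single_int(struct btf *btf)
{
	/* Assumes btf contains exactly one type: a signed 4-byte 'int'. */
	VALIDATE_RAW_BTF(btf,
			 "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED");
}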
......@@ -17,7 +17,7 @@
#include "test_btf_skc_cls_ingress.skel.h"
static struct test_btf_skc_cls_ingress *skel;
struct sockaddr_in6 srv_sa6;
static struct sockaddr_in6 srv_sa6;
static __u32 duration;
#define PROG_PIN_FILE "/sys/fs/bpf/btf_skc_cls_ingress"
......
......@@ -1223,9 +1223,10 @@ static void test_map_in_map(void)
static void test_map_large(void)
{
struct bigkey {
int a;
char b[116];
char b[4096];
long long c;
} key;
int fd, i, value;
......