Commit 2786ae33 authored by Jakub Kicinski

Merge tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2024-05-27

We've added 15 non-merge commits during the last 7 day(s) which contain
a total of 18 files changed, 583 insertions(+), 55 deletions(-).

The main changes are:

1) Fix broken BPF multi-uprobe PID filtering logic which filtered by thread
   while the promise was to filter by process, from Andrii Nakryiko
   (a short illustrative sketch follows this list).

2) Fix the recent influx of syzkaller reports to sockmap which triggered
   a locking rule violation by performing a map_delete, from Jakub Sitnicki.

3) Fixes to the netkit driver, in particular for the skb->pkt_type override
   upon pass verdict, from Daniel Borkmann.

4) Fix an integer overflow in resolve_btfids which can wrongly trigger build
   failures, from Friedrich Vock.

5) Follow-up fixes for ARC JIT reported by static analyzers,
   from Shahab Vahedi.
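
For the multi-uprobe fix in 1), here is a minimal illustrative sketch of the
thread-vs-process distinction. It only restates the one-line change visible in
the uprobe_prog_run() hunk further down (compare the shared mm rather than the
task_struct pointer); the helper name below is made up for illustration:

  /* Illustrative only; the real check lives in uprobe_prog_run(). */
  static bool uprobe_pid_filtered(struct task_struct *link_task)
  {
          /* Old check: matched only the exact targeted thread:
           *     return link_task && current != link_task;
           * Fixed check: every thread of the target process shares
           * the same mm, so compare that instead.
           */
          return link_task && current->mm != link_task->mm;
  }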

* tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  selftests/bpf: Cover verifier checks for mutating sockmap/sockhash
  Revert "bpf, sockmap: Prevent lock inversion deadlock in map delete elem"
  bpf: Allow delete from sockmap/sockhash only if update is allowed
  selftests/bpf: Add netkit test for pkt_type
  selftests/bpf: Add netkit tests for mac address
  netkit: Fix pkt_type override upon netkit pass verdict
  netkit: Fix setting mac address in l2 mode
  ARC, bpf: Fix issues reported by the static analyzers
  selftests/bpf: extend multi-uprobe tests with USDTs
  selftests/bpf: extend multi-uprobe tests with child thread case
  libbpf: detect broken PID filtering logic for multi-uprobe
  bpf: remove unnecessary rcu_read_{lock,unlock}() in multi-uprobe attach logic
  bpf: fix multi-uprobe PID filtering logic
  bpf: Fix potential integer overflow in resolve_btfids
  MAINTAINERS: Add myself as reviewer of ARM64 BPF JIT
====================

Link: https://lore.kernel.org/r/20240527203551.29712-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 52a2f060 a63bf556
@@ -3855,6 +3855,7 @@ BPF JIT for ARM64
 M: Daniel Borkmann <daniel@iogearbox.net>
 M: Alexei Starovoitov <ast@kernel.org>
 M: Puranjay Mohan <puranjay@kernel.org>
+R: Xu Kuohai <xukuohai@huaweicloud.com>
 L: bpf@vger.kernel.org
 S: Supported
 F: arch/arm64/net/
...
@@ -39,7 +39,7 @@
 /************** Functions that the back-end must provide **************/
 /* Extension for 32-bit operations. */
-inline u8 zext(u8 *buf, u8 rd);
+u8 zext(u8 *buf, u8 rd);
 /***** Moves *****/
 u8 mov_r32(u8 *buf, u8 rd, u8 rs, u8 sign_ext);
 u8 mov_r32_i32(u8 *buf, u8 reg, s32 imm);
...
@@ -62,7 +62,7 @@ enum {
  * If/when we decide to add ARCv2 instructions that do use register pairs,
  * the mapping, hopefully, doesn't need to be revisited.
  */
-const u8 bpf2arc[][2] = {
+static const u8 bpf2arc[][2] = {
     /* Return value from in-kernel function, and exit value from eBPF */
     [BPF_REG_0] = {ARC_R_8, ARC_R_9},
     /* Arguments from eBPF program to in-kernel function */
@@ -1302,7 +1302,7 @@ static u8 arc_b(u8 *buf, s32 offset)
 /************* Packers (Deal with BPF_REGs) **************/
-inline u8 zext(u8 *buf, u8 rd)
+u8 zext(u8 *buf, u8 rd)
 {
     if (rd != BPF_REG_FP)
         return arc_movi_r(buf, REG_HI(rd), 0);
@@ -2235,6 +2235,7 @@ u8 gen_swap(u8 *buf, u8 rd, u8 size, u8 endian, bool force, bool do_zext)
             break;
         default:
             /* The caller must have handled this. */
+            break;
         }
     } else {
         /*
@@ -2253,6 +2254,7 @@ u8 gen_swap(u8 *buf, u8 rd, u8 size, u8 endian, bool force, bool do_zext)
             break;
         default:
             /* The caller must have handled this. */
+            break;
         }
     }
@@ -2517,7 +2519,7 @@ u8 arc_epilogue(u8 *buf, u32 usage, u16 frame_size)
 #define JCC64_NR_OF_JMPS 3 /* Number of jumps in jcc64 template. */
 #define JCC64_INSNS_TO_END 3 /* Number of insn. inclusive the 2nd jmp to end. */
 #define JCC64_SKIP_JMP 1 /* Index of the "skip" jump to "end". */
-const struct {
+static const struct {
     /*
      * "jit_off" is common between all "jmp[]" and is coupled with
      * "cond" of each "jmp[]" instance. e.g.:
@@ -2883,7 +2885,7 @@ u8 gen_jmp_64(u8 *buf, u8 rd, u8 rs, u8 cond, u32 curr_off, u32 targ_off)
  * The "ARC_CC_SET" becomes "CC_unequal" because of the "tst"
  * instruction that precedes the conditional branch.
  */
-const u8 arcv2_32_jmps[ARC_CC_LAST] = {
+static const u8 arcv2_32_jmps[ARC_CC_LAST] = {
     [ARC_CC_UGT] = CC_great_u,
     [ARC_CC_UGE] = CC_great_eq_u,
     [ARC_CC_ULT] = CC_less_u,
...
@@ -159,7 +159,7 @@ static void jit_dump(const struct jit_context *ctx)
 /* Initialise the context so there's no garbage. */
 static int jit_ctx_init(struct jit_context *ctx, struct bpf_prog *prog)
 {
-    memset(ctx, 0, sizeof(ctx));
+    memset(ctx, 0, sizeof(*ctx));
     ctx->orig_prog = prog;
@@ -167,7 +167,7 @@ static int jit_ctx_init(struct jit_context *ctx, struct bpf_prog *prog)
     ctx->prog = bpf_jit_blind_constants(prog);
     if (IS_ERR(ctx->prog))
         return PTR_ERR(ctx->prog);
-    ctx->blinded = (ctx->prog == ctx->orig_prog ? false : true);
+    ctx->blinded = (ctx->prog != ctx->orig_prog);
     /* If the verifier doesn't zero-extend, then we have to do it. */
     ctx->do_zext = !ctx->prog->aux->verifier_zext;
@@ -1182,12 +1182,12 @@ static int jit_prepare(struct jit_context *ctx)
 }
 /*
- * All the "handle_*()" functions have been called before by the
- * "jit_prepare()". If there was an error, we would know by now.
- * Therefore, no extra error checking at this point, other than
- * a sanity check at the end that expects the calculated length
- * (jit.len) to be equal to the length of generated instructions
- * (jit.index).
+ * jit_compile() is the real compilation phase. jit_prepare() is
+ * invoked before jit_compile() as a dry-run to make sure everything
+ * will go OK and allocate the necessary memory.
+ *
+ * In the end, jit_compile() checks if it has produced the same number
+ * of instructions as jit_prepare() would.
  */
 static int jit_compile(struct jit_context *ctx)
 {
@@ -1407,9 +1407,9 @@ static struct bpf_prog *do_extra_pass(struct bpf_prog *prog)
 /*
  * This function may be invoked twice for the same stream of BPF
- * instructions. The "extra pass" happens, when there are "call"s
- * involved that their addresses are not known during the first
- * invocation.
+ * instructions. The "extra pass" happens, when there are
+ * (re)locations involved that their addresses are not known
+ * during the first run.
  */
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
...
@@ -55,6 +55,7 @@ static void netkit_prep_forward(struct sk_buff *skb, bool xnet)
     skb_scrub_packet(skb, xnet);
     skb->priority = 0;
     nf_skip_egress(skb, true);
+    skb_reset_mac_header(skb);
 }
 static struct netkit *netkit_priv(const struct net_device *dev)
@@ -78,6 +79,7 @@ static netdev_tx_t netkit_xmit(struct sk_buff *skb, struct net_device *dev)
              skb_orphan_frags(skb, GFP_ATOMIC)))
         goto drop;
     netkit_prep_forward(skb, !net_eq(dev_net(dev), dev_net(peer)));
+    eth_skb_pkt_type(skb, peer);
     skb->dev = peer;
     entry = rcu_dereference(nk->active);
     if (entry)
@@ -85,7 +87,7 @@ static netdev_tx_t netkit_xmit(struct sk_buff *skb, struct net_device *dev)
     switch (ret) {
     case NETKIT_NEXT:
     case NETKIT_PASS:
-        skb->protocol = eth_type_trans(skb, skb->dev);
+        eth_skb_pull_mac(skb);
         skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
         if (likely(__netif_rx(skb) == NET_RX_SUCCESS)) {
             dev_sw_netstats_tx_add(dev, 1, len);
@@ -155,6 +157,16 @@ static void netkit_set_multicast(struct net_device *dev)
     /* Nothing to do, we receive whatever gets pushed to us! */
 }
+static int netkit_set_macaddr(struct net_device *dev, void *sa)
+{
+    struct netkit *nk = netkit_priv(dev);
+
+    if (nk->mode != NETKIT_L2)
+        return -EOPNOTSUPP;
+
+    return eth_mac_addr(dev, sa);
+}
+
 static void netkit_set_headroom(struct net_device *dev, int headroom)
 {
     struct netkit *nk = netkit_priv(dev), *nk2;
@@ -198,6 +210,7 @@ static const struct net_device_ops netkit_netdev_ops = {
     .ndo_start_xmit = netkit_xmit,
     .ndo_set_rx_mode = netkit_set_multicast,
     .ndo_set_rx_headroom = netkit_set_headroom,
+    .ndo_set_mac_address = netkit_set_macaddr,
     .ndo_get_iflink = netkit_get_iflink,
     .ndo_get_peer_dev = netkit_peer_dev,
     .ndo_get_stats64 = netkit_get_stats,
@@ -300,9 +313,11 @@ static int netkit_validate(struct nlattr *tb[], struct nlattr *data[],
     if (!attr)
         return 0;
-    NL_SET_ERR_MSG_ATTR(extack, attr,
-                        "Setting Ethernet address is not supported");
-    return -EOPNOTSUPP;
+    if (nla_len(attr) != ETH_ALEN)
+        return -EINVAL;
+    if (!is_valid_ether_addr(nla_data(attr)))
+        return -EADDRNOTAVAIL;
+    return 0;
 }
 static struct rtnl_link_ops netkit_link_ops;
@@ -365,6 +380,9 @@ static int netkit_new_link(struct net *src_net, struct net_device *dev,
         strscpy(ifname, "nk%d", IFNAMSIZ);
         ifname_assign_type = NET_NAME_ENUM;
     }
+    if (mode != NETKIT_L2 &&
+        (tb[IFLA_ADDRESS] || tbp[IFLA_ADDRESS]))
+        return -EOPNOTSUPP;
+
     net = rtnl_link_get_net(src_net, tbp);
     if (IS_ERR(net))
@@ -379,7 +397,7 @@ static int netkit_new_link(struct net *src_net, struct net_device *dev,
     netif_inherit_tso_max(peer, dev);
-    if (mode == NETKIT_L2)
+    if (mode == NETKIT_L2 && !(ifmp && tbp[IFLA_ADDRESS]))
         eth_hw_addr_random(peer);
     if (ifmp && dev->ifindex)
         peer->ifindex = ifmp->ifi_index;
@@ -402,7 +420,7 @@ static int netkit_new_link(struct net *src_net, struct net_device *dev,
     if (err < 0)
         goto err_configure_peer;
-    if (mode == NETKIT_L2)
+    if (mode == NETKIT_L2 && !tb[IFLA_ADDRESS])
         eth_hw_addr_random(dev);
     if (tb[IFLA_IFNAME])
         nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
...
@@ -636,6 +636,14 @@ static inline void eth_skb_pkt_type(struct sk_buff *skb,
     }
 }
+static inline struct ethhdr *eth_skb_pull_mac(struct sk_buff *skb)
+{
+    struct ethhdr *eth = (struct ethhdr *)skb->data;
+
+    skb_pull_inline(skb, ETH_HLEN);
+    return eth;
+}
+
 /**
  * eth_skb_pad - Pad buffer to mininum number of octets for Ethernet frame
  * @skb: Buffer to pad
...
@@ -8882,7 +8882,8 @@ static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
     enum bpf_attach_type eatype = env->prog->expected_attach_type;
     enum bpf_prog_type type = resolve_prog_type(env->prog);
-    if (func_id != BPF_FUNC_map_update_elem)
+    if (func_id != BPF_FUNC_map_update_elem &&
+        func_id != BPF_FUNC_map_delete_elem)
         return false;
     /* It's not possible to get access to a locked struct sock in these
@@ -8893,6 +8894,11 @@ static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
         if (eatype == BPF_TRACE_ITER)
             return true;
         break;
+    case BPF_PROG_TYPE_SOCK_OPS:
+        /* map_update allowed only via dedicated helpers with event type checks */
+        if (func_id == BPF_FUNC_map_delete_elem)
+            return true;
+        break;
     case BPF_PROG_TYPE_SOCKET_FILTER:
     case BPF_PROG_TYPE_SCHED_CLS:
     case BPF_PROG_TYPE_SCHED_ACT:
@@ -8988,7 +8994,6 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
     case BPF_MAP_TYPE_SOCKMAP:
         if (func_id != BPF_FUNC_sk_redirect_map &&
             func_id != BPF_FUNC_sock_map_update &&
-            func_id != BPF_FUNC_map_delete_elem &&
             func_id != BPF_FUNC_msg_redirect_map &&
             func_id != BPF_FUNC_sk_select_reuseport &&
             func_id != BPF_FUNC_map_lookup_elem &&
@@ -8998,7 +9003,6 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
     case BPF_MAP_TYPE_SOCKHASH:
         if (func_id != BPF_FUNC_sk_redirect_hash &&
             func_id != BPF_FUNC_sock_hash_update &&
-            func_id != BPF_FUNC_map_delete_elem &&
             func_id != BPF_FUNC_msg_redirect_hash &&
             func_id != BPF_FUNC_sk_select_reuseport &&
             func_id != BPF_FUNC_map_lookup_elem &&
...
@@ -3295,7 +3295,7 @@ static int uprobe_prog_run(struct bpf_uprobe *uprobe,
     struct bpf_run_ctx *old_run_ctx;
     int err = 0;
-    if (link->task && current != link->task)
+    if (link->task && current->mm != link->task->mm)
         return 0;
     if (sleepable)
@@ -3396,8 +3396,9 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
     upath = u64_to_user_ptr(attr->link_create.uprobe_multi.path);
     uoffsets = u64_to_user_ptr(attr->link_create.uprobe_multi.offsets);
     cnt = attr->link_create.uprobe_multi.cnt;
+    pid = attr->link_create.uprobe_multi.pid;
 
-    if (!upath || !uoffsets || !cnt)
+    if (!upath || !uoffsets || !cnt || pid < 0)
         return -EINVAL;
     if (cnt > MAX_UPROBE_MULTI_CNT)
         return -E2BIG;
@@ -3421,11 +3422,8 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
         goto error_path_put;
     }
-    pid = attr->link_create.uprobe_multi.pid;
     if (pid) {
-        rcu_read_lock();
-        task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
-        rcu_read_unlock();
+        task = get_pid_task(find_vpid(pid), PIDTYPE_TGID);
         if (!task) {
             err = -ESRCH;
             goto error_path_put;
...
@@ -423,9 +423,6 @@ static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
     struct sock *sk;
     int err = 0;
-    if (irqs_disabled())
-        return -EOPNOTSUPP; /* locks here are hardirq-unsafe */
-
     spin_lock_bh(&stab->lock);
     sk = *psk;
     if (!sk_test || sk_test == sk)
@@ -948,9 +945,6 @@ static long sock_hash_delete_elem(struct bpf_map *map, void *key)
     struct bpf_shtab_elem *elem;
     int ret = -ENOENT;
-    if (irqs_disabled())
-        return -EOPNOTSUPP; /* locks here are hardirq-unsafe */
-
     hash = sock_hash_bucket_hash(key, key_size);
     bucket = sock_hash_select_bucket(htab, hash);
...
@@ -161,9 +161,7 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
     skb->dev = dev;
     skb_reset_mac_header(skb);
-    eth = (struct ethhdr *)skb->data;
-    skb_pull_inline(skb, ETH_HLEN);
+    eth = eth_skb_pull_mac(skb);
     eth_skb_pkt_type(skb, dev);
     /*
...
@@ -728,7 +728,7 @@ static int sets_patch(struct object *obj)
 static int symbols_patch(struct object *obj)
 {
-    int err;
+    off_t err;
     if (__symbols_patch(obj, &obj->structs) ||
         __symbols_patch(obj, &obj->unions) ||
...
@@ -392,11 +392,40 @@ static int probe_uprobe_multi_link(int token_fd)
     link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts);
     err = -errno; /* close() can clobber errno */
+    if (link_fd >= 0 || err != -EBADF) {
+        close(link_fd);
+        close(prog_fd);
+        return 0;
+    }
+
+    /* Initial multi-uprobe support in kernel didn't handle PID filtering
+     * correctly (it was doing thread filtering, not process filtering).
+     * So now we'll detect if PID filtering logic was fixed, and, if not,
+     * we'll pretend multi-uprobes are not supported, if not.
+     * Multi-uprobes are used in USDT attachment logic, and we need to be
+     * conservative here, because multi-uprobe selection happens early at
+     * load time, while the use of PID filtering is known late at
+     * attachment time, at which point it's too late to undo multi-uprobe
+     * selection.
+     *
+     * Creating uprobe with pid == -1 for (invalid) '/' binary will fail
+     * early with -EINVAL on kernels with fixed PID filtering logic;
+     * otherwise -ESRCH would be returned if passed correct binary path
+     * (but we'll just get -BADF, of course).
+     */
+    link_opts.uprobe_multi.pid = -1; /* invalid PID */
+    link_opts.uprobe_multi.path = "/"; /* invalid path */
+    link_opts.uprobe_multi.offsets = &offset;
+    link_opts.uprobe_multi.cnt = 1;
+
+    link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts);
+    err = -errno; /* close() can clobber errno */
+
     if (link_fd >= 0)
         close(link_fd);
     close(prog_fd);
-    return link_fd < 0 && err == -EBADF;
+    return link_fd < 0 && err == -EINVAL;
 }
 static int probe_kern_bpf_cookie(int token_fd)
...
@@ -73,6 +73,16 @@ static int create_netkit(int mode, int policy, int peer_policy, int *ifindex,
           "up primary");
     ASSERT_OK(system("ip addr add dev " netkit_name " 10.0.0.1/24"),
           "addr primary");
+    if (mode == NETKIT_L3) {
+        ASSERT_EQ(system("ip link set dev " netkit_name
+                 " addr ee:ff:bb:cc:aa:dd 2> /dev/null"), 512,
+                 "set hwaddress");
+    } else {
+        ASSERT_OK(system("ip link set dev " netkit_name
+                 " addr ee:ff:bb:cc:aa:dd"),
+                 "set hwaddress");
+    }
     if (same_netns) {
         ASSERT_OK(system("ip link set dev " netkit_peer " up"),
               "up peer");
@@ -89,6 +99,16 @@ static int create_netkit(int mode, int policy, int peer_policy, int *ifindex,
     return err;
 }
+static void move_netkit(void)
+{
+    ASSERT_OK(system("ip link set " netkit_peer " netns foo"),
+          "move peer");
+    ASSERT_OK(system("ip netns exec foo ip link set dev "
+             netkit_peer " up"), "up peer");
+    ASSERT_OK(system("ip netns exec foo ip addr add dev "
+             netkit_peer " 10.0.0.2/24"), "addr peer");
+}
+
 static void destroy_netkit(void)
 {
     ASSERT_OK(system("ip link del dev " netkit_name), "del primary");
@@ -685,3 +705,77 @@ void serial_test_tc_netkit_neigh_links(void)
     serial_test_tc_netkit_neigh_links_target(NETKIT_L2, BPF_NETKIT_PRIMARY);
     serial_test_tc_netkit_neigh_links_target(NETKIT_L3, BPF_NETKIT_PRIMARY);
 }
+
+static void serial_test_tc_netkit_pkt_type_mode(int mode)
+{
+    LIBBPF_OPTS(bpf_netkit_opts, optl_nk);
+    LIBBPF_OPTS(bpf_tcx_opts, optl_tcx);
+    int err, ifindex, ifindex2;
+    struct test_tc_link *skel;
+    struct bpf_link *link;
+
+    err = create_netkit(mode, NETKIT_PASS, NETKIT_PASS,
+                &ifindex, true);
+    if (err)
+        return;
+
+    ifindex2 = if_nametoindex(netkit_peer);
+    ASSERT_NEQ(ifindex, ifindex2, "ifindex_1_2");
+
+    skel = test_tc_link__open();
+    if (!ASSERT_OK_PTR(skel, "skel_open"))
+        goto cleanup;
+
+    ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1,
+          BPF_NETKIT_PRIMARY), 0, "tc1_attach_type");
+    ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc7,
+          BPF_TCX_INGRESS), 0, "tc7_attach_type");
+
+    err = test_tc_link__load(skel);
+    if (!ASSERT_OK(err, "skel_load"))
+        goto cleanup;
+
+    assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 0);
+    assert_mprog_count_ifindex(ifindex2, BPF_TCX_INGRESS, 0);
+
+    link = bpf_program__attach_netkit(skel->progs.tc1, ifindex, &optl_nk);
+    if (!ASSERT_OK_PTR(link, "link_attach"))
+        goto cleanup;
+
+    skel->links.tc1 = link;
+
+    assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 1);
+    assert_mprog_count_ifindex(ifindex2, BPF_TCX_INGRESS, 0);
+
+    link = bpf_program__attach_tcx(skel->progs.tc7, ifindex2, &optl_tcx);
+    if (!ASSERT_OK_PTR(link, "link_attach"))
+        goto cleanup;
+
+    skel->links.tc7 = link;
+
+    assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 1);
+    assert_mprog_count_ifindex(ifindex2, BPF_TCX_INGRESS, 1);
+
+    move_netkit();
+
+    tc_skel_reset_all_seen(skel);
+    skel->bss->set_type = true;
+    ASSERT_EQ(send_icmp(), 0, "icmp_pkt");
+
+    ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
+    ASSERT_EQ(skel->bss->seen_tc7, true, "seen_tc7");
+
+    ASSERT_EQ(skel->bss->seen_host, true, "seen_host");
+    ASSERT_EQ(skel->bss->seen_mcast, true, "seen_mcast");
+cleanup:
+    test_tc_link__destroy(skel);
+
+    assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 0);
+    destroy_netkit();
+}
+
+void serial_test_tc_netkit_pkt_type(void)
+{
+    serial_test_tc_netkit_pkt_type_mode(NETKIT_L2);
+    serial_test_tc_netkit_pkt_type_mode(NETKIT_L3);
+}
...
 // SPDX-License-Identifier: GPL-2.0
 #include <unistd.h>
+#include <pthread.h>
 #include <test_progs.h>
 #include "uprobe_multi.skel.h"
 #include "uprobe_multi_bench.skel.h"
 #include "uprobe_multi_usdt.skel.h"
 #include "bpf/libbpf_internal.h"
 #include "testing_helpers.h"
+#include "../sdt.h"
 static char test_data[] = "test_data";
@@ -25,9 +27,17 @@ noinline void uprobe_multi_func_3(void)
     asm volatile ("");
 }
+noinline void usdt_trigger(void)
+{
+    STAP_PROBE(test, pid_filter_usdt);
+}
+
 struct child {
     int go[2];
+    int c2p[2]; /* child -> parent channel */
     int pid;
+    int tid;
+    pthread_t thread;
 };
 static void release_child(struct child *child)
@@ -38,6 +48,10 @@ static void release_child(struct child *child)
         return;
     close(child->go[1]);
     close(child->go[0]);
+    if (child->thread)
+        pthread_join(child->thread, NULL);
+    close(child->c2p[0]);
+    close(child->c2p[1]);
     if (child->pid > 0)
         waitpid(child->pid, &child_status, 0);
 }
@@ -63,7 +77,7 @@ static struct child *spawn_child(void)
     if (pipe(child.go))
         return NULL;
-    child.pid = fork();
+    child.pid = child.tid = fork();
     if (child.pid < 0) {
         release_child(&child);
         errno = EINVAL;
@@ -82,6 +96,7 @@ static struct child *spawn_child(void)
         uprobe_multi_func_1();
         uprobe_multi_func_2();
         uprobe_multi_func_3();
+        usdt_trigger();
         exit(errno);
     }
@@ -89,6 +104,67 @@ static struct child *spawn_child(void)
     return &child;
 }
+static void *child_thread(void *ctx)
+{
+    struct child *child = ctx;
+    int c = 0, err;
+
+    child->tid = syscall(SYS_gettid);
+
+    /* let parent know we are ready */
+    err = write(child->c2p[1], &c, 1);
+    if (err != 1)
+        pthread_exit(&err);
+
+    /* wait for parent's kick */
+    err = read(child->go[0], &c, 1);
+    if (err != 1)
+        pthread_exit(&err);
+
+    uprobe_multi_func_1();
+    uprobe_multi_func_2();
+    uprobe_multi_func_3();
+    usdt_trigger();
+
+    err = 0;
+    pthread_exit(&err);
+}
+
+static struct child *spawn_thread(void)
+{
+    static struct child child;
+    int c, err;
+
+    /* pipe to notify child to execute the trigger functions */
+    if (pipe(child.go))
+        return NULL;
+
+    /* pipe to notify parent that child thread is ready */
+    if (pipe(child.c2p)) {
+        close(child.go[0]);
+        close(child.go[1]);
+        return NULL;
+    }
+
+    child.pid = getpid();
+
+    err = pthread_create(&child.thread, NULL, child_thread, &child);
+    if (err) {
+        err = -errno;
+        close(child.go[0]);
+        close(child.go[1]);
+        close(child.c2p[0]);
+        close(child.c2p[1]);
+        errno = -err;
+        return NULL;
+    }
+
+    err = read(child.c2p[0], &c, 1);
+    if (!ASSERT_EQ(err, 1, "child_thread_ready"))
+        return NULL;
+
+    return &child;
+}
+
 static void uprobe_multi_test_run(struct uprobe_multi *skel, struct child *child)
 {
     skel->bss->uprobe_multi_func_1_addr = (__u64) uprobe_multi_func_1;
@@ -103,15 +179,23 @@ static void uprobe_multi_test_run(struct uprobe_multi *skel, struct child *child
      * passed at the probe attach.
      */
     skel->bss->pid = child ? 0 : getpid();
+    skel->bss->expect_pid = child ? child->pid : 0;
+
+    /* trigger all probes, if we are testing child *process*, just to make
+     * sure that PID filtering doesn't let through activations from wrong
+     * PIDs; when we test child *thread*, we don't want to do this to
+     * avoid double counting number of triggering events
+     */
+    if (!child || !child->thread) {
+        uprobe_multi_func_1();
+        uprobe_multi_func_2();
+        uprobe_multi_func_3();
+        usdt_trigger();
+    }
     if (child)
         kick_child(child);
-    /* trigger all probes */
-    uprobe_multi_func_1();
-    uprobe_multi_func_2();
-    uprobe_multi_func_3();
-
     /*
      * There are 2 entry and 2 exit probe called for each uprobe_multi_func_[123]
      * function and each slepable probe (6) increments uprobe_multi_sleep_result.
@@ -126,8 +210,12 @@ static void uprobe_multi_test_run(struct uprobe_multi *skel, struct child *child
     ASSERT_EQ(skel->bss->uprobe_multi_sleep_result, 6, "uprobe_multi_sleep_result");
-    if (child)
+    ASSERT_FALSE(skel->bss->bad_pid_seen, "bad_pid_seen");
+
+    if (child) {
         ASSERT_EQ(skel->bss->child_pid, child->pid, "uprobe_multi_child_pid");
+        ASSERT_EQ(skel->bss->child_tid, child->tid, "uprobe_multi_child_tid");
+    }
 }
 static void test_skel_api(void)
@@ -190,8 +278,24 @@ __test_attach_api(const char *binary, const char *pattern, struct bpf_uprobe_mul
     if (!ASSERT_OK_PTR(skel->links.uprobe_extra, "bpf_program__attach_uprobe_multi"))
         goto cleanup;
+    /* Attach (uprobe-backed) USDTs */
+    skel->links.usdt_pid = bpf_program__attach_usdt(skel->progs.usdt_pid, pid, binary,
+                            "test", "pid_filter_usdt", NULL);
+    if (!ASSERT_OK_PTR(skel->links.usdt_pid, "attach_usdt_pid"))
+        goto cleanup;
+
+    skel->links.usdt_extra = bpf_program__attach_usdt(skel->progs.usdt_extra, -1, binary,
+                              "test", "pid_filter_usdt", NULL);
+    if (!ASSERT_OK_PTR(skel->links.usdt_extra, "attach_usdt_extra"))
+        goto cleanup;
+
     uprobe_multi_test_run(skel, child);
+
+    ASSERT_FALSE(skel->bss->bad_pid_seen_usdt, "bad_pid_seen_usdt");
+    if (child) {
+        ASSERT_EQ(skel->bss->child_pid_usdt, child->pid, "usdt_multi_child_pid");
+        ASSERT_EQ(skel->bss->child_tid_usdt, child->tid, "usdt_multi_child_tid");
+    }
 cleanup:
     uprobe_multi__destroy(skel);
 }
@@ -210,6 +314,13 @@ test_attach_api(const char *binary, const char *pattern, struct bpf_uprobe_multi
         return;
     __test_attach_api(binary, pattern, opts, child);
+
+    /* pid filter (thread) */
+    child = spawn_thread();
+    if (!ASSERT_OK_PTR(child, "spawn_thread"))
+        return;
+
+    __test_attach_api(binary, pattern, opts, child);
 }
 static void test_attach_api_pattern(void)
@@ -397,7 +508,7 @@ static void test_attach_api_fails(void)
     link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
     if (!ASSERT_ERR(link_fd, "link_fd"))
         goto cleanup;
-    ASSERT_EQ(link_fd, -ESRCH, "pid_is_wrong");
+    ASSERT_EQ(link_fd, -EINVAL, "pid_is_wrong");
 cleanup:
     if (link_fd >= 0)
@@ -495,6 +606,13 @@ static void test_link_api(void)
         return;
     __test_link_api(child);
+
+    /* pid filter (thread) */
+    child = spawn_thread();
+    if (!ASSERT_OK_PTR(child, "spawn_thread"))
+        return;
+
+    __test_link_api(child);
 }
 static void test_bench_attach_uprobe(void)
...
@@ -67,6 +67,7 @@
 #include "verifier_search_pruning.skel.h"
 #include "verifier_sock.skel.h"
 #include "verifier_sock_addr.skel.h"
+#include "verifier_sockmap_mutate.skel.h"
 #include "verifier_spill_fill.skel.h"
 #include "verifier_spin_lock.skel.h"
 #include "verifier_stack_ptr.skel.h"
@@ -183,6 +184,7 @@ void test_verifier_sdiv(void) { RUN(verifier_sdiv); }
 void test_verifier_search_pruning(void) { RUN(verifier_search_pruning); }
 void test_verifier_sock(void) { RUN(verifier_sock); }
 void test_verifier_sock_addr(void) { RUN(verifier_sock_addr); }
+void test_verifier_sockmap_mutate(void) { RUN(verifier_sockmap_mutate); }
 void test_verifier_spill_fill(void) { RUN(verifier_spill_fill); }
 void test_verifier_spin_lock(void) { RUN(verifier_spin_lock); }
 void test_verifier_stack_ptr(void) { RUN(verifier_stack_ptr); }
...
@@ -4,7 +4,8 @@
 #include <linux/bpf.h>
 #include <linux/if_ether.h>
-#include <linux/stddef.h>
+#include <linux/if_packet.h>
 #include <bpf/bpf_endian.h>
 #include <bpf/bpf_helpers.h>
@@ -16,7 +17,13 @@ bool seen_tc3;
 bool seen_tc4;
 bool seen_tc5;
 bool seen_tc6;
+bool seen_tc7;
+
+bool set_type;
+
 bool seen_eth;
+bool seen_host;
+bool seen_mcast;
 SEC("tc/ingress")
 int tc1(struct __sk_buff *skb)
@@ -28,8 +35,16 @@ int tc1(struct __sk_buff *skb)
     if (bpf_skb_load_bytes(skb, 0, &eth, sizeof(eth)))
         goto out;
     seen_eth = eth.h_proto == bpf_htons(ETH_P_IP);
+    seen_host = skb->pkt_type == PACKET_HOST;
+    if (seen_host && set_type) {
+        eth.h_dest[0] = 4;
+        if (bpf_skb_store_bytes(skb, 0, &eth, sizeof(eth), 0))
+            goto fail;
+        bpf_skb_change_type(skb, PACKET_MULTICAST);
+    }
 out:
     seen_tc1 = true;
+fail:
     return TCX_NEXT;
 }
@@ -67,3 +82,21 @@ int tc6(struct __sk_buff *skb)
     seen_tc6 = true;
     return TCX_PASS;
 }
+
+SEC("tc/ingress")
+int tc7(struct __sk_buff *skb)
+{
+    struct ethhdr eth = {};
+
+    if (skb->protocol != __bpf_constant_htons(ETH_P_IP))
+        goto out;
+    if (bpf_skb_load_bytes(skb, 0, &eth, sizeof(eth)))
+        goto out;
+    if (eth.h_dest[0] == 4 && set_type) {
+        seen_mcast = skb->pkt_type == PACKET_MULTICAST;
+        bpf_skb_change_type(skb, PACKET_HOST);
+    }
+out:
+    seen_tc7 = true;
+    return TCX_PASS;
+}
...
 // SPDX-License-Identifier: GPL-2.0
-#include <linux/bpf.h>
+#include "vmlinux.h"
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
-#include <stdbool.h>
+#include <bpf/usdt.bpf.h>
 char _license[] SEC("license") = "GPL";
@@ -22,6 +22,13 @@ __u64 uprobe_multi_sleep_result = 0;
 int pid = 0;
 int child_pid = 0;
+int child_tid = 0;
+int child_pid_usdt = 0;
+int child_tid_usdt = 0;
+
+int expect_pid = 0;
+bool bad_pid_seen = false;
+bool bad_pid_seen_usdt = false;
 bool test_cookie = false;
 void *user_ptr = 0;
@@ -36,11 +43,19 @@ static __always_inline bool verify_sleepable_user_copy(void)
 static void uprobe_multi_check(void *ctx, bool is_return, bool is_sleep)
 {
-    child_pid = bpf_get_current_pid_tgid() >> 32;
+    __u64 cur_pid_tgid = bpf_get_current_pid_tgid();
+    __u32 cur_pid;
 
-    if (pid && child_pid != pid)
+    cur_pid = cur_pid_tgid >> 32;
+    if (pid && cur_pid != pid)
         return;
+    if (expect_pid && cur_pid != expect_pid)
+        bad_pid_seen = true;
+
+    child_pid = cur_pid_tgid >> 32;
+    child_tid = (__u32)cur_pid_tgid;
+
     __u64 cookie = test_cookie ? bpf_get_attach_cookie(ctx) : 0;
     __u64 addr = bpf_get_func_ip(ctx);
@@ -97,5 +112,32 @@ int uretprobe_sleep(struct pt_regs *ctx)
 SEC("uprobe.multi//proc/self/exe:uprobe_multi_func_*")
 int uprobe_extra(struct pt_regs *ctx)
 {
+    /* we need this one just to mix PID-filtered and global uprobes */
+    return 0;
+}
+
+SEC("usdt")
+int usdt_pid(struct pt_regs *ctx)
+{
+    __u64 cur_pid_tgid = bpf_get_current_pid_tgid();
+    __u32 cur_pid;
+
+    cur_pid = cur_pid_tgid >> 32;
+    if (pid && cur_pid != pid)
+        return 0;
+
+    if (expect_pid && cur_pid != expect_pid)
+        bad_pid_seen_usdt = true;
+
+    child_pid_usdt = cur_pid_tgid >> 32;
+    child_tid_usdt = (__u32)cur_pid_tgid;
+
+    return 0;
+}
+
+SEC("usdt")
+int usdt_extra(struct pt_regs *ctx)
+{
+    /* we need this one just to mix PID-filtered and global USDT probes */
     return 0;
 }
...
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
#define __always_unused __attribute__((unused))
char _license[] SEC("license") = "GPL";
struct sock {
} __attribute__((preserve_access_index));
struct bpf_iter__sockmap {
union {
struct sock *sk;
};
} __attribute__((preserve_access_index));
struct {
__uint(type, BPF_MAP_TYPE_SOCKHASH);
__uint(max_entries, 1);
__type(key, int);
__type(value, int);
} sockhash SEC(".maps");
struct {
__uint(type, BPF_MAP_TYPE_SOCKMAP);
__uint(max_entries, 1);
__type(key, int);
__type(value, int);
} sockmap SEC(".maps");
enum { CG_OK = 1 };
int zero = 0;
static __always_inline void test_sockmap_delete(void)
{
bpf_map_delete_elem(&sockmap, &zero);
bpf_map_delete_elem(&sockhash, &zero);
}
static __always_inline void test_sockmap_update(void *sk)
{
if (sk) {
bpf_map_update_elem(&sockmap, &zero, sk, BPF_ANY);
bpf_map_update_elem(&sockhash, &zero, sk, BPF_ANY);
}
}
static __always_inline void test_sockmap_lookup_and_update(void)
{
struct bpf_sock *sk = bpf_map_lookup_elem(&sockmap, &zero);
if (sk) {
test_sockmap_update(sk);
bpf_sk_release(sk);
}
}
static __always_inline void test_sockmap_mutate(void *sk)
{
test_sockmap_delete();
test_sockmap_update(sk);
}
static __always_inline void test_sockmap_lookup_and_mutate(void)
{
test_sockmap_delete();
test_sockmap_lookup_and_update();
}
SEC("action")
__success
int test_sched_act(struct __sk_buff *skb)
{
test_sockmap_mutate(skb->sk);
return 0;
}
SEC("classifier")
__success
int test_sched_cls(struct __sk_buff *skb)
{
test_sockmap_mutate(skb->sk);
return 0;
}
SEC("flow_dissector")
__success
int test_flow_dissector_delete(struct __sk_buff *skb __always_unused)
{
test_sockmap_delete();
return 0;
}
SEC("flow_dissector")
__failure __msg("program of this type cannot use helper bpf_sk_release")
int test_flow_dissector_update(struct __sk_buff *skb __always_unused)
{
test_sockmap_lookup_and_update(); /* no access to skb->sk */
return 0;
}
SEC("iter/sockmap")
__success
int test_trace_iter(struct bpf_iter__sockmap *ctx)
{
test_sockmap_mutate(ctx->sk);
return 0;
}
SEC("raw_tp/kfree")
__failure __msg("cannot update sockmap in this context")
int test_raw_tp_delete(const void *ctx __always_unused)
{
test_sockmap_delete();
return 0;
}
SEC("raw_tp/kfree")
__failure __msg("cannot update sockmap in this context")
int test_raw_tp_update(const void *ctx __always_unused)
{
test_sockmap_lookup_and_update();
return 0;
}
SEC("sk_lookup")
__success
int test_sk_lookup(struct bpf_sk_lookup *ctx)
{
test_sockmap_mutate(ctx->sk);
return 0;
}
SEC("sk_reuseport")
__success
int test_sk_reuseport(struct sk_reuseport_md *ctx)
{
test_sockmap_mutate(ctx->sk);
return 0;
}
SEC("socket")
__success
int test_socket_filter(struct __sk_buff *skb)
{
test_sockmap_mutate(skb->sk);
return 0;
}
SEC("sockops")
__success
int test_sockops_delete(struct bpf_sock_ops *ctx __always_unused)
{
test_sockmap_delete();
return CG_OK;
}
SEC("sockops")
__failure __msg("cannot update sockmap in this context")
int test_sockops_update(struct bpf_sock_ops *ctx)
{
test_sockmap_update(ctx->sk);
return CG_OK;
}
SEC("sockops")
__success
int test_sockops_update_dedicated(struct bpf_sock_ops *ctx)
{
bpf_sock_map_update(ctx, &sockmap, &zero, BPF_ANY);
bpf_sock_hash_update(ctx, &sockhash, &zero, BPF_ANY);
return CG_OK;
}
SEC("xdp")
__success
int test_xdp(struct xdp_md *ctx __always_unused)
{
test_sockmap_lookup_and_mutate();
return XDP_PASS;
}