Commit 25694738 authored by Alexei Starovoitov

Merge branch 'bpf_tcp_check_syncookie'

Lorenz Bauer says:

====================
This series adds the necessary helpers to determine whether a given
(encapsulated) TCP packet belongs to a connection known to the network stack.

* bpf_skc_lookup_tcp gives access to request and timewait sockets
* bpf_tcp_check_syncookie identifies the final 3WHS ACK when syncookies
  are enabled

The goal is to be able to implement load-balancing approaches like
glb-director [1] or Beamer [2] in pure eBPF. Specifically, we'd like to replace
the functionality of the glb-redirect kernel module [3] with an XDP program or
tc classifier.

Changes in v3:
* Fix missing check for ip4->ihl
* Only cast to unsigned long in BPF_CALLs

Changes in v2:
* Rename bpf_sk_check_syncookie to bpf_tcp_check_syncookie.
* Add bpf_skc_lookup_tcp. Without it bpf_tcp_check_syncookie doesn't make sense.
* Check tcp_synq_no_recent_overflow() in bpf_tcp_check_syncookie.
* Check th->syn in bpf_tcp_check_syncookie.
* Require CONFIG_IPV6 to be built in.

1: https://github.com/github/glb-director
2: https://www.usenix.org/conference/nsdi18/presentation/olteanu
3: https://github.com/github/glb-director/tree/master/src/glb-redirect
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents 48e5d98a bafc0ba8
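
The intended usage pattern is easiest to see up front. A minimal sketch in restricted C, combining the two new helpers the way a glb-redirect replacement would (assuming the IPv4 and TCP headers have already been parsed and bounds-checked; the selftest program further down does this in full):

	struct bpf_sock_tuple tup = {};
	struct bpf_sock *sk;

	tup.ipv4.saddr = iph->saddr;
	tup.ipv4.daddr = iph->daddr;
	tup.ipv4.sport = tcph->source;
	tup.ipv4.dport = tcph->dest;

	/* Unlike bpf_sk_lookup_tcp, this also returns request and
	 * timewait sockets, so half-open connections are visible.
	 */
	sk = bpf_skc_lookup_tcp(ctx, &tup, sizeof(tup.ipv4),
				BPF_F_CURRENT_NETNS, 0);
	if (sk) {
		if (sk->state == BPF_TCP_LISTEN &&
		    bpf_tcp_check_syncookie(sk, iph, sizeof(*iph),
					    tcph, sizeof(*tcph)) == 0) {
			/* valid 3WHS ACK for a syncookie: keep it local */
		}
		bpf_sk_release(sk); /* the acquired reference must be released */
	}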
@@ -205,6 +205,7 @@ enum bpf_return_type {
 	RET_PTR_TO_MAP_VALUE_OR_NULL,	/* returns a pointer to map elem value or NULL */
 	RET_PTR_TO_SOCKET_OR_NULL,	/* returns a pointer to a socket or NULL */
 	RET_PTR_TO_TCP_SOCK_OR_NULL,	/* returns a pointer to a tcp_sock or NULL */
+	RET_PTR_TO_SOCK_COMMON_OR_NULL, /* returns a pointer to a sock_common or NULL */
 };
 
 /* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
...
@@ -2431,6 +2431,38 @@ union bpf_attr {
 *	Return
 *		A **struct bpf_sock** pointer on success, or **NULL** in
 *		case of failure.
+ *
+ * struct bpf_sock *bpf_skc_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
+ *	Description
+ *		Look for TCP socket matching *tuple*, optionally in a child
+ *		network namespace *netns*. The return value must be checked,
+ *		and if non-**NULL**, released via **bpf_sk_release**\ ().
+ *
+ *		This function is identical to bpf_sk_lookup_tcp, except that it
+ *		also returns timewait or request sockets. Use bpf_sk_fullsock
+ *		or bpf_tcp_sock to access the full structure.
+ *
+ *		This helper is available only if the kernel was compiled with
+ *		**CONFIG_NET** configuration option.
+ *	Return
+ *		Pointer to **struct bpf_sock**, or **NULL** in case of failure.
+ *		For sockets with reuseport option, the **struct bpf_sock**
+ *		result is from **reuse->socks**\ [] using the hash of the tuple.
+ *
+ * int bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
+ *	Description
+ *		Check whether iph and th contain a valid SYN cookie ACK for
+ *		the listening socket in sk.
+ *
+ *		iph points to the start of the IPv4 or IPv6 header, while
+ *		iph_len contains sizeof(struct iphdr) or sizeof(struct ipv6hdr).
+ *
+ *		th points to the start of the TCP header, while th_len contains
+ *		sizeof(struct tcphdr).
+ *
+ *	Return
+ *		0 if iph and th are a valid SYN cookie ACK, or a negative error
+ *		otherwise.
 */
 
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\
@@ -2531,7 +2563,9 @@ union bpf_attr {
 	FN(sk_fullsock),		\
 	FN(tcp_sock),			\
 	FN(skb_ecn_set_ce),		\
-	FN(get_listener_sock),
+	FN(get_listener_sock),		\
+	FN(skc_lookup_tcp),		\
+	FN(tcp_check_syncookie),
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
  * function eBPF program intends to call
...
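
For context, __BPF_FUNC_MAPPER is the single source of helper numbering: the same uapi header expands it into enum bpf_func_id, so appending FN(skc_lookup_tcp) and FN(tcp_check_syncookie) above is what assigns the two new helpers their stable IDs:

	#define __BPF_ENUM_FN(x) BPF_FUNC_ ## x
	enum bpf_func_id {
		__BPF_FUNC_MAPPER(__BPF_ENUM_FN)
		__BPF_FUNC_MAX_ID,
	};
	#undef __BPF_ENUM_FN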
@@ -369,7 +369,8 @@ static bool is_release_function(enum bpf_func_id func_id)
 static bool is_acquire_function(enum bpf_func_id func_id)
 {
 	return func_id == BPF_FUNC_sk_lookup_tcp ||
-		func_id == BPF_FUNC_sk_lookup_udp;
+		func_id == BPF_FUNC_sk_lookup_udp ||
+		func_id == BPF_FUNC_skc_lookup_tcp;
 }
 
 static bool is_ptr_cast_function(enum bpf_func_id func_id)
@@ -3147,19 +3148,11 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
 	} else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) {
 		mark_reg_known_zero(env, regs, BPF_REG_0);
 		regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL;
-		if (is_acquire_function(func_id)) {
-			int id = acquire_reference_state(env, insn_idx);
-
-			if (id < 0)
-				return id;
-			/* For mark_ptr_or_null_reg() */
-			regs[BPF_REG_0].id = id;
-			/* For release_reference() */
-			regs[BPF_REG_0].ref_obj_id = id;
-		} else {
-			/* For mark_ptr_or_null_reg() */
-			regs[BPF_REG_0].id = ++env->id_gen;
-		}
+		regs[BPF_REG_0].id = ++env->id_gen;
+	} else if (fn->ret_type == RET_PTR_TO_SOCK_COMMON_OR_NULL) {
+		mark_reg_known_zero(env, regs, BPF_REG_0);
+		regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON_OR_NULL;
+		regs[BPF_REG_0].id = ++env->id_gen;
 	} else if (fn->ret_type == RET_PTR_TO_TCP_SOCK_OR_NULL) {
 		mark_reg_known_zero(env, regs, BPF_REG_0);
 		regs[BPF_REG_0].type = PTR_TO_TCP_SOCK_OR_NULL;
@@ -3170,9 +3163,19 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
 		return -EINVAL;
 	}
 
-	if (is_ptr_cast_function(func_id))
+	if (is_ptr_cast_function(func_id)) {
 		/* For release_reference() */
 		regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
+	} else if (is_acquire_function(func_id)) {
+		int id = acquire_reference_state(env, insn_idx);
+
+		if (id < 0)
+			return id;
+		/* For mark_ptr_or_null_reg() */
+		regs[BPF_REG_0].id = id;
+		/* For release_reference() */
+		regs[BPF_REG_0].ref_obj_id = id;
+	}
 
 	do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
...
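
Since skc_lookup_tcp is now an acquire function, the verifier tracks its return value as a referenced PTR_TO_SOCK_COMMON_OR_NULL: the program must NULL-check the pointer and eventually pass it to bpf_sk_release(), otherwise loading fails with an "Unreleased reference" error (see the new ref_tracking test cases below). The enforced pattern, as a minimal restricted-C sketch with an already-filled tuple:

	struct bpf_sock *sk;

	sk = bpf_skc_lookup_tcp(skb, &tup, sizeof(tup.ipv4),
				BPF_F_CURRENT_NETNS, 0);
	if (!sk)
		return TC_ACT_OK;
	/* ... inspect sk->state, call bpf_tcp_check_syncookie(), ... */
	bpf_sk_release(sk);	/* releases the reference acquired above */
	return TC_ACT_OK;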
@@ -5156,15 +5156,15 @@ static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
 	return sk;
 }
 
-/* bpf_sk_lookup performs the core lookup for different types of sockets,
+/* bpf_skc_lookup performs the core lookup for different types of sockets,
  * taking a reference on the socket if it doesn't have the flag SOCK_RCU_FREE.
  * Returns the socket as an 'unsigned long' to simplify the casting in the
  * callers to satisfy BPF_CALL declarations.
  */
-static unsigned long
-__bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
-		struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id,
-		u64 flags)
+static struct sock *
+__bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
+		 struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id,
+		 u64 flags)
 {
 	struct sock *sk = NULL;
 	u8 family = AF_UNSPEC;
@@ -5192,15 +5192,27 @@ __bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
 		put_net(net);
 	}
 
+out:
+	return sk;
+}
+
+static struct sock *
+__bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
+		struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id,
+		u64 flags)
+{
+	struct sock *sk = __bpf_skc_lookup(skb, tuple, len, caller_net,
+					   ifindex, proto, netns_id, flags);
+
 	if (sk)
 		sk = sk_to_full_sk(sk);
-out:
-	return (unsigned long) sk;
+
+	return sk;
 }
 
-static unsigned long
-bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
-	      u8 proto, u64 netns_id, u64 flags)
+static struct sock *
+bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
+	       u8 proto, u64 netns_id, u64 flags)
 {
 	struct net *caller_net;
 	int ifindex;
@@ -5213,14 +5225,47 @@ bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
 		ifindex = 0;
 	}
 
-	return __bpf_sk_lookup(skb, tuple, len, caller_net, ifindex,
-			       proto, netns_id, flags);
+	return __bpf_skc_lookup(skb, tuple, len, caller_net, ifindex, proto,
+				netns_id, flags);
+}
+
+static struct sock *
+bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
+	      u8 proto, u64 netns_id, u64 flags)
+{
+	struct sock *sk = bpf_skc_lookup(skb, tuple, len, proto, netns_id,
+					 flags);
+
+	if (sk)
+		sk = sk_to_full_sk(sk);
+
+	return sk;
 }
 
+BPF_CALL_5(bpf_skc_lookup_tcp, struct sk_buff *, skb,
+	   struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
+{
+	return (unsigned long)bpf_skc_lookup(skb, tuple, len, IPPROTO_TCP,
+					     netns_id, flags);
+}
+
+static const struct bpf_func_proto bpf_skc_lookup_tcp_proto = {
+	.func		= bpf_skc_lookup_tcp,
+	.gpl_only	= false,
+	.pkt_access	= true,
+	.ret_type	= RET_PTR_TO_SOCK_COMMON_OR_NULL,
+	.arg1_type	= ARG_PTR_TO_CTX,
+	.arg2_type	= ARG_PTR_TO_MEM,
+	.arg3_type	= ARG_CONST_SIZE,
+	.arg4_type	= ARG_ANYTHING,
+	.arg5_type	= ARG_ANYTHING,
+};
+
 BPF_CALL_5(bpf_sk_lookup_tcp, struct sk_buff *, skb,
 	   struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
 {
-	return bpf_sk_lookup(skb, tuple, len, IPPROTO_TCP, netns_id, flags);
+	return (unsigned long)bpf_sk_lookup(skb, tuple, len, IPPROTO_TCP,
+					    netns_id, flags);
 }
 
 static const struct bpf_func_proto bpf_sk_lookup_tcp_proto = {
@@ -5238,7 +5283,8 @@ static const struct bpf_func_proto bpf_sk_lookup_tcp_proto = {
 BPF_CALL_5(bpf_sk_lookup_udp, struct sk_buff *, skb,
 	   struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
 {
-	return bpf_sk_lookup(skb, tuple, len, IPPROTO_UDP, netns_id, flags);
+	return (unsigned long)bpf_sk_lookup(skb, tuple, len, IPPROTO_UDP,
+					    netns_id, flags);
 }
 
 static const struct bpf_func_proto bpf_sk_lookup_udp_proto = {
@@ -5273,8 +5319,9 @@ BPF_CALL_5(bpf_xdp_sk_lookup_udp, struct xdp_buff *, ctx,
 	struct net *caller_net = dev_net(ctx->rxq->dev);
 	int ifindex = ctx->rxq->dev->ifindex;
 
-	return __bpf_sk_lookup(NULL, tuple, len, caller_net, ifindex,
-			       IPPROTO_UDP, netns_id, flags);
+	return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, caller_net,
+					      ifindex, IPPROTO_UDP, netns_id,
+					      flags);
 }
 
 static const struct bpf_func_proto bpf_xdp_sk_lookup_udp_proto = {
@@ -5289,14 +5336,38 @@ static const struct bpf_func_proto bpf_xdp_sk_lookup_udp_proto = {
 	.arg5_type      = ARG_ANYTHING,
 };
 
+BPF_CALL_5(bpf_xdp_skc_lookup_tcp, struct xdp_buff *, ctx,
+	   struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
+{
+	struct net *caller_net = dev_net(ctx->rxq->dev);
+	int ifindex = ctx->rxq->dev->ifindex;
+
+	return (unsigned long)__bpf_skc_lookup(NULL, tuple, len, caller_net,
+					       ifindex, IPPROTO_TCP, netns_id,
+					       flags);
+}
+
+static const struct bpf_func_proto bpf_xdp_skc_lookup_tcp_proto = {
+	.func           = bpf_xdp_skc_lookup_tcp,
+	.gpl_only       = false,
+	.pkt_access     = true,
+	.ret_type       = RET_PTR_TO_SOCK_COMMON_OR_NULL,
+	.arg1_type      = ARG_PTR_TO_CTX,
+	.arg2_type      = ARG_PTR_TO_MEM,
+	.arg3_type      = ARG_CONST_SIZE,
+	.arg4_type      = ARG_ANYTHING,
+	.arg5_type      = ARG_ANYTHING,
+};
+
 BPF_CALL_5(bpf_xdp_sk_lookup_tcp, struct xdp_buff *, ctx,
 	   struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
 {
 	struct net *caller_net = dev_net(ctx->rxq->dev);
 	int ifindex = ctx->rxq->dev->ifindex;
 
-	return __bpf_sk_lookup(NULL, tuple, len, caller_net, ifindex,
-			       IPPROTO_TCP, netns_id, flags);
+	return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, caller_net,
+					      ifindex, IPPROTO_TCP, netns_id,
+					      flags);
 }
 
 static const struct bpf_func_proto bpf_xdp_sk_lookup_tcp_proto = {
@@ -5311,11 +5382,31 @@ static const struct bpf_func_proto bpf_xdp_sk_lookup_tcp_proto = {
 	.arg5_type      = ARG_ANYTHING,
 };
 
+BPF_CALL_5(bpf_sock_addr_skc_lookup_tcp, struct bpf_sock_addr_kern *, ctx,
+	   struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
+{
+	return (unsigned long)__bpf_skc_lookup(NULL, tuple, len,
+					       sock_net(ctx->sk), 0,
+					       IPPROTO_TCP, netns_id, flags);
+}
+
+static const struct bpf_func_proto bpf_sock_addr_skc_lookup_tcp_proto = {
+	.func		= bpf_sock_addr_skc_lookup_tcp,
+	.gpl_only	= false,
+	.ret_type	= RET_PTR_TO_SOCK_COMMON_OR_NULL,
+	.arg1_type	= ARG_PTR_TO_CTX,
+	.arg2_type	= ARG_PTR_TO_MEM,
+	.arg3_type	= ARG_CONST_SIZE,
+	.arg4_type	= ARG_ANYTHING,
+	.arg5_type	= ARG_ANYTHING,
+};
+
 BPF_CALL_5(bpf_sock_addr_sk_lookup_tcp, struct bpf_sock_addr_kern *, ctx,
 	   struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
 {
-	return __bpf_sk_lookup(NULL, tuple, len, sock_net(ctx->sk), 0,
-			       IPPROTO_TCP, netns_id, flags);
+	return (unsigned long)__bpf_sk_lookup(NULL, tuple, len,
+					      sock_net(ctx->sk), 0, IPPROTO_TCP,
+					      netns_id, flags);
 }
 
 static const struct bpf_func_proto bpf_sock_addr_sk_lookup_tcp_proto = {
@@ -5332,8 +5423,9 @@ static const struct bpf_func_proto bpf_sock_addr_sk_lookup_tcp_proto = {
 BPF_CALL_5(bpf_sock_addr_sk_lookup_udp, struct bpf_sock_addr_kern *, ctx,
 	   struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
 {
-	return __bpf_sk_lookup(NULL, tuple, len, sock_net(ctx->sk), 0,
-			       IPPROTO_UDP, netns_id, flags);
+	return (unsigned long)__bpf_sk_lookup(NULL, tuple, len,
+					      sock_net(ctx->sk), 0, IPPROTO_UDP,
+					      netns_id, flags);
 }
 
 static const struct bpf_func_proto bpf_sock_addr_sk_lookup_udp_proto = {
@@ -5461,6 +5553,74 @@ static const struct bpf_func_proto bpf_skb_ecn_set_ce_proto = {
 	.ret_type	= RET_INTEGER,
 	.arg1_type	= ARG_PTR_TO_CTX,
 };
 
+BPF_CALL_5(bpf_tcp_check_syncookie, struct sock *, sk, void *, iph, u32, iph_len,
+	   struct tcphdr *, th, u32, th_len)
+{
+#ifdef CONFIG_SYN_COOKIES
+	u32 cookie;
+	int ret;
+
+	if (unlikely(th_len < sizeof(*th)))
+		return -EINVAL;
+
+	/* sk_listener() allows TCP_NEW_SYN_RECV, which makes no sense here. */
+	if (sk->sk_protocol != IPPROTO_TCP || sk->sk_state != TCP_LISTEN)
+		return -EINVAL;
+
+	if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies)
+		return -EINVAL;
+
+	if (!th->ack || th->rst || th->syn)
+		return -ENOENT;
+
+	if (tcp_synq_no_recent_overflow(sk))
+		return -ENOENT;
+
+	cookie = ntohl(th->ack_seq) - 1;
+
+	switch (sk->sk_family) {
+	case AF_INET:
+		if (unlikely(iph_len < sizeof(struct iphdr)))
+			return -EINVAL;
+
+		ret = __cookie_v4_check((struct iphdr *)iph, th, cookie);
+		break;
+
+#if IS_BUILTIN(CONFIG_IPV6)
+	case AF_INET6:
+		if (unlikely(iph_len < sizeof(struct ipv6hdr)))
+			return -EINVAL;
+
+		ret = __cookie_v6_check((struct ipv6hdr *)iph, th, cookie);
+		break;
+#endif /* CONFIG_IPV6 */
+
+	default:
+		return -EPROTONOSUPPORT;
+	}
+
+	if (ret > 0)
+		return 0;
+
+	return -ENOENT;
+#else
+	return -ENOTSUPP;
+#endif
+}
+
+static const struct bpf_func_proto bpf_tcp_check_syncookie_proto = {
+	.func		= bpf_tcp_check_syncookie,
+	.gpl_only	= true,
+	.pkt_access	= true,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_SOCK_COMMON,
+	.arg2_type	= ARG_PTR_TO_MEM,
+	.arg3_type	= ARG_CONST_SIZE,
+	.arg4_type	= ARG_PTR_TO_MEM,
+	.arg5_type	= ARG_CONST_SIZE,
+};
+
 #endif /* CONFIG_INET */
 
 bool bpf_helper_changes_pkt_data(void *func)
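
One line in bpf_tcp_check_syncookie() above is worth unpacking: cookie = ntohl(th->ack_seq) - 1. With syncookies active, the listener encodes the cookie as the initial sequence number of its SYN-ACK, and the final ACK of the handshake acknowledges that ISN plus one; subtracting one therefore recovers the candidate cookie that __cookie_v4_check()/__cookie_v6_check() validate against the packet's address/port tuple:

	/* SYN-ACK:   server_isn = encode_cookie(saddr, daddr, sport, dport, ...)
	 * final ACK: ack_seq    = server_isn + 1
	 * so:        cookie     = ntohl(th->ack_seq) - 1
	 */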
@@ -5586,6 +5746,8 @@ sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_sock_addr_sk_lookup_udp_proto;
 	case BPF_FUNC_sk_release:
 		return &bpf_sk_release_proto;
+	case BPF_FUNC_skc_lookup_tcp:
+		return &bpf_sock_addr_skc_lookup_tcp_proto;
 #endif /* CONFIG_INET */
 	default:
 		return bpf_base_func_proto(func_id);
@@ -5719,6 +5881,10 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_tcp_sock_proto;
 	case BPF_FUNC_get_listener_sock:
 		return &bpf_get_listener_sock_proto;
+	case BPF_FUNC_skc_lookup_tcp:
+		return &bpf_skc_lookup_tcp_proto;
+	case BPF_FUNC_tcp_check_syncookie:
+		return &bpf_tcp_check_syncookie_proto;
 #endif
 	default:
 		return bpf_base_func_proto(func_id);
@@ -5754,6 +5920,10 @@ xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_xdp_sk_lookup_tcp_proto;
 	case BPF_FUNC_sk_release:
 		return &bpf_sk_release_proto;
+	case BPF_FUNC_skc_lookup_tcp:
+		return &bpf_xdp_skc_lookup_tcp_proto;
+	case BPF_FUNC_tcp_check_syncookie:
+		return &bpf_tcp_check_syncookie_proto;
 #endif
 	default:
 		return bpf_base_func_proto(func_id);
@@ -5846,6 +6016,8 @@ sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_sk_lookup_udp_proto;
 	case BPF_FUNC_sk_release:
 		return &bpf_sk_release_proto;
+	case BPF_FUNC_skc_lookup_tcp:
+		return &bpf_skc_lookup_tcp_proto;
 #endif
 	default:
 		return bpf_base_func_proto(func_id);
...
@@ -2431,6 +2431,38 @@ union bpf_attr {
 *	Return
 *		A **struct bpf_sock** pointer on success, or **NULL** in
 *		case of failure.
+ *
+ * struct bpf_sock *bpf_skc_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
+ *	Description
+ *		Look for TCP socket matching *tuple*, optionally in a child
+ *		network namespace *netns*. The return value must be checked,
+ *		and if non-**NULL**, released via **bpf_sk_release**\ ().
+ *
+ *		This function is identical to bpf_sk_lookup_tcp, except that it
+ *		also returns timewait or request sockets. Use bpf_sk_fullsock
+ *		or bpf_tcp_sock to access the full structure.
+ *
+ *		This helper is available only if the kernel was compiled with
+ *		**CONFIG_NET** configuration option.
+ *	Return
+ *		Pointer to **struct bpf_sock**, or **NULL** in case of failure.
+ *		For sockets with reuseport option, the **struct bpf_sock**
+ *		result is from **reuse->socks**\ [] using the hash of the tuple.
+ *
+ * int bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
+ *	Description
+ *		Check whether iph and th contain a valid SYN cookie ACK for
+ *		the listening socket in sk.
+ *
+ *		iph points to the start of the IPv4 or IPv6 header, while
+ *		iph_len contains sizeof(struct iphdr) or sizeof(struct ipv6hdr).
+ *
+ *		th points to the start of the TCP header, while th_len contains
+ *		sizeof(struct tcphdr).
+ *
+ *	Return
+ *		0 if iph and th are a valid SYN cookie ACK, or a negative error
+ *		otherwise.
 */
 
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\
@@ -2531,7 +2563,9 @@ union bpf_attr {
 	FN(sk_fullsock),		\
 	FN(tcp_sock),			\
 	FN(skb_ecn_set_ce),		\
-	FN(get_listener_sock),
+	FN(get_listener_sock),		\
+	FN(skc_lookup_tcp),		\
+	FN(tcp_check_syncookie),
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
  * function eBPF program intends to call
...
@@ -30,4 +30,5 @@ test_netcnt
 test_section_names
 test_tcpnotify_user
 test_libbpf
+test_tcp_check_syncookie_user
 alu32
@@ -51,7 +51,8 @@ TEST_PROGS := test_kmod.sh \
 	test_skb_cgroup_id.sh \
 	test_flow_dissector.sh \
 	test_xdp_vlan.sh \
-	test_lwt_ip_encap.sh
+	test_lwt_ip_encap.sh \
+	test_tcp_check_syncookie.sh
 
 TEST_PROGS_EXTENDED := with_addr.sh \
 	with_tunnels.sh \
@@ -60,7 +61,7 @@ TEST_PROGS_EXTENDED := with_addr.sh \
 
 # Compile but not part of 'make run_tests'
 TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_user \
-	flow_dissector_load test_flow_dissector
+	flow_dissector_load test_flow_dissector test_tcp_check_syncookie_user
 
 include ../lib.mk
...
@@ -159,6 +159,11 @@ static struct bpf_sock *(*bpf_sk_lookup_tcp)(void *ctx,
 					     int size, unsigned long long netns_id,
 					     unsigned long long flags) =
 	(void *) BPF_FUNC_sk_lookup_tcp;
+static struct bpf_sock *(*bpf_skc_lookup_tcp)(void *ctx,
+					      struct bpf_sock_tuple *tuple,
+					      int size, unsigned long long netns_id,
+					      unsigned long long flags) =
+	(void *) BPF_FUNC_skc_lookup_tcp;
 static struct bpf_sock *(*bpf_sk_lookup_udp)(void *ctx,
 					     struct bpf_sock_tuple *tuple,
 					     int size, unsigned long long netns_id,
@@ -184,6 +189,9 @@ static struct bpf_sock *(*bpf_get_listener_sock)(struct bpf_sock *sk) =
 	(void *) BPF_FUNC_get_listener_sock;
 static int (*bpf_skb_ecn_set_ce)(void *ctx) =
 	(void *) BPF_FUNC_skb_ecn_set_ce;
+static int (*bpf_tcp_check_syncookie)(struct bpf_sock *sk,
+	    void *ip, int ip_len, void *tcp, int tcp_len) =
+	(void *) BPF_FUNC_tcp_check_syncookie;
 
 /* llvm builtin functions that eBPF C program may use to
  * emit BPF_LD_ABS and BPF_LD_IND instructions
...
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Facebook
// Copyright (c) 2019 Cloudflare
#include <string.h>
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <sys/socket.h>
#include <linux/tcp.h>
#include "bpf_helpers.h"
#include "bpf_endian.h"
struct bpf_map_def SEC("maps") results = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(__u64),
.max_entries = 1,
};
static __always_inline void check_syncookie(void *ctx, void *data,
void *data_end)
{
struct bpf_sock_tuple tup;
struct bpf_sock *sk;
struct ethhdr *ethh;
struct iphdr *ipv4h;
struct ipv6hdr *ipv6h;
struct tcphdr *tcph;
int ret;
__u32 key = 0;
__u64 value = 1;
ethh = data;
if (ethh + 1 > data_end)
return;
switch (bpf_ntohs(ethh->h_proto)) {
case ETH_P_IP:
ipv4h = data + sizeof(struct ethhdr);
if (ipv4h + 1 > data_end)
return;
if (ipv4h->ihl != 5)
return;
tcph = data + sizeof(struct ethhdr) + sizeof(struct iphdr);
if (tcph + 1 > data_end)
return;
tup.ipv4.saddr = ipv4h->saddr;
tup.ipv4.daddr = ipv4h->daddr;
tup.ipv4.sport = tcph->source;
tup.ipv4.dport = tcph->dest;
sk = bpf_skc_lookup_tcp(ctx, &tup, sizeof(tup.ipv4),
BPF_F_CURRENT_NETNS, 0);
if (!sk)
return;
if (sk->state != BPF_TCP_LISTEN)
goto release;
ret = bpf_tcp_check_syncookie(sk, ipv4h, sizeof(*ipv4h),
tcph, sizeof(*tcph));
break;
case ETH_P_IPV6:
ipv6h = data + sizeof(struct ethhdr);
if (ipv6h + 1 > data_end)
return;
if (ipv6h->nexthdr != IPPROTO_TCP)
return;
tcph = data + sizeof(struct ethhdr) + sizeof(struct ipv6hdr);
if (tcph + 1 > data_end)
return;
memcpy(tup.ipv6.saddr, &ipv6h->saddr, sizeof(tup.ipv6.saddr));
memcpy(tup.ipv6.daddr, &ipv6h->daddr, sizeof(tup.ipv6.daddr));
tup.ipv6.sport = tcph->source;
tup.ipv6.dport = tcph->dest;
sk = bpf_skc_lookup_tcp(ctx, &tup, sizeof(tup.ipv6),
BPF_F_CURRENT_NETNS, 0);
if (!sk)
return;
if (sk->state != BPF_TCP_LISTEN)
goto release;
ret = bpf_tcp_check_syncookie(sk, ipv6h, sizeof(*ipv6h),
tcph, sizeof(*tcph));
break;
default:
return;
}
if (ret == 0)
bpf_map_update_elem(&results, &key, &value, 0);
release:
bpf_sk_release(sk);
}
SEC("clsact/check_syncookie")
int check_syncookie_clsact(struct __sk_buff *skb)
{
check_syncookie(skb, (void *)(long)skb->data,
(void *)(long)skb->data_end);
return TC_ACT_OK;
}
SEC("xdp/check_syncookie")
int check_syncookie_xdp(struct xdp_md *ctx)
{
check_syncookie(ctx, (void *)(long)ctx->data,
(void *)(long)ctx->data_end);
return XDP_PASS;
}
char _license[] SEC("license") = "GPL";
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2018 Facebook
# Copyright (c) 2019 Cloudflare
set -eu
wait_for_ip()
{
local _i
printf "Wait for IP %s to become available " "$1"
for _i in $(seq ${MAX_PING_TRIES}); do
printf "."
if ns1_exec ping -c 1 -W 1 "$1" >/dev/null 2>&1; then
echo " OK"
return
fi
sleep 1
done
echo 1>&2 "ERROR: Timeout waiting for test IP to become available."
exit 1
}
get_prog_id()
{
awk '/ id / {sub(/.* id /, "", $0); print($1)}'
}
ns1_exec()
{
ip netns exec ns1 "$@"
}
setup()
{
ip netns add ns1
ns1_exec ip link set lo up
ns1_exec sysctl -w net.ipv4.tcp_syncookies=2
wait_for_ip 127.0.0.1
wait_for_ip ::1
}
cleanup()
{
ip netns del ns1 2>/dev/null || :
}
main()
{
trap cleanup EXIT 2 3 6 15
setup
printf "Testing clsact..."
ns1_exec tc qdisc add dev "${TEST_IF}" clsact
ns1_exec tc filter add dev "${TEST_IF}" ingress \
bpf obj "${BPF_PROG_OBJ}" sec "${CLSACT_SECTION}" da
BPF_PROG_ID=$(ns1_exec tc filter show dev "${TEST_IF}" ingress | \
get_prog_id)
ns1_exec "${PROG}" "${BPF_PROG_ID}"
ns1_exec tc qdisc del dev "${TEST_IF}" clsact
printf "Testing XDP..."
ns1_exec ip link set "${TEST_IF}" xdp \
object "${BPF_PROG_OBJ}" section "${XDP_SECTION}"
BPF_PROG_ID=$(ns1_exec ip link show "${TEST_IF}" | get_prog_id)
ns1_exec "${PROG}" "${BPF_PROG_ID}"
}
DIR=$(dirname $0)
TEST_IF=lo
MAX_PING_TRIES=5
BPF_PROG_OBJ="${DIR}/test_tcp_check_syncookie_kern.o"
CLSACT_SECTION="clsact/check_syncookie"
XDP_SECTION="xdp/check_syncookie"
BPF_PROG_ID=0
PROG="${DIR}/test_tcp_check_syncookie_user"
main
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Facebook
// Copyright (c) 2019 Cloudflare
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "bpf_rlimit.h"
#include "cgroup_helpers.h"
static int start_server(const struct sockaddr *addr, socklen_t len)
{
int fd;
fd = socket(addr->sa_family, SOCK_STREAM, 0);
if (fd == -1) {
log_err("Failed to create server socket");
goto out;
}
if (bind(fd, addr, len) == -1) {
log_err("Failed to bind server socket");
goto close_out;
}
if (listen(fd, 128) == -1) {
log_err("Failed to listen on server socket");
goto close_out;
}
goto out;
close_out:
close(fd);
fd = -1;
out:
return fd;
}
static int connect_to_server(int server_fd)
{
struct sockaddr_storage addr;
socklen_t len = sizeof(addr);
int fd = -1;
if (getsockname(server_fd, (struct sockaddr *)&addr, &len)) {
log_err("Failed to get server addr");
goto out;
}
fd = socket(addr.ss_family, SOCK_STREAM, 0);
if (fd == -1) {
log_err("Failed to create client socket");
goto out;
}
if (connect(fd, (const struct sockaddr *)&addr, len) == -1) {
log_err("Fail to connect to server");
goto close_out;
}
goto out;
close_out:
close(fd);
fd = -1;
out:
return fd;
}
static int get_map_fd_by_prog_id(int prog_id)
{
struct bpf_prog_info info = {};
__u32 info_len = sizeof(info);
__u32 map_ids[1];
int prog_fd = -1;
int map_fd = -1;
prog_fd = bpf_prog_get_fd_by_id(prog_id);
if (prog_fd < 0) {
log_err("Failed to get fd by prog id %d", prog_id);
goto err;
}
info.nr_map_ids = 1;
info.map_ids = (__u64)(unsigned long)map_ids;
if (bpf_obj_get_info_by_fd(prog_fd, &info, &info_len)) {
log_err("Failed to get info by prog fd %d", prog_fd);
goto err;
}
if (!info.nr_map_ids) {
log_err("No maps found for prog fd %d", prog_fd);
goto err;
}
map_fd = bpf_map_get_fd_by_id(map_ids[0]);
if (map_fd < 0)
log_err("Failed to get fd by map id %d", map_ids[0]);
err:
if (prog_fd >= 0)
close(prog_fd);
return map_fd;
}
static int run_test(int server_fd, int results_fd)
{
int client = -1, srv_client = -1;
int ret = 0;
__u32 key = 0;
__u64 value = 0;
if (bpf_map_update_elem(results_fd, &key, &value, 0) < 0) {
log_err("Can't clear results");
goto err;
}
client = connect_to_server(server_fd);
if (client == -1)
goto err;
srv_client = accept(server_fd, NULL, 0);
if (srv_client == -1) {
log_err("Can't accept connection");
goto err;
}
if (bpf_map_lookup_elem(results_fd, &key, &value) < 0) {
log_err("Can't lookup result");
goto err;
}
if (value != 1) {
log_err("Didn't match syncookie: %llu", value);
goto err;
}
goto out;
err:
ret = 1;
out:
close(client);
close(srv_client);
return ret;
}
int main(int argc, char **argv)
{
struct sockaddr_in addr4;
struct sockaddr_in6 addr6;
int server = -1;
int server_v6 = -1;
int results = -1;
int err = 0;
if (argc < 2) {
fprintf(stderr, "Usage: %s prog_id\n", argv[0]);
exit(1);
}
results = get_map_fd_by_prog_id(atoi(argv[1]));
if (results < 0) {
log_err("Can't get map");
goto err;
}
memset(&addr4, 0, sizeof(addr4));
addr4.sin_family = AF_INET;
addr4.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
addr4.sin_port = 0;
memset(&addr6, 0, sizeof(addr6));
addr6.sin6_family = AF_INET6;
addr6.sin6_addr = in6addr_loopback;
addr6.sin6_port = 0;
server = start_server((const struct sockaddr *)&addr4, sizeof(addr4));
if (server == -1)
goto err;
server_v6 = start_server((const struct sockaddr *)&addr6,
sizeof(addr6));
if (server_v6 == -1)
goto err;
if (run_test(server, results))
goto err;
if (run_test(server_v6, results))
goto err;
printf("ok\n");
goto out;
err:
err = 1;
out:
close(server);
close(server_v6);
close(results);
return err;
}
@@ -198,7 +198,7 @@ static void bpf_fill_rand_ld_dw(struct bpf_test *self)
 }
 
 /* BPF_SK_LOOKUP contains 13 instructions, if you need to fix up maps */
-#define BPF_SK_LOOKUP						\
+#define BPF_SK_LOOKUP(func)					\
 	/* struct bpf_sock_tuple tuple = {} */			\
 	BPF_MOV64_IMM(BPF_REG_2, 0),				\
 	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8),		\
@@ -207,13 +207,13 @@ static void bpf_fill_rand_ld_dw(struct bpf_test *self)
 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -32),	\
 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -40),	\
 	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -48),	\
-	/* sk = sk_lookup_tcp(ctx, &tuple, sizeof tuple, 0, 0) */ \
+	/* sk = func(ctx, &tuple, sizeof tuple, 0, 0) */	\
 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),			\
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),			\
 	BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)), \
 	BPF_MOV64_IMM(BPF_REG_4, 0),				\
 	BPF_MOV64_IMM(BPF_REG_5, 0),				\
-	BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp)
+	BPF_EMIT_CALL(BPF_FUNC_ ## func)
 
 /* BPF_DIRECT_PKT_R2 contains 7 instructions, it initializes default return
  * value into 0 and does necessary preparation for direct packet access
...
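
The new func parameter only changes the final instruction the macro emits, so each test below can pick the lookup flavor whose return type it wants to exercise:

	BPF_SK_LOOKUP(sk_lookup_tcp),	/* ends in BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp) */
	BPF_SK_LOOKUP(skc_lookup_tcp),	/* ends in BPF_EMIT_CALL(BPF_FUNC_skc_lookup_tcp) */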
 {
 	"reference tracking: leak potential reference",
 	.insns = {
-	BPF_SK_LOOKUP,
+	BPF_SK_LOOKUP(sk_lookup_tcp),
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), /* leak reference */
+	BPF_EXIT_INSN(),
+	},
+	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	.errstr = "Unreleased reference",
+	.result = REJECT,
+},
+{
+	"reference tracking: leak potential reference to sock_common",
+	.insns = {
+	BPF_SK_LOOKUP(skc_lookup_tcp),
 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), /* leak reference */
 	BPF_EXIT_INSN(),
 	},
@@ -12,7 +23,7 @@
 {
 	"reference tracking: leak potential reference on stack",
 	.insns = {
-	BPF_SK_LOOKUP,
+	BPF_SK_LOOKUP(sk_lookup_tcp),
 	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
 	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
@@ -26,7 +37,7 @@
 {
 	"reference tracking: leak potential reference on stack 2",
 	.insns = {
-	BPF_SK_LOOKUP,
+	BPF_SK_LOOKUP(sk_lookup_tcp),
 	BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
 	BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_0, 0),
@@ -41,7 +52,18 @@
 {
 	"reference tracking: zero potential reference",
 	.insns = {
-	BPF_SK_LOOKUP,
+	BPF_SK_LOOKUP(sk_lookup_tcp),
+	BPF_MOV64_IMM(BPF_REG_0, 0), /* leak reference */
+	BPF_EXIT_INSN(),
+	},
+	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	.errstr = "Unreleased reference",
+	.result = REJECT,
+},
+{
+	"reference tracking: zero potential reference to sock_common",
+	.insns = {
+	BPF_SK_LOOKUP(skc_lookup_tcp),
 	BPF_MOV64_IMM(BPF_REG_0, 0), /* leak reference */
 	BPF_EXIT_INSN(),
 	},
@@ -52,7 +74,7 @@
 {
 	"reference tracking: copy and zero potential references",
 	.insns = {
-	BPF_SK_LOOKUP,
+	BPF_SK_LOOKUP(sk_lookup_tcp),
 	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
 	BPF_MOV64_IMM(BPF_REG_0, 0),
 	BPF_MOV64_IMM(BPF_REG_7, 0), /* leak reference */
@@ -65,7 +87,7 @@
 {
 	"reference tracking: release reference without check",
 	.insns = {
-	BPF_SK_LOOKUP,
+	BPF_SK_LOOKUP(sk_lookup_tcp),
 	/* reference in r0 may be NULL */
 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
 	BPF_MOV64_IMM(BPF_REG_2, 0),
@@ -76,10 +98,36 @@
 	.errstr = "type=sock_or_null expected=sock",
 	.result = REJECT,
 },
+{
+	"reference tracking: release reference to sock_common without check",
+	.insns = {
+	BPF_SK_LOOKUP(skc_lookup_tcp),
+	/* reference in r0 may be NULL */
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+	BPF_MOV64_IMM(BPF_REG_2, 0),
+	BPF_EMIT_CALL(BPF_FUNC_sk_release),
+	BPF_EXIT_INSN(),
+	},
+	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	.errstr = "type=sock_common_or_null expected=sock",
+	.result = REJECT,
+},
 {
 	"reference tracking: release reference",
 	.insns = {
-	BPF_SK_LOOKUP,
+	BPF_SK_LOOKUP(sk_lookup_tcp),
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+	BPF_EMIT_CALL(BPF_FUNC_sk_release),
+	BPF_EXIT_INSN(),
+	},
+	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	.result = ACCEPT,
+},
+{
+	"reference tracking: release reference to sock_common",
+	.insns = {
+	BPF_SK_LOOKUP(skc_lookup_tcp),
 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
 	BPF_EMIT_CALL(BPF_FUNC_sk_release),
@@ -91,7 +139,7 @@
 {
 	"reference tracking: release reference 2",
 	.insns = {
-	BPF_SK_LOOKUP,
+	BPF_SK_LOOKUP(sk_lookup_tcp),
 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
 	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
 	BPF_EXIT_INSN(),
@@ -104,7 +152,7 @@
 {
 	"reference tracking: release reference twice",
 	.insns = {
-	BPF_SK_LOOKUP,
+	BPF_SK_LOOKUP(sk_lookup_tcp),
 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
@@ -120,7 +168,7 @@
 {
 	"reference tracking: release reference twice inside branch",
 	.insns = {
-	BPF_SK_LOOKUP,
+	BPF_SK_LOOKUP(sk_lookup_tcp),
 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), /* goto end */
@@ -147,7 +195,7 @@
 	BPF_EXIT_INSN(),
 	BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
 		    offsetof(struct __sk_buff, mark)),
-	BPF_SK_LOOKUP,
+	BPF_SK_LOOKUP(sk_lookup_tcp),
 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 1), /* mark == 0? */
 	/* Leak reference in R0 */
 	BPF_EXIT_INSN(),
@@ -175,7 +223,7 @@
 	BPF_EXIT_INSN(),
 	BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_2,
 		    offsetof(struct __sk_buff, mark)),
-	BPF_SK_LOOKUP,
+	BPF_SK_LOOKUP(sk_lookup_tcp),
 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 4), /* mark == 0? */
 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), /* sk NULL? */
 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
@@ -193,7 +241,7 @@
 {
 	"reference tracking in call: free reference in subprog",
 	.insns = {
-	BPF_SK_LOOKUP,
+	BPF_SK_LOOKUP(sk_lookup_tcp),
 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
 	BPF_MOV64_IMM(BPF_REG_0, 0),
@@ -211,7 +259,7 @@
 {
 	"reference tracking in call: free reference in subprog and outside",
 	.insns = {
-	BPF_SK_LOOKUP,
+	BPF_SK_LOOKUP(sk_lookup_tcp),
 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), /* unchecked reference */
 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
 	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
@@ -241,7 +289,7 @@
 
 	/* subprog 1 */
 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_4),
-	BPF_SK_LOOKUP,
+	BPF_SK_LOOKUP(sk_lookup_tcp),
 	/* spill unchecked sk_ptr into stack of caller */
 	BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
@@ -262,7 +310,7 @@
 	BPF_EXIT_INSN(),
 
 	/* subprog 1 */
-	BPF_SK_LOOKUP,
+	BPF_SK_LOOKUP(sk_lookup_tcp),
 	BPF_EXIT_INSN(), /* return sk */
 	},
 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
@@ -291,7 +339,7 @@
 	BPF_EXIT_INSN(),
 
 	/* subprog 2 */
-	BPF_SK_LOOKUP,
+	BPF_SK_LOOKUP(sk_lookup_tcp),
 	BPF_EXIT_INSN(),
 	},
 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
@@ -324,7 +372,7 @@
 	BPF_EXIT_INSN(),
 
 	/* subprog 2 */
-	BPF_SK_LOOKUP,
+	BPF_SK_LOOKUP(sk_lookup_tcp),
 	BPF_EXIT_INSN(),
 	},
 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
@@ -334,7 +382,7 @@
 	"reference tracking: allow LD_ABS",
 	.insns = {
 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
-	BPF_SK_LOOKUP,
+	BPF_SK_LOOKUP(sk_lookup_tcp),
 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
 	BPF_EMIT_CALL(BPF_FUNC_sk_release),
@@ -350,7 +398,7 @@
 	"reference tracking: forbid LD_ABS while holding reference",
 	.insns = {
 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
-	BPF_SK_LOOKUP,
+	BPF_SK_LOOKUP(sk_lookup_tcp),
 	BPF_LD_ABS(BPF_B, 0),
 	BPF_LD_ABS(BPF_H, 0),
 	BPF_LD_ABS(BPF_W, 0),
@@ -367,7 +415,7 @@
 	"reference tracking: allow LD_IND",
 	.insns = {
 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
-	BPF_SK_LOOKUP,
+	BPF_SK_LOOKUP(sk_lookup_tcp),
 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
 	BPF_EMIT_CALL(BPF_FUNC_sk_release),
@@ -384,7 +432,7 @@
 	"reference tracking: forbid LD_IND while holding reference",
 	.insns = {
 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
-	BPF_SK_LOOKUP,
+	BPF_SK_LOOKUP(sk_lookup_tcp),
 	BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
 	BPF_MOV64_IMM(BPF_REG_7, 1),
 	BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000),
@@ -402,7 +450,7 @@
 	"reference tracking: check reference or tail call",
 	.insns = {
 	BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
-	BPF_SK_LOOKUP,
+	BPF_SK_LOOKUP(sk_lookup_tcp),
 	/* if (sk) bpf_sk_release() */
 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
 	BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 7),
@@ -424,7 +472,7 @@
 	"reference tracking: release reference then tail call",
 	.insns = {
 	BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
-	BPF_SK_LOOKUP,
+	BPF_SK_LOOKUP(sk_lookup_tcp),
 	/* if (sk) bpf_sk_release() */
 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
@@ -446,7 +494,7 @@
 	.insns = {
 	BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
 	/* Look up socket and store in REG_6 */
-	BPF_SK_LOOKUP,
+	BPF_SK_LOOKUP(sk_lookup_tcp),
 	/* bpf_tail_call() */
 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
 	BPF_MOV64_IMM(BPF_REG_3, 2),
@@ -470,7 +518,7 @@
 	.insns = {
 	BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
 	/* Look up socket and store in REG_6 */
-	BPF_SK_LOOKUP,
+	BPF_SK_LOOKUP(sk_lookup_tcp),
 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
 	/* if (!sk) goto end */
 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
@@ -492,7 +540,7 @@
 {
 	"reference tracking: mangle and release sock_or_null",
 	.insns = {
-	BPF_SK_LOOKUP,
+	BPF_SK_LOOKUP(sk_lookup_tcp),
 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
@@ -506,7 +554,7 @@
 {
 	"reference tracking: mangle and release sock",
 	.insns = {
-	BPF_SK_LOOKUP,
+	BPF_SK_LOOKUP(sk_lookup_tcp),
 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
 	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 5),
@@ -520,7 +568,7 @@
 {
 	"reference tracking: access member",
 	.insns = {
-	BPF_SK_LOOKUP,
+	BPF_SK_LOOKUP(sk_lookup_tcp),
 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
 	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_0, 4),
@@ -534,7 +582,7 @@
 {
 	"reference tracking: write to member",
 	.insns = {
-	BPF_SK_LOOKUP,
+	BPF_SK_LOOKUP(sk_lookup_tcp),
 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
@@ -553,7 +601,7 @@
 {
 	"reference tracking: invalid 64-bit access of member",
 	.insns = {
-	BPF_SK_LOOKUP,
+	BPF_SK_LOOKUP(sk_lookup_tcp),
 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
 	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
@@ -568,7 +616,7 @@
 {
 	"reference tracking: access after release",
 	.insns = {
-	BPF_SK_LOOKUP,
+	BPF_SK_LOOKUP(sk_lookup_tcp),
 	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
 	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
 	BPF_EMIT_CALL(BPF_FUNC_sk_release),
@@ -608,7 +656,7 @@
 {
 	"reference tracking: use ptr from bpf_tcp_sock() after release",
 	.insns = {
-	BPF_SK_LOOKUP,
+	BPF_SK_LOOKUP(sk_lookup_tcp),
 	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
 	BPF_EXIT_INSN(),
 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
@@ -631,7 +679,7 @@
 {
 	"reference tracking: use ptr from bpf_sk_fullsock() after release",
 	.insns = {
-	BPF_SK_LOOKUP,
+	BPF_SK_LOOKUP(sk_lookup_tcp),
 	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
 	BPF_EXIT_INSN(),
 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
@@ -654,7 +702,7 @@
 {
 	"reference tracking: use ptr from bpf_sk_fullsock(tp) after release",
 	.insns = {
-	BPF_SK_LOOKUP,
+	BPF_SK_LOOKUP(sk_lookup_tcp),
 	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
 	BPF_EXIT_INSN(),
 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
@@ -681,7 +729,7 @@
 {
 	"reference tracking: use sk after bpf_sk_release(tp)",
 	.insns = {
-	BPF_SK_LOOKUP,
+	BPF_SK_LOOKUP(sk_lookup_tcp),
 	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
 	BPF_EXIT_INSN(),
 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
@@ -703,7 +751,7 @@
 {
 	"reference tracking: use ptr from bpf_get_listener_sock() after bpf_sk_release(sk)",
 	.insns = {
-	BPF_SK_LOOKUP,
+	BPF_SK_LOOKUP(sk_lookup_tcp),
 	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
 	BPF_EXIT_INSN(),
 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
@@ -725,7 +773,7 @@
 {
 	"reference tracking: bpf_sk_release(listen_sk)",
 	.insns = {
-	BPF_SK_LOOKUP,
+	BPF_SK_LOOKUP(sk_lookup_tcp),
 	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
 	BPF_EXIT_INSN(),
 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
@@ -750,7 +798,7 @@
 	/* !bpf_sk_fullsock(sk) is checked but !bpf_tcp_sock(sk) is not checked */
 	"reference tracking: tp->snd_cwnd after bpf_sk_fullsock(sk) and bpf_tcp_sock(sk)",
 	.insns = {
-	BPF_SK_LOOKUP,
+	BPF_SK_LOOKUP(sk_lookup_tcp),
 	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
 	BPF_EXIT_INSN(),
 	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
...
@@ -242,7 +242,7 @@
 	.insns = {
 	BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
 	/* struct bpf_sock *sock = bpf_sock_lookup(...); */
-	BPF_SK_LOOKUP,
+	BPF_SK_LOOKUP(sk_lookup_tcp),
 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
 	/* u64 foo; */
 	/* void *target = &foo; */
@@ -276,7 +276,7 @@
 	.insns = {
 	BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
 	/* struct bpf_sock *sock = bpf_sock_lookup(...); */
-	BPF_SK_LOOKUP,
+	BPF_SK_LOOKUP(sk_lookup_tcp),
 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
 	/* u64 foo; */
 	/* void *target = &foo; */
@@ -307,7 +307,7 @@
 	.insns = {
 	BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
 	/* struct bpf_sock *sock = bpf_sock_lookup(...); */
-	BPF_SK_LOOKUP,
+	BPF_SK_LOOKUP(sk_lookup_tcp),
 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
 	/* u64 foo; */
 	/* void *target = &foo; */
@@ -339,7 +339,7 @@
 	.insns = {
 	BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
 	/* struct bpf_sock *sock = bpf_sock_lookup(...); */
-	BPF_SK_LOOKUP,
+	BPF_SK_LOOKUP(sk_lookup_tcp),
 	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
 	/* u64 foo; */
 	/* void *target = &foo; */
...