Commit e3526bb9 authored by Cong Wang's avatar Cong Wang Committed by Alexei Starovoitov

skmsg: Move sk_redir from TCP_SKB_CB to skb

Currently TCP_SKB_CB() is hard-coded in skmsg code, it certainly
does not work for any other non-TCP protocols. We can move them to
skb ext, but it introduces a memory allocation on fast path.

Fortunately, we only need a word-size field to store all the information,
because the flags actually contain only 1 bit, so they can be packed
into the lowest bit of the "pointer", which is stored as an unsigned
long.

Inside struct sk_buff, '_skb_refdst' can be reused because skb dst is
no longer needed after ->sk_data_ready() so we can just drop it.
Signed-off-by: default avatarCong Wang <cong.wang@bytedance.com>
Signed-off-by: default avatarAlexei Starovoitov <ast@kernel.org>
Acked-by: default avatarJohn Fastabend <john.fastabend@gmail.com>
Acked-by: default avatarJakub Sitnicki <jakub@cloudflare.com>
Link: https://lore.kernel.org/bpf/20210223184934.6054-5-xiyou.wangcong@gmail.com
parent 16137b09
...@@ -755,6 +755,9 @@ struct sk_buff { ...@@ -755,6 +755,9 @@ struct sk_buff {
void (*destructor)(struct sk_buff *skb); void (*destructor)(struct sk_buff *skb);
}; };
struct list_head tcp_tsorted_anchor; struct list_head tcp_tsorted_anchor;
#ifdef CONFIG_NET_SOCK_MSG
unsigned long _sk_redir;
#endif
}; };
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
......
...@@ -455,4 +455,42 @@ static inline bool sk_psock_strp_enabled(struct sk_psock *psock) ...@@ -455,4 +455,42 @@ static inline bool sk_psock_strp_enabled(struct sk_psock *psock)
return false; return false;
return !!psock->saved_data_ready; return !!psock->saved_data_ready;
} }
#if IS_ENABLED(CONFIG_NET_SOCK_MSG)
/* We only have one bit so far.
 * Fully parenthesized so the mask expands safely in any expression
 * context (CERT PRE01-C); '~' already binds tightly, but the outer
 * parens make that independent of the use site.
 */
#define BPF_F_PTR_MASK (~(BPF_F_INGRESS))

/* Return true if the BPF verdict redirected this skb to the ingress
 * path of the target socket. The flag is packed into the lowest bit
 * of skb->_sk_redir by skb_bpf_set_redir()/skb_bpf_set_ingress().
 */
static inline bool skb_bpf_ingress(const struct sk_buff *skb)
{
	unsigned long sk_redir = skb->_sk_redir;

	return sk_redir & BPF_F_INGRESS;
}
/* Mark the skb for ingress delivery by setting the BPF_F_INGRESS bit
 * packed into the low bit of skb->_sk_redir; the pointer part (upper
 * bits) is left untouched.
 */
static inline void skb_bpf_set_ingress(struct sk_buff *skb)
{
	skb->_sk_redir |= BPF_F_INGRESS;
}
/* Record the redirect target socket in skb->_sk_redir, packing the
 * ingress flag into the pointer's lowest bit. The word is built in a
 * local first so the field is written with a single store.
 */
static inline void skb_bpf_set_redir(struct sk_buff *skb, struct sock *sk_redir,
				     bool ingress)
{
	unsigned long redir = (unsigned long)sk_redir;

	if (ingress)
		redir |= BPF_F_INGRESS;
	skb->_sk_redir = redir;
}
/* Extract the redirect target socket stored by skb_bpf_set_redir():
 * mask off the flag bit(s) and reinterpret the remainder as a pointer.
 */
static inline struct sock *skb_bpf_redirect_fetch(const struct sk_buff *skb)
{
	return (struct sock *)(skb->_sk_redir & BPF_F_PTR_MASK);
}
/* Reset the packed redirect state (target pointer + ingress flag) to
 * zero. NOTE(review): callers appear to clear this before freeing or
 * re-queueing the skb — presumably because _sk_redir holds no socket
 * reference of its own; confirm against the call sites.
 */
static inline void skb_bpf_redirect_clear(struct sk_buff *skb)
{
	skb->_sk_redir = 0;
}
#endif /* CONFIG_NET_SOCK_MSG */
#endif /* _LINUX_SKMSG_H */ #endif /* _LINUX_SKMSG_H */
...@@ -883,30 +883,11 @@ struct tcp_skb_cb { ...@@ -883,30 +883,11 @@ struct tcp_skb_cb {
struct inet6_skb_parm h6; struct inet6_skb_parm h6;
#endif #endif
} header; /* For incoming skbs */ } header; /* For incoming skbs */
struct {
__u32 flags;
struct sock *sk_redir;
} bpf;
}; };
}; };
#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0])) #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
/* Legacy TCP-only accessor (removed by this commit): reads the
 * BPF_F_INGRESS flag from the bpf state stored in TCP_SKB_CB, which
 * only works for TCP skbs.
 */
static inline bool tcp_skb_bpf_ingress(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->bpf.flags & BPF_F_INGRESS;
}
/* Legacy TCP-only accessor (removed by this commit): fetches the
 * redirect target socket from the bpf state in TCP_SKB_CB.
 */
static inline struct sock *tcp_skb_bpf_redirect_fetch(struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->bpf.sk_redir;
}
/* Legacy TCP-only accessor (removed by this commit): drops the recorded
 * redirect target by nulling the sk_redir pointer in TCP_SKB_CB.
 */
static inline void tcp_skb_bpf_redirect_clear(struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->bpf.sk_redir = NULL;
}
extern const struct inet_connection_sock_af_ops ipv4_specific; extern const struct inet_connection_sock_af_ops ipv4_specific;
#if IS_ENABLED(CONFIG_IPV6) #if IS_ENABLED(CONFIG_IPV6)
......
...@@ -525,7 +525,8 @@ static void sk_psock_backlog(struct work_struct *work) ...@@ -525,7 +525,8 @@ static void sk_psock_backlog(struct work_struct *work)
len = skb->len; len = skb->len;
off = 0; off = 0;
start: start:
ingress = tcp_skb_bpf_ingress(skb); ingress = skb_bpf_ingress(skb);
skb_bpf_redirect_clear(skb);
do { do {
ret = -EIO; ret = -EIO;
if (likely(psock->sk->sk_socket)) if (likely(psock->sk->sk_socket))
...@@ -631,7 +632,12 @@ void __sk_psock_purge_ingress_msg(struct sk_psock *psock) ...@@ -631,7 +632,12 @@ void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
static void sk_psock_zap_ingress(struct sk_psock *psock) static void sk_psock_zap_ingress(struct sk_psock *psock)
{ {
__skb_queue_purge(&psock->ingress_skb); struct sk_buff *skb;
while ((skb = __skb_dequeue(&psock->ingress_skb)) != NULL) {
skb_bpf_redirect_clear(skb);
kfree_skb(skb);
}
__sk_psock_purge_ingress_msg(psock); __sk_psock_purge_ingress_msg(psock);
} }
...@@ -754,7 +760,7 @@ static void sk_psock_skb_redirect(struct sk_buff *skb) ...@@ -754,7 +760,7 @@ static void sk_psock_skb_redirect(struct sk_buff *skb)
struct sk_psock *psock_other; struct sk_psock *psock_other;
struct sock *sk_other; struct sock *sk_other;
sk_other = tcp_skb_bpf_redirect_fetch(skb); sk_other = skb_bpf_redirect_fetch(skb);
/* This error is a buggy BPF program, it returned a redirect /* This error is a buggy BPF program, it returned a redirect
* return code, but then didn't set a redirect interface. * return code, but then didn't set a redirect interface.
*/ */
...@@ -804,9 +810,10 @@ int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb) ...@@ -804,9 +810,10 @@ int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
* TLS context. * TLS context.
*/ */
skb->sk = psock->sk; skb->sk = psock->sk;
tcp_skb_bpf_redirect_clear(skb); skb_dst_drop(skb);
skb_bpf_redirect_clear(skb);
ret = sk_psock_bpf_run(psock, prog, skb); ret = sk_psock_bpf_run(psock, prog, skb);
ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb)); ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
skb->sk = NULL; skb->sk = NULL;
} }
sk_psock_tls_verdict_apply(skb, psock->sk, ret); sk_psock_tls_verdict_apply(skb, psock->sk, ret);
...@@ -818,7 +825,6 @@ EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read); ...@@ -818,7 +825,6 @@ EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);
static void sk_psock_verdict_apply(struct sk_psock *psock, static void sk_psock_verdict_apply(struct sk_psock *psock,
struct sk_buff *skb, int verdict) struct sk_buff *skb, int verdict)
{ {
struct tcp_skb_cb *tcp;
struct sock *sk_other; struct sock *sk_other;
int err = -EIO; int err = -EIO;
...@@ -830,8 +836,7 @@ static void sk_psock_verdict_apply(struct sk_psock *psock, ...@@ -830,8 +836,7 @@ static void sk_psock_verdict_apply(struct sk_psock *psock,
goto out_free; goto out_free;
} }
tcp = TCP_SKB_CB(skb); skb_bpf_set_ingress(skb);
tcp->bpf.flags |= BPF_F_INGRESS;
/* If the queue is empty then we can submit directly /* If the queue is empty then we can submit directly
* into the msg queue. If its not empty we have to * into the msg queue. If its not empty we have to
...@@ -892,9 +897,10 @@ static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb) ...@@ -892,9 +897,10 @@ static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
skb_set_owner_r(skb, sk); skb_set_owner_r(skb, sk);
prog = READ_ONCE(psock->progs.skb_verdict); prog = READ_ONCE(psock->progs.skb_verdict);
if (likely(prog)) { if (likely(prog)) {
tcp_skb_bpf_redirect_clear(skb); skb_dst_drop(skb);
skb_bpf_redirect_clear(skb);
ret = sk_psock_bpf_run(psock, prog, skb); ret = sk_psock_bpf_run(psock, prog, skb);
ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb)); ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
} }
sk_psock_verdict_apply(psock, skb, ret); sk_psock_verdict_apply(psock, skb, ret);
out: out:
...@@ -1011,9 +1017,10 @@ static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb, ...@@ -1011,9 +1017,10 @@ static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb,
skb_set_owner_r(skb, sk); skb_set_owner_r(skb, sk);
prog = READ_ONCE(psock->progs.skb_verdict); prog = READ_ONCE(psock->progs.skb_verdict);
if (likely(prog)) { if (likely(prog)) {
tcp_skb_bpf_redirect_clear(skb); skb_dst_drop(skb);
skb_bpf_redirect_clear(skb);
ret = sk_psock_bpf_run(psock, prog, skb); ret = sk_psock_bpf_run(psock, prog, skb);
ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb)); ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
} }
sk_psock_verdict_apply(psock, skb, ret); sk_psock_verdict_apply(psock, skb, ret);
out: out:
......
...@@ -657,7 +657,6 @@ const struct bpf_func_proto bpf_sock_map_update_proto = { ...@@ -657,7 +657,6 @@ const struct bpf_func_proto bpf_sock_map_update_proto = {
BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb, BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
struct bpf_map *, map, u32, key, u64, flags) struct bpf_map *, map, u32, key, u64, flags)
{ {
struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
struct sock *sk; struct sock *sk;
if (unlikely(flags & ~(BPF_F_INGRESS))) if (unlikely(flags & ~(BPF_F_INGRESS)))
...@@ -667,8 +666,7 @@ BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb, ...@@ -667,8 +666,7 @@ BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
if (unlikely(!sk || !sock_map_redirect_allowed(sk))) if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
return SK_DROP; return SK_DROP;
tcb->bpf.flags = flags; skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
tcb->bpf.sk_redir = sk;
return SK_PASS; return SK_PASS;
} }
...@@ -1250,7 +1248,6 @@ const struct bpf_func_proto bpf_sock_hash_update_proto = { ...@@ -1250,7 +1248,6 @@ const struct bpf_func_proto bpf_sock_hash_update_proto = {
BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb, BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb,
struct bpf_map *, map, void *, key, u64, flags) struct bpf_map *, map, void *, key, u64, flags)
{ {
struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
struct sock *sk; struct sock *sk;
if (unlikely(flags & ~(BPF_F_INGRESS))) if (unlikely(flags & ~(BPF_F_INGRESS)))
...@@ -1260,8 +1257,7 @@ BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb, ...@@ -1260,8 +1257,7 @@ BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb,
if (unlikely(!sk || !sock_map_redirect_allowed(sk))) if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
return SK_DROP; return SK_DROP;
tcb->bpf.flags = flags; skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
tcb->bpf.sk_redir = sk;
return SK_PASS; return SK_PASS;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment