Commit fecef4cd authored by Sebastian Andrzej Siewior, committed by Jakub Kicinski

tun: Assign missing bpf_net_context.

During the introduction of struct bpf_net_context handling for
XDP-redirect, the tun driver was missed.
Jakub also pointed out that there is another call chain to
do_xdp_generic() originating from netif_receive_skb(), and drivers may
use it outside of the NAPI context.

Set the bpf_net_context before invoking the BPF XDP program within the
tun driver. Also set the bpf_net_context in do_xdp_generic() if an XDP
program is available.
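
As a minimal sketch of the pattern (not taken from this patch; the wrapper
function name is made up for illustration, while struct bpf_net_context,
bpf_net_ctx_set()/bpf_net_ctx_clear() and bpf_prog_run_xdp() are the existing
kernel helpers), every call site that may run an XDP program has to bracket
the program invocation like this:

	/* Illustrative only: install the on-stack bpf_net_context so that
	 * bpf_redirect_info (referenced via task_struct since commit
	 * 401cb7da) is valid while the XDP program runs, and remove it
	 * again before returning.
	 */
	#include <linux/filter.h>
	#include <net/xdp.h>

	static u32 run_xdp_prog_sketch(struct bpf_prog *prog, struct xdp_buff *xdp)
	{
		struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
		u32 act;

		bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
		act = bpf_prog_run_xdp(prog, xdp);	/* may call bpf_redirect() */
		bpf_net_ctx_clear(bpf_net_ctx);

		return act;
	}

Error paths that bail out after bpf_net_ctx_set() must clear the context as
well, which is why the diff below adds bpf_net_ctx_clear() to the out/out_redir
labels too.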

Reported-by: syzbot+0b5c75599f1d872bea6f@syzkaller.appspotmail.com
Reported-by: syzbot+5ae46b237278e2369cac@syzkaller.appspotmail.com
Reported-by: syzbot+c1e04a422bbc0f0f2921@syzkaller.appspotmail.com
Fixes: 401cb7da ("net: Reference bpf_redirect_info via task_struct on PREEMPT_RT.")
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Link: https://patch.msgid.link/20240704144815.j8xQda5r@linutronix.de
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 3b2aef99
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1661,6 +1661,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
 				     int len, int *skb_xdp)
 {
 	struct page_frag *alloc_frag = &current->task_frag;
+	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
 	struct bpf_prog *xdp_prog;
 	int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 	char *buf;
@@ -1700,6 +1701,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
 
 	local_bh_disable();
 	rcu_read_lock();
+	bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
 	xdp_prog = rcu_dereference(tun->xdp_prog);
 	if (xdp_prog) {
 		struct xdp_buff xdp;
@@ -1728,12 +1730,14 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
 		pad = xdp.data - xdp.data_hard_start;
 		len = xdp.data_end - xdp.data;
 	}
+	bpf_net_ctx_clear(bpf_net_ctx);
 	rcu_read_unlock();
 	local_bh_enable();
 
 	return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad);
 
 out:
+	bpf_net_ctx_clear(bpf_net_ctx);
 	rcu_read_unlock();
 	local_bh_enable();
 	return NULL;
@@ -2566,6 +2570,7 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
 
 	if (m->msg_controllen == sizeof(struct tun_msg_ctl) &&
 	    ctl && ctl->type == TUN_MSG_PTR) {
+		struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
 		struct tun_page tpage;
 		int n = ctl->num;
 		int flush = 0, queued = 0;
@@ -2574,6 +2579,7 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
 
 		local_bh_disable();
 		rcu_read_lock();
+		bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
 		for (i = 0; i < n; i++) {
 			xdp = &((struct xdp_buff *)ctl->ptr)[i];
@@ -2588,6 +2594,7 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
 		if (tfile->napi_enabled && queued > 0)
 			napi_schedule(&tfile->napi);
 
+		bpf_net_ctx_clear(bpf_net_ctx);
 		rcu_read_unlock();
 		local_bh_enable();
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5126,11 +5126,14 @@ static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);
 
 int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff **pskb)
 {
+	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
+
 	if (xdp_prog) {
 		struct xdp_buff xdp;
 		u32 act;
 		int err;
 
+		bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
 		act = netif_receive_generic_xdp(pskb, &xdp, xdp_prog);
 		if (act != XDP_PASS) {
 			switch (act) {
@@ -5144,11 +5147,13 @@ int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff **pskb)
 				generic_xdp_tx(*pskb, xdp_prog);
 				break;
 			}
+			bpf_net_ctx_clear(bpf_net_ctx);
 			return XDP_DROP;
 		}
 	}
 	return XDP_PASS;
 out_redir:
+	bpf_net_ctx_clear(bpf_net_ctx);
 	kfree_skb_reason(*pskb, SKB_DROP_REASON_XDP);
 	return XDP_DROP;
 }