Commit 6cab5e90 authored by Alexei Starovoitov, committed by Daniel Borkmann

bpf: run bpf programs with preemption disabled

Disabling preemption is necessary for proper access to per-cpu maps
from BPF programs.
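
For context: a per-cpu map lookup hands the program a pointer into the
current CPU's copy of the value, so the task must stay on that CPU for
as long as it uses the pointer. A simplified sketch of the lookup
pattern (illustrative names, not the exact kernel code):

/* Sketch only, not the exact kernel code. this_cpu_ptr() resolves to
 * the running CPU's copy of the per-cpu value. If the task were
 * preempted and migrated to another CPU after this returns, the
 * caller would keep using the old CPU's copy and corrupt per-cpu
 * data, hence preemption must be disabled around the whole access.
 */
static void *percpu_slot_sketch(void __percpu **pptrs, u32 index)
{
	return this_cpu_ptr(pptrs[index]);
}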

But the sender side of socket filters didn't have preemption disabled:
unix_dgram_sendmsg->sk_filter->sk_filter_trim_cap->bpf_prog_run_save_cb->BPF_PROG_RUN

and the combination of af_packet with a tun device didn't disable it either:
tpacket_snd->packet_direct_xmit->packet_pick_tx_queue->ndo_select_queue->
  tun_select_queue->tun_ebpf_select_queue->bpf_prog_run_clear_cb->BPF_PROG_RUN
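
The first path is reachable directly from userspace. A minimal sketch,
assuming a trivial "accept everything" classic filter and a
hypothetical helper name (not code from this commit): attaching such a
filter to an AF_UNIX datagram socket makes it run in the sending
task's context on every datagram delivered to that socket, i.e. via
the unix_dgram_sendmsg->sk_filter path above.

#include <linux/filter.h>
#include <sys/socket.h>

/* Hypothetical helper, illustration only: attach a classic BPF
 * filter that accepts every packet to the given socket.
 */
static int attach_accept_all_filter(int fd)
{
	struct sock_filter insns[] = {
		/* BPF_RET | BPF_K: return 0xffffffff => accept packet */
		{ BPF_RET | BPF_K, 0, 0, 0xffffffff },
	};
	struct sock_fprog fprog = {
		.len	= 1,
		.filter	= insns,
	};

	return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
			  &fprog, sizeof(fprog));
}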

Disable preemption before executing BPF programs (both classic and extended).
Reported-by: Jann Horn <jannh@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parent 1bb54c40
include/linux/filter.h

@@ -591,8 +591,8 @@ static inline u8 *bpf_skb_cb(struct sk_buff *skb)
 	return qdisc_skb_cb(skb)->data;
 }
 
-static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
-				       struct sk_buff *skb)
+static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog,
+					 struct sk_buff *skb)
 {
 	u8 *cb_data = bpf_skb_cb(skb);
 	u8 cb_saved[BPF_SKB_CB_LEN];
@@ -611,15 +611,30 @@ static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
 	return res;
 }
 
+static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
+				       struct sk_buff *skb)
+{
+	u32 res;
+
+	preempt_disable();
+	res = __bpf_prog_run_save_cb(prog, skb);
+	preempt_enable();
+	return res;
+}
+
 static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
 					struct sk_buff *skb)
 {
 	u8 *cb_data = bpf_skb_cb(skb);
+	u32 res;
 
 	if (unlikely(prog->cb_access))
 		memset(cb_data, 0, BPF_SKB_CB_LEN);
 
-	return BPF_PROG_RUN(prog, skb);
+	preempt_disable();
+	res = BPF_PROG_RUN(prog, skb);
+	preempt_enable();
+	return res;
 }
 
 static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
...

kernel/bpf/cgroup.c

@@ -572,7 +572,7 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk,
 		bpf_compute_and_save_data_end(skb, &saved_data_end);
 
 		ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
-					 bpf_prog_run_save_cb);
+					 __bpf_prog_run_save_cb);
 		bpf_restore_data_end(skb, saved_data_end);
 		__skb_pull(skb, offset);
 		skb->sk = save_sk;
...