Commit 9facc336 authored by Daniel Borkmann, committed by Alexei Starovoitov

bpf: reject any prog that failed read-only lock

We currently lock any JITed image as read-only via bpf_jit_binary_lock_ro()
as well as the BPF image as read-only through bpf_prog_lock_ro(). In case
any of these calls fails, we throw a WARN_ON_ONCE() in order to yell
loudly to the log. Perhaps, to some extent, this may be comparable to an
allocation where __GFP_NOWARN is explicitly not set.

Added via 65869a47 ("bpf: improve read-only handling"), this behavior is
slightly different compared to any of the other in-kernel set_memory_ro()
users, which do not check the return code of set_memory_ro() and friends
/at all/ (e.g. in the case of module_enable_ro() / module_disable_ro()).
Given that in BPF this is a mandatory hardening step, we want to know
whether there are any issues that would leave the BPF program or its
JITed image writable. And indeed, syzkaller with fault injection enabled
triggered a memory allocation failure deep inside x86's
change_page_attr_set_clr(), which was called from set_memory_ro().

Now, there are two options: i) leave everything as is, or ii) rework the
image locking code in order to have a final checkpoint at the end of the
central bpf_prog_select_runtime() which probes whether any of the calls
during prog setup were unsuccessful, and bail out with an error in that
case. Option ii) is the better approach since this additional paranoia
avoids altogether leaving any potential W+X pages from the BPF side in
the system. Therefore, let's be strict about it and reject programs on
such an unlikely occasion. While testing I also noticed that one
bpf_prog_lock_ro() call was missing on the outer dummy prog in case of
BPF-to-BPF calls: in the destructor we call bpf_prog_free_deferred() on
the main prog and try to bpf_prog_unlock_free() it, so lock it as well
now that we always go through bpf_prog_select_runtime() for it.
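
Condensed, the tail of bpf_prog_select_runtime() after this change looks
roughly as follows (a sketch assembled from the kernel/bpf/core.c hunks
below, not the literal patched function):

    finalize:
    	bpf_prog_lock_ro(fp);

    	*err = bpf_check_tail_call(fp);
    	if (*err)
    		return fp;

    	/* Checkpoint: reject the prog if the read-only lock did not
    	 * take effect on the prog itself or any of its subprogs.
    	 */
    	*err = bpf_prog_check_pages_ro_locked(fp);
    	return fp;

For progs with BPF-to-BPF calls the verifier has already done the JITing
work, so we jump straight to this label and only perform the locking and
the final checkpoint there.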

Reported-by: syzbot+3b889862e65a98317058@syzkaller.appspotmail.com
Reported-by: syzbot+9e762b52dd17e616a7a5@syzkaller.appspotmail.com
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Martin KaFai Lau <kafai@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 7d1982b4
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -469,7 +469,8 @@ struct sock_fprog_kern {
 };
 
 struct bpf_binary_header {
-	unsigned int pages;
+	u16 pages;
+	u16 locked:1;
 	u8 image[];
 };
 
@@ -671,15 +672,18 @@ bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
 
 #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
 
-#ifdef CONFIG_ARCH_HAS_SET_MEMORY
 static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
 {
+#ifdef CONFIG_ARCH_HAS_SET_MEMORY
 	fp->locked = 1;
-	WARN_ON_ONCE(set_memory_ro((unsigned long)fp, fp->pages));
+	if (set_memory_ro((unsigned long)fp, fp->pages))
+		fp->locked = 0;
+#endif
 }
 
 static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
 {
+#ifdef CONFIG_ARCH_HAS_SET_MEMORY
 	if (fp->locked) {
 		WARN_ON_ONCE(set_memory_rw((unsigned long)fp, fp->pages));
 		/* In case set_memory_rw() fails, we want to be the first
@@ -687,34 +691,30 @@ static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
 		 */
 		fp->locked = 0;
 	}
+#endif
 }
 
 static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
 {
-	WARN_ON_ONCE(set_memory_ro((unsigned long)hdr, hdr->pages));
+#ifdef CONFIG_ARCH_HAS_SET_MEMORY
+	hdr->locked = 1;
+	if (set_memory_ro((unsigned long)hdr, hdr->pages))
+		hdr->locked = 0;
+#endif
 }
 
 static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
 {
-	WARN_ON_ONCE(set_memory_rw((unsigned long)hdr, hdr->pages));
+#ifdef CONFIG_ARCH_HAS_SET_MEMORY
+	if (hdr->locked) {
+		WARN_ON_ONCE(set_memory_rw((unsigned long)hdr, hdr->pages));
+		/* In case set_memory_rw() fails, we want to be the first
+		 * to crash here instead of some random place later on.
+		 */
+		hdr->locked = 0;
+	}
+#endif
 }
-#else
-static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
-{
-}
-
-static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
-{
-}
-
-static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
-{
-}
-
-static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
-{
-}
-#endif /* CONFIG_ARCH_HAS_SET_MEMORY */
 
 static inline struct bpf_binary_header *
 bpf_jit_binary_hdr(const struct bpf_prog *fp)
@@ -725,6 +725,22 @@ bpf_jit_binary_hdr(const struct bpf_prog *fp)
 	return (void *)addr;
 }
 
+#ifdef CONFIG_ARCH_HAS_SET_MEMORY
+static inline int bpf_prog_check_pages_ro_single(const struct bpf_prog *fp)
+{
+	if (!fp->locked)
+		return -ENOLCK;
+	if (fp->jited) {
+		const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
+
+		if (!hdr->locked)
+			return -ENOLCK;
+	}
+
+	return 0;
+}
+#endif
+
 int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
 static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
 {
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -598,6 +598,8 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
 	bpf_fill_ill_insns(hdr, size);
 
 	hdr->pages = size / PAGE_SIZE;
+	hdr->locked = 0;
+
 	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
 		     PAGE_SIZE - sizeof(*hdr));
 	start = (get_random_int() % hole) & ~(alignment - 1);
@@ -1448,6 +1450,33 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
 	return 0;
 }
 
+static int bpf_prog_check_pages_ro_locked(const struct bpf_prog *fp)
+{
+#ifdef CONFIG_ARCH_HAS_SET_MEMORY
+	int i, err;
+
+	for (i = 0; i < fp->aux->func_cnt; i++) {
+		err = bpf_prog_check_pages_ro_single(fp->aux->func[i]);
+		if (err)
+			return err;
+	}
+
+	return bpf_prog_check_pages_ro_single(fp);
+#endif
+	return 0;
+}
+
+static void bpf_prog_select_func(struct bpf_prog *fp)
+{
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
+	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
+
+	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
+#else
+	fp->bpf_func = __bpf_prog_ret0_warn;
+#endif
+}
+
 /**
  * bpf_prog_select_runtime - select exec runtime for BPF program
  * @fp: bpf_prog populated with internal BPF program
@@ -1458,13 +1487,13 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
  */
 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
 {
-#ifndef CONFIG_BPF_JIT_ALWAYS_ON
-	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
+	/* In case of BPF to BPF calls, verifier did all the prep
+	 * work with regards to JITing, etc.
+	 */
+	if (fp->bpf_func)
+		goto finalize;
 
-	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
-#else
-	fp->bpf_func = __bpf_prog_ret0_warn;
-#endif
+	bpf_prog_select_func(fp);
 
 	/* eBPF JITs can rewrite the program in case constant
 	 * blinding is active. However, in case of error during
@@ -1485,6 +1514,8 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
 		if (*err)
 			return fp;
 	}
+
+finalize:
 	bpf_prog_lock_ro(fp);
 
 	/* The tail call compatibility check can only be done at
@@ -1493,7 +1524,17 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
 	 * all eBPF JITs might immediately support all features.
 	 */
 	*err = bpf_check_tail_call(fp);
+	if (*err)
+		return fp;
 
+	/* Checkpoint: at this point onwards any cBPF -> eBPF or
+	 * native eBPF program is read-only. If we failed to change
+	 * the page attributes (e.g. allocation failure from
+	 * splitting large pages), then reject the whole program
+	 * in order to guarantee not ending up with any W+X pages
+	 * from BPF side in kernel.
+	 */
+	*err = bpf_prog_check_pages_ro_locked(fp);
 	return fp;
 }
 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -1353,8 +1353,6 @@ static int bpf_prog_load(union bpf_attr *attr)
 	if (err < 0)
 		goto free_used_maps;
 
-	/* eBPF program is ready to be JITed */
-	if (!prog->bpf_func)
-		prog = bpf_prog_select_runtime(prog, &err);
+	prog = bpf_prog_select_runtime(prog, &err);
 	if (err < 0)
 		goto free_used_maps;