Commit 6219d055 authored by David S. Miller

Merge branch 'bpf-fixes'

Daniel Borkmann says:

====================
Couple of BPF fixes

This set contains three BPF fixes for net: the first addresses
Geert's complaint about large static allocations, the second fixes a
memory accounting issue that I found recently during testing, and the
third corrects the map value marking in the verifier. (Illustrative
sketches of the first two fixes follow the commit metadata below.)

Thanks!

v1 -> v2:
  - Patch 1 as is.
  - Fixed the kbuild bot issue by keeping the charging helpers in
    syscall.c, where locked_vm is valid, and exporting only the ones
    needed by bpf_prog_realloc(). Added empty stubs for the case where
    the bpf syscall is not enabled.
  - Added patch 3, which addresses one more issue in map value marking.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 40e972ab 6760bf2d
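
Before the diff, two short userspace sketches of the ideas in this set.
First, the static-allocation fix: bpf_prog_calc_digest() used to hash the
program into static scratch buffers sized for the largest possible program
(BPF_MAXINSNS instructions, roughly 32 KiB of BSS, which was Geert's
complaint), and now vmalloc()s a buffer sized for the program at hand,
returning -ENOMEM on failure. A minimal sketch of the sizing and padding
logic, assuming SHA-1's 64-byte block size; the names and the malloc()
stand-in for vmalloc() are illustrative, not the kernel code:

  #include <stdint.h>
  #include <stdlib.h>
  #include <string.h>

  #define SHA_MESSAGE_BYTES 64    /* SHA-1 block size */

  static size_t round_up_to(size_t n, size_t mult)
  {
          return (n + mult - 1) / mult * mult;
  }

  /* Mirrors bpf_prog_digest_scratch_size(): instruction bytes, plus one
   * 0x80 padding marker, plus a 64-bit length field, rounded up to whole
   * SHA blocks. */
  static size_t scratch_size(size_t insn_bytes)
  {
          return round_up_to(insn_bytes + sizeof(uint64_t) + 1,
                             SHA_MESSAGE_BYTES);
  }

  static int calc_digest(const uint8_t *insns, size_t insn_bytes)
  {
          size_t raw_size = scratch_size(insn_bytes);
          uint8_t *raw = malloc(raw_size);        /* kernel: vmalloc() */

          if (!raw)
                  return -1;                      /* kernel: -ENOMEM */

          memcpy(raw, insns, insn_bytes);
          memset(raw + insn_bytes, 0, raw_size - insn_bytes);
          raw[insn_bytes] = 0x80;                 /* SHA padding marker */
          /* ... run the SHA block function over raw_size bytes ... */

          free(raw);
          return 0;
  }

  int main(void)
  {
          const uint8_t insns[16] = { 0 };        /* two dummy 8-byte insns */

          return calc_digest(insns, sizeof(insns));
  }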
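
Second, the accounting fix: bpf_prog_realloc() could grow a program's
allocation without the extra pages ever being charged against
RLIMIT_MEMLOCK. The charge/uncharge steps are therefore factored out as
__bpf_prog_charge()/__bpf_prog_uncharge(), and the realloc path charges
only the page delta, rolling the charge back again if the limit is hit
or the allocation fails. A standalone C model of the add-then-check
pattern, assuming C11 atomics; locked_pages and page_limit are
illustrative stand-ins for user->locked_vm and the rlimit-derived page
limit:

  #include <errno.h>
  #include <stdatomic.h>
  #include <stdio.h>

  static atomic_long locked_pages;        /* stands in for user->locked_vm */
  static const long page_limit = 16;      /* stands in for the memlock limit */

  static int charge(long pages)
  {
          /* Reserve first, then check the post-add sum. Because the add
           * is atomic, two racing chargers cannot both observe a value
           * under the limit and slip past it together. */
          long after = atomic_fetch_add(&locked_pages, pages) + pages;

          if (after > page_limit) {
                  atomic_fetch_sub(&locked_pages, pages); /* roll back */
                  return -EPERM;
          }
          return 0;
  }

  static void uncharge(long pages)
  {
          atomic_fetch_sub(&locked_pages, pages);
  }

  int main(void)
  {
          /* A grow path in the spirit of bpf_prog_realloc(): charge only
           * the delta, and uncharge it if the reallocation fails. */
          if (charge(10))
                  return 1;
          if (charge(10) == -EPERM)
                  puts("second charge rejected, counter rolled back");
          uncharge(10);
          return 0;
  }

Note that the atomic add-and-return (atomic_long_add_return() in the
kernel) makes the reservation and the limit check a single race-free
step; it also replaces the older add-then-read sequence that
bpf_prog_charge_memlock() used before this set.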
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -216,7 +216,7 @@ u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
 u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
 
 bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
-void bpf_prog_calc_digest(struct bpf_prog *fp);
+int bpf_prog_calc_digest(struct bpf_prog *fp);
 
 const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
@@ -238,6 +238,8 @@ struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i);
 void bpf_prog_sub(struct bpf_prog *prog, int i);
 struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog);
 void bpf_prog_put(struct bpf_prog *prog);
+int __bpf_prog_charge(struct user_struct *user, u32 pages);
+void __bpf_prog_uncharge(struct user_struct *user, u32 pages);
 
 struct bpf_map *bpf_map_get_with_uref(u32 ufd);
 struct bpf_map *__bpf_map_get(struct fd f);
@@ -318,6 +320,15 @@ static inline struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog)
 {
 	return ERR_PTR(-EOPNOTSUPP);
 }
+
+static inline int __bpf_prog_charge(struct user_struct *user, u32 pages)
+{
+	return 0;
+}
+
+static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
+{
+}
 #endif /* CONFIG_BPF_SYSCALL */
 
 /* verifier prototypes for helper functions called from eBPF programs */
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -57,9 +57,6 @@ struct bpf_prog_aux;
 /* BPF program can access up to 512 bytes of stack space. */
 #define MAX_BPF_STACK	512
 
-/* Maximum BPF program size in bytes. */
-#define MAX_BPF_SIZE	(BPF_MAXINSNS * sizeof(struct bpf_insn))
-
 /* Helper macros for filter block array initializers. */
 
 /* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
@@ -517,6 +514,17 @@ static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
 	return BPF_PROG_RUN(prog, xdp);
 }
 
+static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
+{
+	return prog->len * sizeof(struct bpf_insn);
+}
+
+static inline u32 bpf_prog_digest_scratch_size(const struct bpf_prog *prog)
+{
+	return round_up(bpf_prog_insn_size(prog) +
+			sizeof(__be64) + 1, SHA_MESSAGE_BYTES);
+}
+
 static inline unsigned int bpf_prog_size(unsigned int proglen)
 {
 	return max(sizeof(struct bpf_prog),
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -105,19 +105,29 @@ struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
 	gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
 			  gfp_extra_flags;
 	struct bpf_prog *fp;
+	u32 pages, delta;
+	int ret;
 
 	BUG_ON(fp_old == NULL);
 
 	size = round_up(size, PAGE_SIZE);
-	if (size <= fp_old->pages * PAGE_SIZE)
+	pages = size / PAGE_SIZE;
+	if (pages <= fp_old->pages)
 		return fp_old;
 
+	delta = pages - fp_old->pages;
+	ret = __bpf_prog_charge(fp_old->aux->user, delta);
+	if (ret)
+		return NULL;
+
 	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
-	if (fp != NULL) {
+	if (fp == NULL) {
+		__bpf_prog_uncharge(fp_old->aux->user, delta);
+	} else {
 		kmemcheck_annotate_bitfield(fp, meta);
 
 		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
-		fp->pages = size / PAGE_SIZE;
+		fp->pages = pages;
 		fp->aux->prog = fp;
 
 		/* We keep fp->aux from fp_old around in the new
@@ -136,28 +146,29 @@ void __bpf_prog_free(struct bpf_prog *fp)
 	vfree(fp);
 }
 
-#define SHA_BPF_RAW_SIZE \
-	round_up(MAX_BPF_SIZE + sizeof(__be64) + 1, SHA_MESSAGE_BYTES)
-
-/* Called under verifier mutex. */
-void bpf_prog_calc_digest(struct bpf_prog *fp)
+int bpf_prog_calc_digest(struct bpf_prog *fp)
 {
 	const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
-	static u32 ws[SHA_WORKSPACE_WORDS];
-	static u8 raw[SHA_BPF_RAW_SIZE];
-	struct bpf_insn *dst = (void *)raw;
+	u32 raw_size = bpf_prog_digest_scratch_size(fp);
+	u32 ws[SHA_WORKSPACE_WORDS];
 	u32 i, bsize, psize, blocks;
+	struct bpf_insn *dst;
 	bool was_ld_map;
-	u8 *todo = raw;
+	u8 *raw, *todo;
 	__be32 *result;
 	__be64 *bits;
 
+	raw = vmalloc(raw_size);
+	if (!raw)
+		return -ENOMEM;
+
 	sha_init(fp->digest);
 	memset(ws, 0, sizeof(ws));
 
 	/* We need to take out the map fd for the digest calculation
 	 * since they are unstable from user space side.
 	 */
+	dst = (void *)raw;
 	for (i = 0, was_ld_map = false; i < fp->len; i++) {
 		dst[i] = fp->insnsi[i];
 		if (!was_ld_map &&
@@ -177,12 +188,13 @@ void bpf_prog_calc_digest(struct bpf_prog *fp)
 		}
 	}
 
-	psize = fp->len * sizeof(struct bpf_insn);
-	memset(&raw[psize], 0, sizeof(raw) - psize);
+	psize = bpf_prog_insn_size(fp);
+	memset(&raw[psize], 0, raw_size - psize);
 	raw[psize++] = 0x80;
 
 	bsize = round_up(psize, SHA_MESSAGE_BYTES);
 	blocks = bsize / SHA_MESSAGE_BYTES;
+	todo = raw;
 	if (bsize - psize >= sizeof(__be64)) {
 		bits = (__be64 *)(todo + bsize - sizeof(__be64));
 	} else {
@@ -199,6 +211,9 @@ void bpf_prog_calc_digest(struct bpf_prog *fp)
 	result = (__force __be32 *)fp->digest;
 	for (i = 0; i < SHA_DIGEST_WORDS; i++)
 		result[i] = cpu_to_be32(fp->digest[i]);
+
+	vfree(raw);
+	return 0;
 }
 
 static bool bpf_is_jmp_and_has_target(const struct bpf_insn *insn)
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -615,19 +615,39 @@ static void free_used_maps(struct bpf_prog_aux *aux)
 	kfree(aux->used_maps);
 }
 
+int __bpf_prog_charge(struct user_struct *user, u32 pages)
+{
+	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+	unsigned long user_bufs;
+
+	if (user) {
+		user_bufs = atomic_long_add_return(pages, &user->locked_vm);
+		if (user_bufs > memlock_limit) {
+			atomic_long_sub(pages, &user->locked_vm);
+			return -EPERM;
+		}
+	}
+
+	return 0;
+}
+
+void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
+{
+	if (user)
+		atomic_long_sub(pages, &user->locked_vm);
+}
+
 static int bpf_prog_charge_memlock(struct bpf_prog *prog)
 {
 	struct user_struct *user = get_current_user();
-	unsigned long memlock_limit;
+	int ret;
 
-	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
-
-	atomic_long_add(prog->pages, &user->locked_vm);
-	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
-		atomic_long_sub(prog->pages, &user->locked_vm);
+	ret = __bpf_prog_charge(user, prog->pages);
+	if (ret) {
 		free_uid(user);
-		return -EPERM;
+		return ret;
 	}
+
 	prog->aux->user = user;
 	return 0;
 }
@@ -636,7 +656,7 @@ static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
 {
 	struct user_struct *user = prog->aux->user;
 
-	atomic_long_sub(prog->pages, &user->locked_vm);
+	__bpf_prog_uncharge(user, prog->pages);
 	free_uid(user);
 }
@@ -811,7 +831,7 @@ static int bpf_prog_load(union bpf_attr *attr)
 	err = -EFAULT;
 	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
-			   prog->len * sizeof(struct bpf_insn)) != 0)
+			   bpf_prog_insn_size(prog)) != 0)
 		goto free_prog;
 
 	prog->orig_prog = NULL;
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -462,14 +462,19 @@ static void init_reg_state(struct bpf_reg_state *regs)
 	regs[BPF_REG_1].type = PTR_TO_CTX;
 }
 
-static void mark_reg_unknown_value(struct bpf_reg_state *regs, u32 regno)
+static void __mark_reg_unknown_value(struct bpf_reg_state *regs, u32 regno)
 {
-	BUG_ON(regno >= MAX_BPF_REG);
 	regs[regno].type = UNKNOWN_VALUE;
 	regs[regno].id = 0;
 	regs[regno].imm = 0;
 }
 
+static void mark_reg_unknown_value(struct bpf_reg_state *regs, u32 regno)
+{
+	BUG_ON(regno >= MAX_BPF_REG);
+	__mark_reg_unknown_value(regs, regno);
+}
+
 static void reset_reg_range_values(struct bpf_reg_state *regs, u32 regno)
 {
 	regs[regno].min_value = BPF_REGISTER_MIN_RANGE;
@@ -1976,7 +1981,7 @@ static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id,
 		 */
 		reg->id = 0;
 		if (type == UNKNOWN_VALUE)
-			mark_reg_unknown_value(regs, regno);
+			__mark_reg_unknown_value(regs, regno);
 	}
 }
@@ -2931,6 +2936,10 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
 	int insn_cnt = env->prog->len;
 	int i, j, err;
 
+	err = bpf_prog_calc_digest(env->prog);
+	if (err)
+		return err;
+
 	for (i = 0; i < insn_cnt; i++, insn++) {
 		if (BPF_CLASS(insn->code) == BPF_LDX &&
 		    (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
@@ -3178,8 +3187,6 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
 		log_level = 0;
 	}
 
-	bpf_prog_calc_digest(env->prog);
-
 	ret = replace_map_fd_with_map_ptr(env);
 	if (ret < 0)
 		goto skip_full_check;