Commit 3db9128f authored by Daniel Borkmann

Merge branch 'bpf-verifier-sec-fixes'

Alexei Starovoitov says:

====================
This patch set addresses a set of security vulnerabilities
in bpf verifier logic discovered by Jann Horn.
All of the patches are candidates for 4.14 stable.
====================
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parents 19c832ed 2255f8d5
@@ -15,11 +15,11 @@
  * In practice this is far bigger than any realistic pointer offset; this limit
  * ensures that umax_value + (int)off + (int)size cannot overflow a u64.
  */
-#define BPF_MAX_VAR_OFF	(1ULL << 31)
+#define BPF_MAX_VAR_OFF	(1 << 29)
 /* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO].  This ensures
  * that converting umax_value to int cannot overflow.
  */
-#define BPF_MAX_VAR_SIZ	INT_MAX
+#define BPF_MAX_VAR_SIZ	(1 << 29)
 /* Liveness marks, used for registers and spilled-regs (in stack slots).
  * Read marks propagate upwards until they find a write mark; they record that
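The tighter cap is what makes the overflow comment above actually hold once offsets and sizes are mixed in int arithmetic: with both constants at 1 << 29, even a handful of summed terms stays far below INT_MAX. A minimal userspace sketch of that headroom argument (illustrative only; the exact number of terms the verifier may add is an assumption here):

#include <limits.h>
#include <stdio.h>

int main(void)
{
	/* Old cap: a single value could reach 2^31, so even one
	 * (int)off + (int)size style sum could exceed INT_MAX. */
	long long old_cap = 1LL << 31;
	printf("old: off + size may reach %lld > INT_MAX (%d)\n",
	       old_cap + old_cap, INT_MAX);

	/* New cap: 2^29 leaves headroom; several summed terms still
	 * stay far below INT_MAX, so int arithmetic cannot wrap. */
	long long new_cap = 1LL << 29;
	printf("new: off + var_off + size stays at most %lld < INT_MAX\n",
	       new_cap * 3);
	return 0;
}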
@@ -1059,6 +1059,11 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
 		break;
 	case PTR_TO_STACK:
 		pointer_desc = "stack ";
+		/* The stack spill tracking logic in check_stack_write()
+		 * and check_stack_read() relies on stack accesses being
+		 * aligned.
+		 */
+		strict = true;
 		break;
 	default:
 		break;
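For intuition on why stack accesses must be strictly aligned here: spilled registers are tracked in BPF_REG_SIZE chunks, and a misaligned spill would straddle two slots that check_stack_write()/check_stack_read() account for separately. A toy model of the slot arithmetic (the slot_of() helper is hypothetical, not the verifier's actual bookkeeping):

#include <stdio.h>

#define BPF_REG_SIZE 8

/* Which 8-byte stack slot does byte (fp + off) fall into? */
static int slot_of(int off)
{
	return (-off - 1) / BPF_REG_SIZE;
}

int main(void)
{
	/* aligned 8-byte spill at fp-8: confined to a single slot */
	printf("8-byte spill at fp-8  touches slots %d..%d\n",
	       slot_of(-1), slot_of(-8));
	/* misaligned 8-byte spill at fp-12: straddles two slots */
	printf("8-byte spill at fp-12 touches slots %d..%d\n",
	       slot_of(-5), slot_of(-12));
	return 0;
}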
@@ -1067,6 +1072,29 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
 			   strict);
 }
 
+/* truncate register to smaller size (in bytes)
+ * must be called with size < BPF_REG_SIZE
+ */
+static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
+{
+	u64 mask;
+
+	/* clear high bits in bit representation */
+	reg->var_off = tnum_cast(reg->var_off, size);
+
+	/* fix arithmetic bounds */
+	mask = ((u64)1 << (size * 8)) - 1;
+	if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
+		reg->umin_value &= mask;
+		reg->umax_value &= mask;
+	} else {
+		reg->umin_value = 0;
+		reg->umax_value = mask;
+	}
+	reg->smin_value = reg->umin_value;
+	reg->smax_value = reg->umax_value;
+}
+
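The bounds fixup in coerce_reg_to_size() deserves a close look: when umin_value and umax_value agree on every bit the truncation discards, the masked bounds stay exact; when the range crosses a 2^(8*size) boundary, truncated values wrap and only [0, mask] is safe. A standalone userspace model of just that logic:

#include <stdint.h>
#include <stdio.h>

/* Userspace model of the bounds fixup in coerce_reg_to_size(). */
static void coerce_bounds(uint64_t *umin, uint64_t *umax, int size)
{
	uint64_t mask = ((uint64_t)1 << (size * 8)) - 1;

	if ((*umin & ~mask) == (*umax & ~mask)) {
		/* high bits equal: truncation keeps the ordering exact */
		*umin &= mask;
		*umax &= mask;
	} else {
		/* range crosses a 2^(8*size) boundary: truncated values
		 * wrap, so only [0, mask] is a safe approximation */
		*umin = 0;
		*umax = mask;
	}
}

int main(void)
{
	/* same high bits: [0x100000005, 0x10000000a] -> [5, 0xa] */
	uint64_t a_min = 0x100000005ULL, a_max = 0x10000000aULL;
	coerce_bounds(&a_min, &a_max, 4);
	printf("exact:   [%llu, %llu]\n",
	       (unsigned long long)a_min, (unsigned long long)a_max);

	/* crossing 2^32: [0xfffffffe, 0x100000001] -> [0, 0xffffffff] */
	uint64_t b_min = 0xfffffffeULL, b_max = 0x100000001ULL;
	coerce_bounds(&b_min, &b_max, 4);
	printf("widened: [%llu, %llu]\n",
	       (unsigned long long)b_min, (unsigned long long)b_max);
	return 0;
}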
 /* check whether memory at (regno + off) is accessible for t = (read | write)
  * if t==write, value_regno is a register which value is stored into memory
  * if t==read, value_regno is a register which will receive the value from memory
@@ -1200,9 +1228,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
 	if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
 	    regs[value_regno].type == SCALAR_VALUE) {
 		/* b/h/w load zero-extends, mark upper bits as known 0 */
-		regs[value_regno].var_off =
-			tnum_cast(regs[value_regno].var_off, size);
-		__update_reg_bounds(&regs[value_regno]);
+		coerce_reg_to_size(&regs[value_regno], size);
 	}
 	return err;
 }
@@ -1282,6 +1308,7 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
 		tnum_strn(tn_buf, sizeof(tn_buf), regs[regno].var_off);
 		verbose(env, "invalid variable stack read R%d var_off=%s\n",
 			regno, tn_buf);
+		return -EACCES;
 	}
 	off = regs[regno].off + regs[regno].var_off.value;
 	if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
@@ -1772,14 +1799,6 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
 	return 0;
 }
 
-static void coerce_reg_to_32(struct bpf_reg_state *reg)
-{
-	/* clear high 32 bits */
-	reg->var_off = tnum_cast(reg->var_off, 4);
-	/* Update bounds */
-	__update_reg_bounds(reg);
-}
-
 static bool signed_add_overflows(s64 a, s64 b)
 {
 	/* Do the add in u64, where overflow is well-defined */
@@ -1800,6 +1819,41 @@ static bool signed_sub_overflows(s64 a, s64 b)
 	return res > a;
 }
+static bool check_reg_sane_offset(struct bpf_verifier_env *env,
+				  const struct bpf_reg_state *reg,
+				  enum bpf_reg_type type)
+{
+	bool known = tnum_is_const(reg->var_off);
+	s64 val = reg->var_off.value;
+	s64 smin = reg->smin_value;
+
+	if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
+		verbose(env, "math between %s pointer and %lld is not allowed\n",
+			reg_type_str[type], val);
+		return false;
+	}
+
+	if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
+		verbose(env, "%s pointer offset %d is not allowed\n",
+			reg_type_str[type], reg->off);
+		return false;
+	}
+
+	if (smin == S64_MIN) {
+		verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
+			reg_type_str[type]);
+		return false;
+	}
+
+	if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
+		verbose(env, "value %lld makes %s pointer be out of bounds\n",
+			smin, reg_type_str[type]);
+		return false;
+	}
+
+	return true;
+}
+
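In effect, any known constant, accumulated fixed offset, or worst-case signed minimum beyond +/- BPF_MAX_VAR_OFF now fails pointer arithmetic up front. A hedged userspace model of the first of these checks (const_offset_is_sane() is illustrative scaffolding, not a kernel function):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BPF_MAX_VAR_OFF (1 << 29)

/* Userspace model of the constant-value check in check_reg_sane_offset():
 * a known scalar added to a pointer must stay within +/- BPF_MAX_VAR_OFF.
 */
static bool const_offset_is_sane(int64_t val)
{
	return val < BPF_MAX_VAR_OFF && val > -BPF_MAX_VAR_OFF;
}

int main(void)
{
	printf("off = 4096:    %s\n",
	       const_offset_is_sane(4096) ? "allowed" : "rejected");
	/* e.g. r1 += 0x20000000 on a map value pointer is now refused */
	printf("off = 1 << 29: %s\n",
	       const_offset_is_sane(1 << 29) ? "allowed" : "rejected");
	return 0;
}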
 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
  * Caller should also handle BPF_MOV case separately.
  * If we return -EACCES, caller may want to try again treating pointer as a
@@ -1868,6 +1922,10 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 	dst_reg->type = ptr_reg->type;
 	dst_reg->id = ptr_reg->id;
 
+	if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
+	    !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
+		return -EINVAL;
+
 	switch (opcode) {
 	case BPF_ADD:
 		/* We can take a fixed offset as long as it doesn't overflow
@@ -1998,12 +2056,19 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 		return -EACCES;
 	}
 
+	if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
+		return -EINVAL;
+
 	__update_reg_bounds(dst_reg);
 	__reg_deduce_bounds(dst_reg);
 	__reg_bound_offset(dst_reg);
 	return 0;
 }
 
+/* WARNING: This function does calculations on 64-bit values, but the actual
+ * execution may occur on 32-bit values. Therefore, things like bitshifts
+ * need extra checks in the 32-bit case.
+ */
 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 				      struct bpf_insn *insn,
 				      struct bpf_reg_state *dst_reg,
@@ -2014,12 +2079,8 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 	bool src_known, dst_known;
 	s64 smin_val, smax_val;
 	u64 umin_val, umax_val;
+	u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
 
-	if (BPF_CLASS(insn->code) != BPF_ALU64) {
-		/* 32-bit ALU ops are (32,32)->64 */
-		coerce_reg_to_32(dst_reg);
-		coerce_reg_to_32(&src_reg);
-	}
 	smin_val = src_reg.smin_value;
 	smax_val = src_reg.smax_value;
 	umin_val = src_reg.umin_value;
@@ -2027,6 +2088,12 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 	src_known = tnum_is_const(src_reg.var_off);
 	dst_known = tnum_is_const(dst_reg->var_off);
 
+	if (!src_known &&
+	    opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
+		__mark_reg_unknown(dst_reg);
+		return 0;
+	}
+
 	switch (opcode) {
 	case BPF_ADD:
 		if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
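The bail-out above is deliberately conservative: once the source is not a constant, precise bounds are only kept for ADD, SUB and AND, because operations such as MUL require reasoning about corner products and their overflow, which is exactly where the reported bugs lived. A small sketch of why MUL bounds are fiddly (pure illustration, not the verifier's algorithm):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Bounds tracking through MUL must consider all four corner
	 * products, plus 64-bit overflow in each; getting any of this
	 * wrong hands the program a bogus "proven" range. */
	int64_t dmin = -3, dmax = 5, smin = -7, smax = 2;
	int64_t c[4] = { dmin * smin, dmin * smax, dmax * smin, dmax * smax };
	int64_t lo = c[0], hi = c[0];

	for (int i = 1; i < 4; i++) {
		if (c[i] < lo)
			lo = c[i];
		if (c[i] > hi)
			hi = c[i];
	}
	printf("[-3,5] * [-7,2] -> [%lld, %lld]\n", (long long)lo, (long long)hi);
	return 0;
}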
@@ -2155,9 +2222,9 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 		__update_reg_bounds(dst_reg);
 		break;
 	case BPF_LSH:
-		if (umax_val > 63) {
-			/* Shifts greater than 63 are undefined.  This includes
-			 * shifts by a negative number.
+		if (umax_val >= insn_bitness) {
+			/* Shifts greater than 31 or 63 are undefined.
+			 * This includes shifts by a negative number.
 			 */
 			mark_reg_unknown(env, regs, insn->dst_reg);
 			break;
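Why the check is now against insn_bitness instead of a hard-coded 63: for a 32-bit ALU op, a shift count in 32..63 is already undefined at execution time, e.g. x86 masks the count to 5 bits, so the hardware may compute a shift by 0 rather than producing 0. A sketch of the hazard (this deliberately exercises behavior that is undefined in C, so the printed value may vary by compiler and target):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	volatile uint32_t shift = 32;	/* runtime value defeats constant folding */
	uint32_t val = 0x1234;

	/* Neither C nor the BPF 32-bit ALU guarantees a result here:
	 * x86 masks the count to 5 bits and computes val << 0, other
	 * targets may produce 0.  A verifier modeling this as a 64-bit
	 * shift (where val << 32 is a well-defined carry into the high
	 * word) would be wrong either way. */
	printf("0x1234 << 32 (as u32) = %#x\n", val << shift);
	return 0;
}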
@@ -2183,27 +2250,29 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 		__update_reg_bounds(dst_reg);
 		break;
 	case BPF_RSH:
-		if (umax_val > 63) {
-			/* Shifts greater than 63 are undefined.  This includes
-			 * shifts by a negative number.
+		if (umax_val >= insn_bitness) {
+			/* Shifts greater than 31 or 63 are undefined.
+			 * This includes shifts by a negative number.
 			 */
 			mark_reg_unknown(env, regs, insn->dst_reg);
 			break;
 		}
-		/* BPF_RSH is an unsigned shift, so make the appropriate casts */
-		if (dst_reg->smin_value < 0) {
-			if (umin_val) {
-				/* Sign bit will be cleared */
-				dst_reg->smin_value = 0;
-			} else {
-				/* Lost sign bit information */
-				dst_reg->smin_value = S64_MIN;
-				dst_reg->smax_value = S64_MAX;
-			}
-		} else {
-			dst_reg->smin_value =
-				(u64)(dst_reg->smin_value) >> umax_val;
-		}
+		/* BPF_RSH is an unsigned shift.  If the value in dst_reg might
+		 * be negative, then either:
+		 * 1) src_reg might be zero, so the sign bit of the result is
+		 *    unknown, so we lose our signed bounds
+		 * 2) it's known negative, thus the unsigned bounds capture the
+		 *    signed bounds
+		 * 3) the signed bounds cross zero, so they tell us nothing
+		 *    about the result
+		 * If the value in dst_reg is known nonnegative, then again the
+		 * unsigned bounds capture the signed bounds.
+		 * Thus, in all cases it suffices to blow away our signed bounds
+		 * and rely on inferring new ones from the unsigned bounds and
+		 * var_off of the result.
+		 */
+		dst_reg->smin_value = S64_MIN;
+		dst_reg->smax_value = S64_MAX;
 		if (src_known)
 			dst_reg->var_off = tnum_rshift(dst_reg->var_off,
 						       umin_val);
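The case analysis in the new comment can be checked concretely: once dst_reg may be negative and the shift count may be zero (case 1), the same input can produce both a negative and a positive result, so no pre-shift signed bound survives. A tiny userspace check of that case:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t v = -1;	/* a possibly-negative dst_reg value */

	/* With src_reg possibly zero, the same input yields one negative
	 * and one positive result, so any pre-shift signed bound on
	 * dst_reg tells us nothing about the result's sign. */
	int64_t r0 = (int64_t)((uint64_t)v >> 0);	/* -1: still negative */
	int64_t r1 = (int64_t)((uint64_t)v >> 1);	/* INT64_MAX: positive */

	printf("(u64)-1 >> 0 = %lld, (u64)-1 >> 1 = %lld\n",
	       (long long)r0, (long long)r1);
	return 0;
}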
@@ -2219,6 +2288,12 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 		break;
 	}
 
+	if (BPF_CLASS(insn->code) != BPF_ALU64) {
+		/* 32-bit ALU ops are (32,32)->32 */
+		coerce_reg_to_size(dst_reg, 4);
+		coerce_reg_to_size(&src_reg, 4);
+	}
+
 	__reg_deduce_bounds(dst_reg);
 	__reg_bound_offset(dst_reg);
 	return 0;
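Moving the coercion after the operation matters: a 32-bit ALU op is (32,32)->32, so the tracked value must be truncated after the 64-bit math, or the bounds can contain values a 32-bit register can never hold. A sketch of the discrepancy the old (32,32)->64 ordering allowed, assuming wrapping u32 semantics for BPF 32-bit ALU ops:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dst = 0xffffffffULL, src = 1;

	/* old model: truncate the inputs, then add in 64 bits. The
	 * result, 0x100000000, is a value no 32-bit register holds. */
	uint64_t old_model = (uint32_t)dst + (uint64_t)(uint32_t)src;

	/* new model: add, then truncate the result, matching the
	 * (32,32)->32 semantics of a 32-bit BPF ALU op */
	uint64_t new_model = (uint32_t)(dst + src);

	printf("old tracked value: %#llx, actual 32-bit result: %#llx\n",
	       (unsigned long long)old_model, (unsigned long long)new_model);
	return 0;
}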
@@ -2396,17 +2471,20 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
 					return -EACCES;
 				}
 				mark_reg_unknown(env, regs, insn->dst_reg);
-				/* high 32 bits are known zero. */
-				regs[insn->dst_reg].var_off = tnum_cast(
-					regs[insn->dst_reg].var_off, 4);
-				__update_reg_bounds(&regs[insn->dst_reg]);
+				coerce_reg_to_size(&regs[insn->dst_reg], 4);
 			}
 		} else {
 			/* case: R = imm
 			 * remember the value we stored into this reg
 			 */
 			regs[insn->dst_reg].type = SCALAR_VALUE;
-			__mark_reg_known(regs + insn->dst_reg, insn->imm);
+			if (BPF_CLASS(insn->code) == BPF_ALU64) {
+				__mark_reg_known(regs + insn->dst_reg,
+						 insn->imm);
+			} else {
+				__mark_reg_known(regs + insn->dst_reg,
+						 (u32)insn->imm);
+			}
 		}
 
 	} else if (opcode > BPF_END) {
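The __mark_reg_known() split fixes sign extension of immediates: insn->imm is a signed 32-bit field, so a 64-bit MOV of -1 genuinely loads all-ones, while the 32-bit MOV zero-extends and must record (u32)-1. A userspace sketch of the two conversions (assuming only that the tracked known value is a u64):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int32_t imm = -1;	/* insn->imm is a signed 32-bit field */

	/* BPF_ALU64 MOV: sign-extend, the register really is all-ones */
	uint64_t alu64 = (uint64_t)(int64_t)imm;

	/* BPF_ALU (32-bit) MOV: the upper half is zeroed, so the known
	 * value must be the zero-extended (u32)imm */
	uint64_t alu32 = (uint64_t)(uint32_t)imm;

	printf("mov64 r0, -1 -> %#llx\nmov32 r0, -1 -> %#llx\n",
	       (unsigned long long)alu64, (unsigned long long)alu32);
	return 0;
}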
@@ -3437,15 +3515,14 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
 			return range_within(rold, rcur) &&
 			       tnum_in(rold->var_off, rcur->var_off);
 		} else {
-			/* if we knew anything about the old value, we're not
-			 * equal, because we can't know anything about the
-			 * scalar value of the pointer in the new value.
-			 */
-			return rold->umin_value == 0 &&
-			       rold->umax_value == U64_MAX &&
-			       rold->smin_value == S64_MIN &&
-			       rold->smax_value == S64_MAX &&
-			       tnum_is_unknown(rold->var_off);
+			/* We're trying to use a pointer in place of a scalar.
+			 * Even if the scalar was unbounded, this could lead to
+			 * pointer leaks because scalars are allowed to leak
+			 * while pointers are not. We could make this safe in
+			 * special cases if root is calling us, but it's
+			 * probably not worth the hassle.
+			 */
+			return false;
 		}
 	case PTR_TO_MAP_VALUE:
 		/* If the new min/max/var_off satisfy the old ones and