Commit ee230090 authored by Alexei Starovoitov

Merge branch 'fix-truncation-bug-in-coerce_reg_to_size_sx-and-extend-selftests'

Dimitar Kanaliev says:

====================
Fix truncation bug in coerce_reg_to_size_sx and extend selftests.

This patch series addresses a truncation bug in the eBPF verifier function
coerce_reg_to_size_sx(). The issue was caused by the incorrect ordering
of assignments between 32-bit and 64-bit min/max values, leading to
improper truncation when updating the register state. This issue has been
reported previously by Zac Ecob [1], but was not followed up on.

The first patch fixes the assignment order in coerce_reg_to_size_sx()
to ensure correct truncation. The subsequent patches add selftests for
coerce_{reg,subreg}_to_size_sx.

Changelog:
	v1 -> v2:
	 - Moved selftests inside the conditional check for cpuv4

[1] (https://lore.kernel.org/bpf/h3qKLDEO6m9nhif0eAQX4fVrqdO0D_OPb0y5HfMK9jBePEKK33wQ3K-bqSVnr0hiZdFZtSJOsbNkcEQGpv_yJk61PAAiO8fUkgMRSO-lB50=@protonmail.com/)
====================

Link: https://lore.kernel.org/r/20241014121155.92887-1-dimitar.kanaliev@siteground.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents b836cbdf 35ccd576
@@ -6339,10 +6339,10 @@ static void coerce_reg_to_size_sx(struct bpf_reg_state *reg, int size)
 	/* both of s64_max/s64_min positive or negative */
 	if ((s64_max >= 0) == (s64_min >= 0)) {
-		reg->smin_value = reg->s32_min_value = s64_min;
-		reg->smax_value = reg->s32_max_value = s64_max;
-		reg->umin_value = reg->u32_min_value = s64_min;
-		reg->umax_value = reg->u32_max_value = s64_max;
+		reg->s32_min_value = reg->smin_value = s64_min;
+		reg->s32_max_value = reg->smax_value = s64_max;
+		reg->u32_min_value = reg->umin_value = s64_min;
+		reg->u32_max_value = reg->umax_value = s64_max;
 		reg->var_off = tnum_range(s64_min, s64_max);
 		return;
 	}
......
@@ -287,6 +287,46 @@ l0_%=: \
 	: __clobber_all);
 }
/*
 * Regression test for the coerce_reg_to_size_sx() truncation fix above:
 * verify that the unsigned range of a 64-bit register is tracked
 * correctly across a sign-extending 8-bit move (MOV64SX, S8).
 *
 * r0 is constrained to {0xfe, 0xff}; the (s8) cast sign-extends those
 * to 0xfffffffffffffffe / 0xffffffffffffffff, so the unsigned compare
 * below is always false and the program falls through to return 0.
 * A verifier that mis-truncates the range here would mis-prune one of
 * the two paths (__success __retval(0) catches that).
 */
SEC("socket")
__description("MOV64SX, S8, unsigned range_check")
__success __retval(0)
__naked void mov64sx_s8_range_check(void)
{
/* label_%= exits with the sign-extended (non-zero) value; it must be
 * provably unreachable for __retval(0) to hold.
 */
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r0 &= 0x1; \
r0 += 0xfe; \
r0 = (s8)r0; \
if r0 < 0xfffffffffffffffe goto label_%=; \
r0 = 0; \
exit; \
label_%=: \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
/*
 * 32-bit counterpart of the test above: verify subregister range
 * tracking across a sign-extending 8-bit move (MOV32SX, S8), i.e. the
 * coerce_subreg_to_size_sx() path.
 *
 * w0 is constrained to {0xfe, 0xff}; the (s8) cast sign-extends those
 * within 32 bits to 0xfffffffe / 0xffffffff, so the unsigned 32-bit
 * compare below is always false and the program returns 0.
 */
SEC("socket")
__description("MOV32SX, S8, unsigned range_check")
__success __retval(0)
__naked void mov32sx_s8_range_check(void)
{
/* label_%= is the must-be-unreachable branch, as in the 64-bit test. */
asm volatile (" \
call %[bpf_get_prandom_u32]; \
w0 &= 0x1; \
w0 += 0xfe; \
w0 = (s8)w0; \
if w0 < 0xfffffffe goto label_%=; \
r0 = 0; \
exit; \
label_%=: \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}
#else
SEC("socket")
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment