Commit 4f999b76 authored by Andrii Nakryiko, committed by Alexei Starovoitov

selftests/bpf: make test_align selftest more robust

The test_align selftest relies on the BPF verifier log emitting register
states for specific instructions in an expected format. Unfortunately,
the BPF verifier's precision backtracking log interferes with such
expectations, and instructions on which precision propagation happens
sometimes don't output the full expected register states. This does look
like something to be improved in the BPF verifier, but that is beyond
the scope of this patch set.

So, to make test_align a bit more robust, inject a few dummy R4 = R5
instructions which capture the desired state of R5 and won't have
precision tracking logs on them. This fixes the tests until we can
improve BPF verifier output in the presence of precision tracking.
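
For reference, here is a minimal standalone sketch of the pattern this
commit relies on (a hypothetical snippet, not an actual patch hunk; it
assumes the BPF_MOV64_REG()/BPF_ALU64_*() instruction macros from the
selftests' copy of linux/filter.h):

#include <linux/bpf.h>		/* struct bpf_insn */
#include <linux/filter.h>	/* BPF_MOV64_REG(), BPF_ALU64_*() macros */

/* After an ALU op whose log output may be cluttered by precision
 * backtracking, copy R5 into a scratch register with a dummy move.
 * The verifier emits a clean register-state line for that no-op
 * instruction, so the expected-log match can be anchored there.
 */
static const struct bpf_insn pattern[] = {
	BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),	/* may trigger backtracking log */
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),		/* dummy R4 = R5; clean state dump */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),		/* test sequence continues */
};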
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20221104163649.121784-7-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 7a830b53
@@ -2,7 +2,7 @@
 #include <test_progs.h>
 
 #define MAX_INSNS	512
-#define MAX_MATCHES	16
+#define MAX_MATCHES	24
 
 struct bpf_reg_match {
 	unsigned int line;
@@ -267,6 +267,7 @@ static struct bpf_align_test tests[] = {
			 */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
+			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
@@ -280,6 +281,7 @@ static struct bpf_align_test tests[] = {
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
+			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 4),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
@@ -311,44 +313,52 @@ static struct bpf_align_test tests[] = {
			{15, "R4=pkt(id=1,off=18,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
			{15, "R5=pkt(id=1,off=14,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
			/* Variable offset is added to R5 packet pointer,
-			 * resulting in auxiliary alignment of 4.
+			 * resulting in auxiliary alignment of 4. To avoid BPF
+			 * verifier's precision backtracking logging
+			 * interfering we also have a no-op R4 = R5
+			 * instruction to validate R5 state. We also check
+			 * that R4 is what it should be in such case.
			 */
-			{17, "R5_w=pkt(id=2,off=0,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
+			{18, "R4_w=pkt(id=2,off=0,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
+			{18, "R5_w=pkt(id=2,off=0,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
			/* Constant offset is added to R5, resulting in
			 * reg->off of 14.
			 */
-			{18, "R5_w=pkt(id=2,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
+			{19, "R5_w=pkt(id=2,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off
			 * (14) which is 16. Then the variable offset is 4-byte
			 * aligned, so the total offset is 4-byte aligned and
			 * meets the load's requirements.
			 */
-			{23, "R4=pkt(id=2,off=18,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
-			{23, "R5=pkt(id=2,off=14,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
+			{24, "R4=pkt(id=2,off=18,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
+			{24, "R5=pkt(id=2,off=14,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
			/* Constant offset is added to R5 packet pointer,
			 * resulting in reg->off value of 14.
			 */
-			{25, "R5_w=pkt(off=14,r=8"},
+			{26, "R5_w=pkt(off=14,r=8"},
			/* Variable offset is added to R5, resulting in a
-			 * variable offset of (4n).
+			 * variable offset of (4n). See comment for insn #18
+			 * for R4 = R5 trick.
			 */
-			{26, "R5_w=pkt(id=3,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
+			{28, "R4_w=pkt(id=3,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
+			{28, "R5_w=pkt(id=3,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
			/* Constant is added to R5 again, setting reg->off to 18. */
-			{27, "R5_w=pkt(id=3,off=18,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
+			{29, "R5_w=pkt(id=3,off=18,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
			/* And once more we add a variable; resulting var_off
			 * is still (4n), fixed offset is not changed.
			 * Also, we create a new reg->id.
			 */
-			{28, "R5_w=pkt(id=4,off=18,r=0,umax=2040,var_off=(0x0; 0x7fc)"},
+			{31, "R4_w=pkt(id=4,off=18,r=0,umax=2040,var_off=(0x0; 0x7fc)"},
+			{31, "R5_w=pkt(id=4,off=18,r=0,umax=2040,var_off=(0x0; 0x7fc)"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (18)
			 * which is 20. Then the variable offset is (4n), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
-			{33, "R4=pkt(id=4,off=22,r=22,umax=2040,var_off=(0x0; 0x7fc)"},
-			{33, "R5=pkt(id=4,off=18,r=22,umax=2040,var_off=(0x0; 0x7fc)"},
+			{35, "R4=pkt(id=4,off=22,r=22,umax=2040,var_off=(0x0; 0x7fc)"},
+			{35, "R5=pkt(id=4,off=18,r=22,umax=2040,var_off=(0x0; 0x7fc)"},
		},
	},
	{
@@ -681,6 +691,6 @@ void test_align(void)
		if (!test__start_subtest(test->descr))
			continue;
 
-		CHECK_FAIL(do_test_single(test));
+		ASSERT_OK(do_test_single(test), test->descr);
	}
 }
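
For context on how the {line, "substring"} pairs above are consumed: a
minimal sketch of the matching idea, assuming a bpf_reg_match shaped
like the struct in the first hunk and a newline-separated verifier log
(check_matches() is a hypothetical helper; the real do_test_single() in
this selftest walks the log lines more carefully):

#include <stdio.h>
#include <string.h>

struct bpf_reg_match {
	unsigned int line;	/* instruction index expected in the log */
	const char *match;	/* substring expected in that insn's state dump */
};

static int check_matches(const char *log, const struct bpf_reg_match *m, int n)
{
	char prefix[32];
	int i;

	for (i = 0; i < n; i++) {
		/* Verifier log lines start with "<insn index>: ". */
		snprintf(prefix, sizeof(prefix), "%u: ", m[i].line);
		const char *p = strstr(log, prefix);

		if (!p || !strstr(p, m[i].match))
			return -1;	/* expected register state not found */
	}
	return 0;
}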