Commit a038eacd authored by Eduard Zingerman, committed by Alexei Starovoitov

selftests/bpf: validate __xlated same way as __jited

Both __xlated and __jited work with disassembly.
It is logical to have both work in a similar manner.

This commit updates __xlated macro handling in test_loader.c by making
it expect matches on sequential lines, the same way __jited operates.
For example:

    __xlated("1: *(u64 *)(r10 -16) = r1")      ;; matched on line N
    __xlated("3: r0 = &(void __percpu *)(r0)") ;; matched on line N+1

Also:

    __xlated("1: *(u64 *)(r10 -16) = r1")      ;; matched on line N
    __xlated("...")                            ;; not matched
    __xlated("3: r0 = &(void __percpu *)(r0)") ;; mantched on any
                                               ;; line >= N
Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20240820102357.3372779-10-eddyz87@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent e5bdd6a8
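
To make the matching rules above concrete, here is a small standalone sketch of
the sequential matching semantics (illustrative only: the helper and variable
names are made up, and the real matcher in test_loader.c is regex based and
walks the disassembly log rather than a string array):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /*
     * A "..." entry means the following pattern may match on any later line
     * instead of the line right after the previous match.
     */
    static bool match_disasm(const char **lines, int n_lines,
    			 const char **expect, int n_expect)
    {
    	bool pin_to_next = false;	/* first pattern may match anywhere */
    	int cur = 0;

    	for (int i = 0; i < n_expect; i++) {
    		if (strcmp(expect[i], "...") == 0) {
    			pin_to_next = false;
    			continue;
    		}
    		if (pin_to_next) {
    			/* must match the line right after the previous match */
    			if (cur >= n_lines || !strstr(lines[cur], expect[i]))
    				return false;
    		} else {
    			/* scan forward for the first matching line */
    			while (cur < n_lines && !strstr(lines[cur], expect[i]))
    				cur++;
    			if (cur >= n_lines)
    				return false;
    		}
    		cur++;
    		pin_to_next = true;
    	}
    	return true;
    }

    int main(void)
    {
    	const char *xlated[] = {
    		"0: r1 = 1",
    		"1: *(u64 *)(r10 -16) = r1",
    		"2: call bpf_get_smp_processor_id",
    		"3: r0 = &(void __percpu *)(r0)",
    		"4: exit",
    	};
    	const char *with_gap[] = {
    		"1: *(u64 *)(r10 -16) = r1",
    		"...",
    		"3: r0 = &(void __percpu *)(r0)",
    	};
    	const char *adjacent[] = {
    		"1: *(u64 *)(r10 -16) = r1",
    		"3: r0 = &(void __percpu *)(r0)",
    	};

    	printf("with \"...\": %s\n", match_disasm(xlated, 5, with_gap, 3) ? "match" : "no match");
    	printf("adjacent:   %s\n", match_disasm(xlated, 5, adjacent, 2) ? "match" : "no match");
    	return 0;
    }

Run as written, the first expectation list matches because the "..." entry lets
"3: ..." land two lines later, while the second does not, because "3: ..." is
not on the line immediately after the previous match.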
@@ -78,6 +78,7 @@ __naked void canary_arm64_riscv64(void)
 SEC("raw_tp")
 __arch_x86_64
 __xlated("1: r0 = &(void __percpu *)(r0)")
+__xlated("...")
 __xlated("3: exit")
 __success
 __naked void canary_zero_spills(void)
@@ -94,7 +95,9 @@ SEC("raw_tp")
 __arch_x86_64
 __log_level(4) __msg("stack depth 16")
 __xlated("1: *(u64 *)(r10 -16) = r1")
+__xlated("...")
 __xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
 __xlated("5: r2 = *(u64 *)(r10 -16)")
 __success
 __naked void wrong_reg_in_pattern1(void)
@@ -113,7 +116,9 @@ __naked void wrong_reg_in_pattern1(void)
 SEC("raw_tp")
 __arch_x86_64
 __xlated("1: *(u64 *)(r10 -16) = r6")
+__xlated("...")
 __xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
 __xlated("5: r6 = *(u64 *)(r10 -16)")
 __success
 __naked void wrong_reg_in_pattern2(void)
@@ -132,7 +137,9 @@ __naked void wrong_reg_in_pattern2(void)
 SEC("raw_tp")
 __arch_x86_64
 __xlated("1: *(u64 *)(r10 -16) = r0")
+__xlated("...")
 __xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
 __xlated("5: r0 = *(u64 *)(r10 -16)")
 __success
 __naked void wrong_reg_in_pattern3(void)
@@ -151,7 +158,9 @@ __naked void wrong_reg_in_pattern3(void)
 SEC("raw_tp")
 __arch_x86_64
 __xlated("2: *(u64 *)(r2 -16) = r1")
+__xlated("...")
 __xlated("4: r0 = &(void __percpu *)(r0)")
+__xlated("...")
 __xlated("6: r1 = *(u64 *)(r10 -16)")
 __success
 __naked void wrong_base_in_pattern(void)
@@ -171,7 +180,9 @@ __naked void wrong_base_in_pattern(void)
 SEC("raw_tp")
 __arch_x86_64
 __xlated("1: *(u64 *)(r10 -16) = r1")
+__xlated("...")
 __xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
 __xlated("5: r2 = 1")
 __success
 __naked void wrong_insn_in_pattern(void)
@@ -191,7 +202,9 @@ __naked void wrong_insn_in_pattern(void)
 SEC("raw_tp")
 __arch_x86_64
 __xlated("2: *(u64 *)(r10 -16) = r1")
+__xlated("...")
 __xlated("4: r0 = &(void __percpu *)(r0)")
+__xlated("...")
 __xlated("6: r1 = *(u64 *)(r10 -8)")
 __success
 __naked void wrong_off_in_pattern1(void)
@@ -211,7 +224,9 @@ __naked void wrong_off_in_pattern1(void)
 SEC("raw_tp")
 __arch_x86_64
 __xlated("1: *(u32 *)(r10 -4) = r1")
+__xlated("...")
 __xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
 __xlated("5: r1 = *(u32 *)(r10 -4)")
 __success
 __naked void wrong_off_in_pattern2(void)
@@ -230,7 +245,9 @@ __naked void wrong_off_in_pattern2(void)
 SEC("raw_tp")
 __arch_x86_64
 __xlated("1: *(u32 *)(r10 -16) = r1")
+__xlated("...")
 __xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
 __xlated("5: r1 = *(u32 *)(r10 -16)")
 __success
 __naked void wrong_size_in_pattern(void)
@@ -249,7 +266,9 @@ __naked void wrong_size_in_pattern(void)
 SEC("raw_tp")
 __arch_x86_64
 __xlated("2: *(u32 *)(r10 -8) = r1")
+__xlated("...")
 __xlated("4: r0 = &(void __percpu *)(r0)")
+__xlated("...")
 __xlated("6: r1 = *(u32 *)(r10 -8)")
 __success
 __naked void partial_pattern(void)
@@ -275,11 +294,15 @@ __xlated("1: r2 = 2")
 /* not patched, spills for -8, -16 not removed */
 __xlated("2: *(u64 *)(r10 -8) = r1")
 __xlated("3: *(u64 *)(r10 -16) = r2")
+__xlated("...")
 __xlated("5: r0 = &(void __percpu *)(r0)")
+__xlated("...")
 __xlated("7: r2 = *(u64 *)(r10 -16)")
 __xlated("8: r1 = *(u64 *)(r10 -8)")
 /* patched, spills for -24, -32 removed */
+__xlated("...")
 __xlated("10: r0 = &(void __percpu *)(r0)")
+__xlated("...")
 __xlated("12: exit")
 __success
 __naked void min_stack_offset(void)
@@ -308,7 +331,9 @@ __naked void min_stack_offset(void)
 SEC("raw_tp")
 __arch_x86_64
 __xlated("1: *(u64 *)(r10 -8) = r1")
+__xlated("...")
 __xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
 __xlated("5: r1 = *(u64 *)(r10 -8)")
 __success
 __naked void bad_fixed_read(void)
@@ -330,7 +355,9 @@ __naked void bad_fixed_read(void)
 SEC("raw_tp")
 __arch_x86_64
 __xlated("1: *(u64 *)(r10 -8) = r1")
+__xlated("...")
 __xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
 __xlated("5: r1 = *(u64 *)(r10 -8)")
 __success
 __naked void bad_fixed_write(void)
@@ -352,7 +379,9 @@ __naked void bad_fixed_write(void)
 SEC("raw_tp")
 __arch_x86_64
 __xlated("6: *(u64 *)(r10 -16) = r1")
+__xlated("...")
 __xlated("8: r0 = &(void __percpu *)(r0)")
+__xlated("...")
 __xlated("10: r1 = *(u64 *)(r10 -16)")
 __success
 __naked void bad_varying_read(void)
@@ -379,7 +408,9 @@ __naked void bad_varying_read(void)
 SEC("raw_tp")
 __arch_x86_64
 __xlated("6: *(u64 *)(r10 -16) = r1")
+__xlated("...")
 __xlated("8: r0 = &(void __percpu *)(r0)")
+__xlated("...")
 __xlated("10: r1 = *(u64 *)(r10 -16)")
 __success
 __naked void bad_varying_write(void)
@@ -406,7 +437,9 @@ __naked void bad_varying_write(void)
 SEC("raw_tp")
 __arch_x86_64
 __xlated("1: *(u64 *)(r10 -8) = r1")
+__xlated("...")
 __xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
 __xlated("5: r1 = *(u64 *)(r10 -8)")
 __success
 __naked void bad_write_in_subprog(void)
@@ -438,7 +471,9 @@ __naked static void bad_write_in_subprog_aux(void)
 SEC("raw_tp")
 __arch_x86_64
 __xlated("1: *(u64 *)(r10 -8) = r1")
+__xlated("...")
 __xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
 __xlated("5: r1 = *(u64 *)(r10 -8)")
 __success
 __naked void bad_helper_write(void)
@@ -466,13 +501,19 @@ SEC("raw_tp")
 __arch_x86_64
 /* main, not patched */
 __xlated("1: *(u64 *)(r10 -8) = r1")
+__xlated("...")
 __xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
 __xlated("5: r1 = *(u64 *)(r10 -8)")
+__xlated("...")
 __xlated("9: call pc+1")
+__xlated("...")
 __xlated("10: exit")
 /* subprogram, patched */
 __xlated("11: r1 = 1")
+__xlated("...")
 __xlated("13: r0 = &(void __percpu *)(r0)")
+__xlated("...")
 __xlated("15: exit")
 __success
 __naked void invalidate_one_subprog(void)
@@ -510,12 +551,16 @@ SEC("raw_tp")
 __arch_x86_64
 /* main */
 __xlated("0: r1 = 1")
+__xlated("...")
 __xlated("2: r0 = &(void __percpu *)(r0)")
+__xlated("...")
 __xlated("4: call pc+1")
 __xlated("5: exit")
 /* subprogram */
 __xlated("6: r1 = 1")
+__xlated("...")
 __xlated("8: r0 = &(void __percpu *)(r0)")
+__xlated("...")
 __xlated("10: *(u64 *)(r10 -16) = r1")
 __xlated("11: exit")
 __success
@@ -576,7 +621,9 @@ __log_level(4) __msg("stack depth 16")
 /* may_goto counter at -16 */
 __xlated("0: *(u64 *)(r10 -16) =")
 __xlated("1: r1 = 1")
+__xlated("...")
 __xlated("3: r0 = &(void __percpu *)(r0)")
+__xlated("...")
 /* may_goto expansion starts */
 __xlated("5: r11 = *(u64 *)(r10 -16)")
 __xlated("6: if r11 == 0x0 goto pc+3")
@@ -623,13 +670,15 @@ __xlated("5: r0 = *(u32 *)(r0 +0)")
 __xlated("6: r2 =")
 __xlated("7: r3 = 0")
 __xlated("8: r4 = 0")
+__xlated("...")
 /* ... part of the inlined bpf_loop */
 __xlated("12: *(u64 *)(r10 -32) = r6")
 __xlated("13: *(u64 *)(r10 -24) = r7")
 __xlated("14: *(u64 *)(r10 -16) = r8")
-/* ... */
+__xlated("...")
 __xlated("21: call pc+8") /* dummy_loop_callback */
 /* ... last insns of the bpf_loop_interaction1 */
+__xlated("...")
 __xlated("28: r0 = 0")
 __xlated("29: exit")
 /* dummy_loop_callback */
@@ -670,7 +719,7 @@ __xlated("5: r0 = *(u32 *)(r0 +0)")
 __xlated("6: *(u64 *)(r10 -16) = r1")
 __xlated("7: call")
 __xlated("8: r1 = *(u64 *)(r10 -16)")
-/* ... */
+__xlated("...")
 /* ... part of the inlined bpf_loop */
 __xlated("15: *(u64 *)(r10 -40) = r6")
 __xlated("16: *(u64 *)(r10 -32) = r7")
@@ -365,6 +365,8 @@ static int parse_test_spec(struct test_loader *tester,
 	const char *description = NULL;
 	bool has_unpriv_result = false;
 	bool has_unpriv_retval = false;
+	bool unpriv_xlated_on_next_line = true;
+	bool xlated_on_next_line = true;
 	bool unpriv_jit_on_next_line;
 	bool jit_on_next_line;
 	bool collect_jit = false;
@@ -461,12 +463,14 @@ static int parse_test_spec(struct test_loader *tester,
 				spec->mode_mask |= UNPRIV;
 			}
 		} else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_XLATED_PFX))) {
-			err = push_msg(msg, &spec->priv.expect_xlated);
+			err = push_disasm_msg(msg, &xlated_on_next_line,
+					      &spec->priv.expect_xlated);
 			if (err)
 				goto cleanup;
 			spec->mode_mask |= PRIV;
 		} else if ((msg = skip_dynamic_pfx(s, TEST_TAG_EXPECT_XLATED_PFX_UNPRIV))) {
-			err = push_msg(msg, &spec->unpriv.expect_xlated);
+			err = push_disasm_msg(msg, &unpriv_xlated_on_next_line,
+					      &spec->unpriv.expect_xlated);
 			if (err)
 				goto cleanup;
 			spec->mode_mask |= UNPRIV;
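
The xlated_on_next_line/unpriv_xlated_on_next_line flags added above are
consumed by push_disasm_msg(), the helper already used for __jited earlier in
this series. A condensed sketch of that parse-time behavior follows (an
approximation of the idea, not the verbatim test_loader.c code; the name
push_disasm_msg_sketch and the fixed-size array are hypothetical):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical, condensed types; test_loader.c stores compiled regexes. */
    struct expect_msg {
    	const char *pattern;
    	bool on_next_line;	/* pinned to the line after the previous match */
    };

    static void push_disasm_msg_sketch(const char *pattern, bool *on_next_line,
    				   struct expect_msg *out, int *cnt)
    {
    	if (strcmp(pattern, "...") == 0) {
    		*on_next_line = false;	/* relax the constraint, store nothing */
    		return;
    	}
    	out[*cnt].pattern = pattern;
    	out[*cnt].on_next_line = *on_next_line;
    	(*cnt)++;
    	*on_next_line = true;		/* later patterns are pinned again */
    }

    int main(void)
    {
    	const char *tags[] = {
    		"1: *(u64 *)(r10 -16) = r1",
    		"...",
    		"3: r0 = &(void __percpu *)(r0)",
    	};
    	struct expect_msg msgs[8];
    	bool xlated_on_next_line = true;	/* mirrors the initialization above */
    	int cnt = 0;

    	for (int i = 0; i < 3; i++)
    		push_disasm_msg_sketch(tags[i], &xlated_on_next_line, msgs, &cnt);

    	for (int i = 0; i < cnt; i++)
    		printf("%-42s on_next_line=%d\n", msgs[i].pattern, msgs[i].on_next_line);
    	return 0;
    }

The point of the flags is that a "..." tag is never stored as an expectation of
its own; it only clears the pinned-to-next-line property of the expectation
that follows it, after which pinning resumes.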