Commit 3e3f0695 authored by Peter Zijlstra

x86/ibt: Annotate text references

Annotate away some of the generic code references. These are cases
where we take the address of a symbol for exception handling or as a
return address (e.g. context switch).
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Josh Poimboeuf <jpoimboe@redhat.com>
Link: https://lore.kernel.org/r/20220308154318.877758523@infradead.org
parent fe379fa4
arch/x86/entry/entry_64.S
@@ -277,6 +277,7 @@ SYM_FUNC_END(__switch_to_asm)
 .pushsection .text, "ax"
 SYM_CODE_START(ret_from_fork)
 	UNWIND_HINT_EMPTY
+	ANNOTATE_NOENDBR // copy_thread
 	movq	%rax, %rdi
 	call	schedule_tail			/* rdi: 'prev' task parameter */
@@ -569,6 +570,7 @@ __irqentry_text_start:
 	.align 16
 	.globl __irqentry_text_end
 __irqentry_text_end:
+	ANNOTATE_NOENDBR
 
 SYM_CODE_START_LOCAL(common_interrupt_return)
 SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
@@ -650,6 +652,7 @@ SYM_INNER_LABEL(early_xen_iret_patch, SYM_L_GLOBAL)
 #endif
 
 SYM_INNER_LABEL(native_irq_return_iret, SYM_L_GLOBAL)
+	ANNOTATE_NOENDBR // exc_double_fault
 	/*
 	 * This may fault. Non-paranoid faults on return to userspace are
 	 * handled by fixup_bad_iret. These include #SS, #GP, and #NP.
@@ -744,6 +747,7 @@ SYM_FUNC_START(asm_load_gs_index)
 	FRAME_BEGIN
 	swapgs
 .Lgs_change:
+	ANNOTATE_NOENDBR // error_entry
 	movl	%edi, %gs
 2:	ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
 	swapgs
@@ -1322,6 +1326,7 @@ first_nmi:
 #endif
 
 repeat_nmi:
+	ANNOTATE_NOENDBR // this code
 	/*
 	 * If there was a nested NMI, the first NMI's iret will return
 	 * here. But NMIs are still enabled and we can take another
@@ -1350,6 +1355,7 @@ repeat_nmi:
 	.endr
 	subq	$(5*8), %rsp
 end_repeat_nmi:
+	ANNOTATE_NOENDBR // this code
 	/*
 	 * Everything below this point can be preempted by a nested NMI.

arch/x86/entry/entry_64_compat.S
@@ -148,6 +148,7 @@ SYM_INNER_LABEL(entry_SYSENTER_compat_after_hwframe, SYM_L_GLOBAL)
 	popfq
 	jmp	.Lsysenter_flags_fixed
 SYM_INNER_LABEL(__end_entry_SYSENTER_compat, SYM_L_GLOBAL)
+	ANNOTATE_NOENDBR // is_sysenter_singlestep
 SYM_CODE_END(entry_SYSENTER_compat)
 
 /*

arch/x86/kernel/alternative.c
@@ -713,6 +713,7 @@ asm (
 "	.pushsection	.init.text, \"ax\", @progbits\n"
 "	.type		int3_magic, @function\n"
 "int3_magic:\n"
+	ANNOTATE_NOENDBR
 "	movl	$1, (%" _ASM_ARG1 ")\n"
 	ASM_RET
 "	.size		int3_magic, .-int3_magic\n"
@@ -724,16 +725,19 @@ extern void int3_selftest_ip(void); /* defined in asm below */
 static int __init
 int3_exception_notify(struct notifier_block *self, unsigned long val, void *data)
 {
+	unsigned long selftest = (unsigned long)&int3_selftest_ip;
 	struct die_args *args = data;
 	struct pt_regs *regs = args->regs;
 
+	OPTIMIZER_HIDE_VAR(selftest);
+
 	if (!regs || user_mode(regs))
 		return NOTIFY_DONE;
 
 	if (val != DIE_INT3)
 		return NOTIFY_DONE;
 
-	if (regs->ip - INT3_INSN_SIZE != (unsigned long)&int3_selftest_ip)
+	if (regs->ip - INT3_INSN_SIZE != selftest)
 		return NOTIFY_DONE;
 
 	int3_emulate_call(regs, (unsigned long)&int3_magic);
@@ -757,7 +761,9 @@ static noinline void __init int3_selftest(void)
 	 * INT3 padded with NOP to CALL_INSN_SIZE. The int3_exception_nb
 	 * notifier above will emulate CALL for us.
 	 */
-	asm volatile ("int3_selftest_ip: int3; nop; nop; nop; nop\n\t"
+	asm volatile ("int3_selftest_ip:\n\t"
+		      ANNOTATE_NOENDBR
+		      "	int3; nop; nop; nop; nop\n\t"
 		      : ASM_CALL_CONSTRAINT
 		      : __ASM_SEL_RAW(a, D) (&val)
 		      : "memory");

arch/x86/kernel/head_64.S
@@ -99,6 +99,7 @@ SYM_CODE_END(startup_64)
 SYM_CODE_START(secondary_startup_64)
 	UNWIND_HINT_EMPTY
+	ANNOTATE_NOENDBR
 	/*
 	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
 	 * and someone has loaded a mapped page table.
@@ -127,6 +128,7 @@ SYM_CODE_START(secondary_startup_64)
 	 */
 SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
 	UNWIND_HINT_EMPTY
+	ANNOTATE_NOENDBR
 
 	/*
 	 * Retrieve the modifier (SME encryption mask if SME is active) to be
@@ -192,6 +194,7 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
 	jmp	*%rax
 1:
 	UNWIND_HINT_EMPTY
+	ANNOTATE_NOENDBR // above
 
 	/*
 	 * We must switch to a new descriptor in kernel space for the GDT
@@ -299,6 +302,7 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
 	pushq	%rax		# target address in negative space
 	lretq
 .Lafter_lret:
+	ANNOTATE_NOENDBR
 SYM_CODE_END(secondary_startup_64)
 
 #include "verify_cpu.S"

arch/x86/kernel/kprobes/core.c
@@ -1033,6 +1033,7 @@ asm(
 	".type __kretprobe_trampoline, @function\n"
 	"__kretprobe_trampoline:\n"
 #ifdef CONFIG_X86_64
+	ANNOTATE_NOENDBR
 	/* Push a fake return address to tell the unwinder it's a kretprobe. */
 	"	pushq $__kretprobe_trampoline\n"
 	UNWIND_HINT_FUNC

arch/x86/kernel/relocate_kernel_64.S
@@ -42,6 +42,7 @@
 	.code64
 SYM_CODE_START_NOALIGN(relocate_kernel)
 	UNWIND_HINT_EMPTY
+	ANNOTATE_NOENDBR
 	/*
 	 * %rdi indirection_page
 	 * %rsi page_list
@@ -223,6 +224,7 @@ SYM_CODE_END(identity_mapped)
 SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
 	UNWIND_HINT_EMPTY
+	ANNOTATE_NOENDBR // RET target, above
 	movq	RSP(%r8), %rsp
 	movq	CR4(%r8), %rax
 	movq	%rax, %cr4

lib/error-inject.c
@@ -3,6 +3,7 @@
 #include <linux/linkage.h>
 #include <linux/error-injection.h>
 #include <linux/kprobes.h>
+#include <linux/objtool.h>
 
 asmlinkage void just_return_func(void);
 
@@ -11,6 +12,7 @@ asm(
 	".type just_return_func, @function\n"
 	".globl just_return_func\n"
 	"just_return_func:\n"
+	ANNOTATE_NOENDBR
 	ASM_RET
 	".size just_return_func, .-just_return_func\n"
 );

arch/x86/lib/retpoline.S
@@ -55,6 +55,7 @@ SYM_INNER_LABEL(__x86_indirect_thunk_\reg, SYM_L_GLOBAL)
 	.align RETPOLINE_THUNK_SIZE
 SYM_CODE_START(__x86_indirect_thunk_array)
+	ANNOTATE_NOENDBR // apply_retpolines
 
 #define GEN(reg) THUNK reg
 #include <asm/GEN-for-each-reg.h>