Commit bc7b11c0 authored by Jiri Slaby, committed by Borislav Petkov

x86/asm/64: Change all ENTRY+END to SYM_CODE_*

Change all assembly code which is marked using END (and not ENDPROC).
Switch all of these to the appropriate new annotations, SYM_CODE_START
and SYM_CODE_END.
Signed-off-by: Jiri Slaby <jslaby@suse.cz>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com> [xen bits]
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Cao jin <caoj.fnst@cn.fujitsu.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: linux-arch@vger.kernel.org
Cc: Maran Wilson <maran.wilson@oracle.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: x86-ml <x86@kernel.org>
Cc: xen-devel@lists.xenproject.org
Link: https://lkml.kernel.org/r/20191011115108.12392-24-jslaby@suse.cz
parent 4aec216b
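
For readers new to the annotations: SYM_CODE_START() and SYM_CODE_END() come
from the generic symbol macros added to include/linux/linkage.h earlier in
this series. A condensed sketch of what they expand to (the real header goes
through intermediate SYM_START()/SYM_END()/SYM_ENTRY() layers and
per-architecture overrides, elided here):

/*
 * Condensed sketch of the include/linux/linkage.h macros, not the
 * verbatim definitions: SYM_CODE_START emits a global, aligned label
 * and SYM_CODE_END closes it with STT_NOTYPE instead of STT_FUNC.
 */
#define SYM_CODE_START(name)			\
	.globl name ASM_NL			\
	ALIGN ASM_NL				\
	name:

#define SYM_CODE_END(name)			\
	.type name SYM_T_NONE ASM_NL		\
	.size name, .-name

The STT_NOTYPE symbol type is the point: unlike SYM_FUNC_* (the ENDPROC
replacement for C-callable functions), SYM_CODE_* marks code with a
non-standard calling convention which objtool and the unwinder should not
treat as an ordinary C function and which carries its own UNWIND_HINT_*
annotations. The entry points, interrupt stubs and pseudo-tables converted
below are exactly that kind of code.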
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -46,11 +46,11 @@
 .section .entry.text, "ax"
 
 #ifdef CONFIG_PARAVIRT
-ENTRY(native_usergs_sysret64)
+SYM_CODE_START(native_usergs_sysret64)
 	UNWIND_HINT_EMPTY
 	swapgs
 	sysretq
-END(native_usergs_sysret64)
+SYM_CODE_END(native_usergs_sysret64)
 #endif /* CONFIG_PARAVIRT */
 
 .macro TRACE_IRQS_FLAGS flags:req
@@ -142,7 +142,7 @@ END(native_usergs_sysret64)
  * with them due to bugs in both AMD and Intel CPUs.
  */
-ENTRY(entry_SYSCALL_64)
+SYM_CODE_START(entry_SYSCALL_64)
 	UNWIND_HINT_EMPTY
 	/*
 	 * Interrupts are off on entry.
@@ -273,13 +273,13 @@ syscall_return_via_sysret:
 	popq	%rdi
 	popq	%rsp
 	USERGS_SYSRET64
-END(entry_SYSCALL_64)
+SYM_CODE_END(entry_SYSCALL_64)
 
 /*
  * %rdi: prev task
  * %rsi: next task
  */
-ENTRY(__switch_to_asm)
+SYM_CODE_START(__switch_to_asm)
 	UNWIND_HINT_FUNC
 	/*
 	 * Save callee-saved registers
@@ -321,7 +321,7 @@ ENTRY(__switch_to_asm)
 	popq	%rbp
 	jmp	__switch_to
-END(__switch_to_asm)
+SYM_CODE_END(__switch_to_asm)
 
 /*
  * A newly forked process directly context switches into this address.
@@ -330,7 +330,7 @@ END(__switch_to_asm)
  * rbx: kernel thread func (NULL for user thread)
  * r12: kernel thread arg
  */
-ENTRY(ret_from_fork)
+SYM_CODE_START(ret_from_fork)
 	UNWIND_HINT_EMPTY
 	movq	%rax, %rdi
 	call	schedule_tail	/* rdi: 'prev' task parameter */
@@ -357,14 +357,14 @@ ENTRY(ret_from_fork)
 	 */
 	movq	$0, RAX(%rsp)
 	jmp	2b
-END(ret_from_fork)
+SYM_CODE_END(ret_from_fork)
 
 /*
  * Build the entry stubs with some assembler magic.
  * We pack 1 stub into every 8-byte block.
  */
 	.align 8
-ENTRY(irq_entries_start)
+SYM_CODE_START(irq_entries_start)
 	vector=FIRST_EXTERNAL_VECTOR
 	.rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
 	UNWIND_HINT_IRET_REGS
@@ -373,10 +373,10 @@ ENTRY(irq_entries_start)
 	.align	8
 	vector=vector+1
 	.endr
-END(irq_entries_start)
+SYM_CODE_END(irq_entries_start)
 
 	.align 8
-ENTRY(spurious_entries_start)
+SYM_CODE_START(spurious_entries_start)
 	vector=FIRST_SYSTEM_VECTOR
 	.rept (NR_VECTORS - FIRST_SYSTEM_VECTOR)
 	UNWIND_HINT_IRET_REGS
@@ -385,7 +385,7 @@ ENTRY(spurious_entries_start)
 	.align	8
 	vector=vector+1
 	.endr
-END(spurious_entries_start)
+SYM_CODE_END(spurious_entries_start)
 
 .macro DEBUG_ENTRY_ASSERT_IRQS_OFF
 #ifdef CONFIG_DEBUG_ENTRY
@@ -511,7 +511,7 @@ END(spurious_entries_start)
  * | return address                                     |
  * +----------------------------------------------------+
  */
-ENTRY(interrupt_entry)
+SYM_CODE_START(interrupt_entry)
 	UNWIND_HINT_FUNC
 	ASM_CLAC
 	cld
@@ -579,7 +579,7 @@ ENTRY(interrupt_entry)
 	TRACE_IRQS_OFF
 	ret
-END(interrupt_entry)
+SYM_CODE_END(interrupt_entry)
 _ASM_NOKPROBE(interrupt_entry)
@@ -795,7 +795,7 @@ _ASM_NOKPROBE(common_interrupt)
  * APIC interrupts.
  */
 .macro apicinterrupt3 num sym do_sym
-ENTRY(\sym)
+SYM_CODE_START(\sym)
 	UNWIND_HINT_IRET_REGS
 	pushq	$~(\num)
 .Lcommon_\sym:
@@ -803,7 +803,7 @@ ENTRY(\sym)
 	UNWIND_HINT_REGS indirect=1
 	call	\do_sym	/* rdi points to pt_regs */
 	jmp	ret_from_intr
-END(\sym)
+SYM_CODE_END(\sym)
 _ASM_NOKPROBE(\sym)
 .endm
@@ -968,7 +968,7 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt
  * #DF: if the thread stack is somehow unusable, we'll still get a useful OOPS.
  */
 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 ist_offset=0 create_gap=0 read_cr2=0
-ENTRY(\sym)
+SYM_CODE_START(\sym)
 	UNWIND_HINT_IRET_REGS offset=\has_error_code*8
 
 	/* Sanity check */
@@ -1018,7 +1018,7 @@ ENTRY(\sym)
 	.endif
 _ASM_NOKPROBE(\sym)
-END(\sym)
+SYM_CODE_END(\sym)
 .endm
 
 idtentry divide_error do_divide_error has_error_code=0
@@ -1135,7 +1135,7 @@ SYM_CODE_END(xen_do_hypervisor_callback)
  * We distinguish between categories by comparing each saved segment register
 * with its current contents: any discrepancy means we in category 1.
  */
-ENTRY(xen_failsafe_callback)
+SYM_CODE_START(xen_failsafe_callback)
 	UNWIND_HINT_EMPTY
 	movl	%ds, %ecx
 	cmpw	%cx, 0x10(%rsp)
@@ -1165,7 +1165,7 @@ ENTRY(xen_failsafe_callback)
 	PUSH_AND_CLEAR_REGS
 	ENCODE_FRAME_POINTER
 	jmp	error_exit
-END(xen_failsafe_callback)
+SYM_CODE_END(xen_failsafe_callback)
 #endif /* CONFIG_XEN_PV */
 
 #ifdef CONFIG_XEN_PVHVM
@@ -1384,7 +1384,7 @@ SYM_CODE_END(error_exit)
  * %r14: Used to save/restore the CR3 of the interrupted context
 *       when PAGE_TABLE_ISOLATION is in use. Do not clobber.
  */
-ENTRY(nmi)
+SYM_CODE_START(nmi)
 	UNWIND_HINT_IRET_REGS
 
 	/*
@@ -1719,21 +1719,21 @@ nmi_restore:
 	 * about espfix64 on the way back to kernel mode.
 	 */
 	iretq
-END(nmi)
+SYM_CODE_END(nmi)
 
 #ifndef CONFIG_IA32_EMULATION
 /*
 * This handles SYSCALL from 32-bit code. There is no way to program
 * MSRs to fully disable 32-bit SYSCALL.
 */
-ENTRY(ignore_sysret)
+SYM_CODE_START(ignore_sysret)
 	UNWIND_HINT_EMPTY
 	mov	$-ENOSYS, %eax
 	sysret
-END(ignore_sysret)
+SYM_CODE_END(ignore_sysret)
 #endif
 
-ENTRY(rewind_stack_do_exit)
+SYM_CODE_START(rewind_stack_do_exit)
 	UNWIND_HINT_FUNC
 	/* Prevent any naive code from trying to unwind to our caller. */
 	xorl	%ebp, %ebp
@@ -1743,4 +1743,4 @@ ENTRY(rewind_stack_do_exit)
 	UNWIND_HINT_FUNC sp_offset=PTREGS_SIZE
 	call	do_exit
-END(rewind_stack_do_exit)
+SYM_CODE_END(rewind_stack_do_exit)
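
To make the macro hunks above concrete: after the conversion, an instantiation
such as "apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt"
(visible as hunk context above) stamps out a handler bracketed by the new
annotations. A hand-expanded sketch, with the interrupt-entry code that falls
between the two macro hunks elided:

/* Illustrative expansion of the converted apicinterrupt3 macro */
SYM_CODE_START(irq_work_interrupt)
	UNWIND_HINT_IRET_REGS
	pushq	$~(IRQ_WORK_VECTOR)	/* push complemented vector number */
.Lcommon_irq_work_interrupt:
	/* ... interrupt entry code elided between the two hunks above ... */
	UNWIND_HINT_REGS indirect=1
	call	smp_irq_work_interrupt	/* rdi points to pt_regs */
	jmp	ret_from_intr
SYM_CODE_END(irq_work_interrupt)
_ASM_NOKPROBE(irq_work_interrupt)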
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -196,7 +196,7 @@ ENDPROC(entry_SYSENTER_compat)
 * esp  user stack
 * 0(%esp) arg6
 */
-ENTRY(entry_SYSCALL_compat)
+SYM_CODE_START(entry_SYSCALL_compat)
 	/* Interrupts are off on entry. */
 	swapgs
@@ -311,7 +311,7 @@ sysret32_from_system_call:
 	xorl	%r10d, %r10d
 	swapgs
 	sysretl
-END(entry_SYSCALL_compat)
+SYM_CODE_END(entry_SYSCALL_compat)
 
 /*
 * 32-bit legacy system call entry.
@@ -339,7 +339,7 @@ END(entry_SYSCALL_compat)
 * edi  arg5
 * ebp  arg6
 */
-ENTRY(entry_INT80_compat)
+SYM_CODE_START(entry_INT80_compat)
 	/*
 	 * Interrupts are off on entry.
 	 */
@@ -416,4 +416,4 @@ ENTRY(entry_INT80_compat)
 	/* Go back to user mode. */
 	TRACE_IRQS_ON
 	jmp	swapgs_restore_regs_and_return_to_usermode
-END(entry_INT80_compat)
+SYM_CODE_END(entry_INT80_compat)
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -296,7 +296,7 @@ ENTRY(ftrace_graph_caller)
 	retq
 ENDPROC(ftrace_graph_caller)
 
-ENTRY(return_to_handler)
+SYM_CODE_START(return_to_handler)
 	UNWIND_HINT_EMPTY
 	subq	$24, %rsp
@@ -312,5 +312,5 @@ ENTRY(return_to_handler)
 	movq	(%rsp), %rax
 	addq	$24, %rsp
 	JMP_NOSPEC %rdi
-END(return_to_handler)
+SYM_CODE_END(return_to_handler)
 #endif
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -91,7 +91,7 @@ SYM_CODE_START_NOALIGN(startup_64)
 	jmp 1f
 SYM_CODE_END(startup_64)
 
-ENTRY(secondary_startup_64)
+SYM_CODE_START(secondary_startup_64)
 	UNWIND_HINT_EMPTY
 	/*
 	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
@@ -241,7 +241,7 @@ ENTRY(secondary_startup_64)
 	pushq	%rax	# target address in negative space
 	lretq
 .Lafter_lret:
-END(secondary_startup_64)
+SYM_CODE_END(secondary_startup_64)
 
 #include "verify_cpu.S"
@@ -251,11 +251,11 @@ END(secondary_startup_64)
 * up already except stack. We just set up stack here. Then call
 * start_secondary() via .Ljump_to_C_code.
 */
-ENTRY(start_cpu0)
+SYM_CODE_START(start_cpu0)
 	UNWIND_HINT_EMPTY
 	movq	initial_stack(%rip), %rsp
 	jmp	.Ljump_to_C_code
-END(start_cpu0)
+SYM_CODE_END(start_cpu0)
 #endif
 
 /* Both SMP bootup and ACPI suspend change these variables */
@@ -272,7 +272,7 @@ SYM_DATA(initial_stack, .quad init_thread_union + THREAD_SIZE - SIZEOF_PTREGS)
 	__FINITDATA
 
 	__INIT
-ENTRY(early_idt_handler_array)
+SYM_CODE_START(early_idt_handler_array)
 	i = 0
 	.rept NUM_EXCEPTION_VECTORS
 	.if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0
@@ -288,7 +288,7 @@ ENTRY(early_idt_handler_array)
 	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
 	.endr
 	UNWIND_HINT_IRET_REGS offset=16
-END(early_idt_handler_array)
+SYM_CODE_END(early_idt_handler_array)
 
 SYM_CODE_START_LOCAL(early_idt_handler_common)
 	/*
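
early_idt_handler_array also shows why SYM_CODE_* rather than SYM_FUNC_* is
the right annotation: the symbol is not a function but a table of
NUM_EXCEPTION_VECTORS stubs, each padded to EARLY_IDT_HANDLER_SIZE bytes. A
sketch of what one .rept iteration generates; the push/jmp lines fall between
the two hunks above and are reconstructed here, so treat them as illustrative:

	/* one early-IDT stub, for vector i */
	.if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0
	pushq	$0		/* dummy error code, keeps the frame uniform */
	.endif
	pushq	$i		/* vector number */
	jmp	early_idt_handler_common
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc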
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -20,11 +20,11 @@
 #include <linux/linkage.h>
 
 .macro xen_pv_trap name
-ENTRY(xen_\name)
+SYM_CODE_START(xen_\name)
 	pop %rcx
 	pop %r11
 	jmp \name
-END(xen_\name)
+SYM_CODE_END(xen_\name)
 _ASM_NOKPROBE(xen_\name)
 .endm
@@ -57,7 +57,7 @@ xen_pv_trap entry_INT80_compat
 xen_pv_trap hypervisor_callback
 
 	__INIT
-ENTRY(xen_early_idt_handler_array)
+SYM_CODE_START(xen_early_idt_handler_array)
 	i = 0
 	.rept NUM_EXCEPTION_VECTORS
 	pop %rcx
@@ -66,7 +66,7 @@ ENTRY(xen_early_idt_handler_array)
 	i = i + 1
 	.fill xen_early_idt_handler_array + i*XEN_EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
 	.endr
-END(xen_early_idt_handler_array)
+SYM_CODE_END(xen_early_idt_handler_array)
 
 	__FINIT
 
 hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
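
Each xen_pv_trap instantiation stamps out one such forwarding stub. For
example, "xen_pv_trap hypervisor_callback" (visible as context above) now
effectively expands to:

/* Illustrative expansion of the converted xen_pv_trap macro */
SYM_CODE_START(xen_hypervisor_callback)
	pop %rcx	/* discard the rcx/r11 slots the Xen PV trap entry pushes */
	pop %r11
	jmp hypervisor_callback
SYM_CODE_END(xen_hypervisor_callback)
_ASM_NOKPROBE(xen_hypervisor_callback)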
--- a/arch/x86/xen/xen-head.S
+++ b/arch/x86/xen/xen-head.S
@@ -22,7 +22,7 @@
 #ifdef CONFIG_XEN_PV
 	__INIT
-ENTRY(startup_xen)
+SYM_CODE_START(startup_xen)
 	UNWIND_HINT_EMPTY
 	cld
@@ -52,13 +52,13 @@ ENTRY(startup_xen)
 #endif
 	jmp xen_start_kernel
-END(startup_xen)
+SYM_CODE_END(startup_xen)
 	__FINIT
 #endif
 
 .pushsection .text
 	.balign PAGE_SIZE
-ENTRY(hypercall_page)
+SYM_CODE_START(hypercall_page)
 	.rept (PAGE_SIZE / 32)
 	UNWIND_HINT_EMPTY
 	.skip 32
@@ -69,7 +69,7 @@ ENTRY(hypercall_page)
 	.type xen_hypercall_##n, @function; .size xen_hypercall_##n, 32
 #include <asm/xen-hypercalls.h>
 #undef HYPERCALL
-END(hypercall_page)
+SYM_CODE_END(hypercall_page)
 .popsection
 
 ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz "linux")
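
hypercall_page is code only in the loosest sense: a page of 32-byte slots that
the hypervisor populates at boot, hence SYM_CODE_* plus a per-slot
UNWIND_HINT_EMPTY rather than any function annotation. The HYPERCALL()
definition partially visible above names each slot; HYPERCALL(sched_op), for
instance, effectively produces the following (the .equ line is elided in the
hunk and reconstructed here, so treat it as a sketch):

/* Sketch of one HYPERCALL() expansion */
.equ xen_hypercall_sched_op, hypercall_page + __HYPERVISOR_sched_op * 32
.type xen_hypercall_sched_op, @function; .size xen_hypercall_sched_op, 32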