Commit b2502b41 authored by Ingo Molnar

x86/asm/entry: Untangle 'system_call' into two entry points: entry_SYSCALL_64 and entry_INT80_32

The 'system_call' entry points differ starkly between native 32-bit and 64-bit
kernels: on 32-bit kernels it defines the INT 0x80 entry point, while on
64-bit it's the SYSCALL entry point.

This is pretty confusing when looking at generic code, and it also obscures
the nature of the entry point at the assembly level.

So untangle this by splitting the name into its two uses:

	system_call (32) -> entry_INT80_32
	system_call (64) -> entry_SYSCALL_64

As per the generic naming scheme for x86 system call entry points:

	entry_MNEMONIC_qualifier

where 'qualifier' is one of _32, _64 or _compat.

Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 4c8cd0c5
...@@ -415,7 +415,7 @@ sysexit_audit: ...@@ -415,7 +415,7 @@ sysexit_audit:
ENDPROC(entry_SYSENTER_32) ENDPROC(entry_SYSENTER_32)
# system call handler stub # system call handler stub
ENTRY(system_call) ENTRY(entry_INT80_32)
ASM_CLAC ASM_CLAC
pushl %eax # save orig_eax pushl %eax # save orig_eax
SAVE_ALL SAVE_ALL
...@@ -508,7 +508,7 @@ ldt_ss: ...@@ -508,7 +508,7 @@ ldt_ss:
lss (%esp), %esp /* switch to espfix segment */ lss (%esp), %esp /* switch to espfix segment */
jmp restore_nocheck jmp restore_nocheck
#endif #endif
ENDPROC(system_call) ENDPROC(entry_INT80_32)
# perform work that needs to be done immediately before resumption # perform work that needs to be done immediately before resumption
ALIGN ALIGN
......
...@@ -137,7 +137,7 @@ ENDPROC(native_usergs_sysret64) ...@@ -137,7 +137,7 @@ ENDPROC(native_usergs_sysret64)
* with them due to bugs in both AMD and Intel CPUs. * with them due to bugs in both AMD and Intel CPUs.
*/ */
ENTRY(system_call) ENTRY(entry_SYSCALL_64)
/* /*
* Interrupts are off on entry. * Interrupts are off on entry.
* We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON, * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
...@@ -149,7 +149,7 @@ ENTRY(system_call) ...@@ -149,7 +149,7 @@ ENTRY(system_call)
* after the swapgs, so that it can do the swapgs * after the swapgs, so that it can do the swapgs
* for the guest and jump here on syscall. * for the guest and jump here on syscall.
*/ */
GLOBAL(system_call_after_swapgs) GLOBAL(entry_SYSCALL_64_after_swapgs)
movq %rsp,PER_CPU_VAR(rsp_scratch) movq %rsp,PER_CPU_VAR(rsp_scratch)
movq PER_CPU_VAR(cpu_current_top_of_stack),%rsp movq PER_CPU_VAR(cpu_current_top_of_stack),%rsp
...@@ -182,7 +182,7 @@ GLOBAL(system_call_after_swapgs) ...@@ -182,7 +182,7 @@ GLOBAL(system_call_after_swapgs)
testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
jnz tracesys jnz tracesys
system_call_fastpath: entry_SYSCALL_64_fastpath:
#if __SYSCALL_MASK == ~0 #if __SYSCALL_MASK == ~0
cmpq $__NR_syscall_max,%rax cmpq $__NR_syscall_max,%rax
#else #else
...@@ -246,7 +246,7 @@ tracesys: ...@@ -246,7 +246,7 @@ tracesys:
jnz tracesys_phase2 /* if needed, run the slow path */ jnz tracesys_phase2 /* if needed, run the slow path */
RESTORE_C_REGS_EXCEPT_RAX /* else restore clobbered regs */ RESTORE_C_REGS_EXCEPT_RAX /* else restore clobbered regs */
movq ORIG_RAX(%rsp), %rax movq ORIG_RAX(%rsp), %rax
jmp system_call_fastpath /* and return to the fast path */ jmp entry_SYSCALL_64_fastpath /* and return to the fast path */
tracesys_phase2: tracesys_phase2:
SAVE_EXTRA_REGS SAVE_EXTRA_REGS
...@@ -411,7 +411,7 @@ syscall_return_via_sysret: ...@@ -411,7 +411,7 @@ syscall_return_via_sysret:
opportunistic_sysret_failed: opportunistic_sysret_failed:
SWAPGS SWAPGS
jmp restore_c_regs_and_iret jmp restore_c_regs_and_iret
END(system_call) END(entry_SYSCALL_64)
.macro FORK_LIKE func .macro FORK_LIKE func
......
...@@ -5,11 +5,12 @@ ...@@ -5,11 +5,12 @@
/* misc architecture specific prototypes */ /* misc architecture specific prototypes */
void system_call(void);
void syscall_init(void); void syscall_init(void);
void entry_INT80_compat(void); void entry_SYSCALL_64(void);
void entry_SYSCALL_compat(void); void entry_SYSCALL_compat(void);
void entry_INT80_32(void);
void entry_INT80_compat(void);
void entry_SYSENTER_32(void); void entry_SYSENTER_32(void);
void entry_SYSENTER_compat(void); void entry_SYSENTER_compat(void);
......
...@@ -1204,7 +1204,7 @@ void syscall_init(void) ...@@ -1204,7 +1204,7 @@ void syscall_init(void)
* set CS/DS but only a 32bit target. LSTAR sets the 64bit rip. * set CS/DS but only a 32bit target. LSTAR sets the 64bit rip.
*/ */
wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32); wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32);
wrmsrl(MSR_LSTAR, system_call); wrmsrl(MSR_LSTAR, entry_SYSCALL_64);
#ifdef CONFIG_IA32_EMULATION #ifdef CONFIG_IA32_EMULATION
wrmsrl(MSR_CSTAR, entry_SYSCALL_compat); wrmsrl(MSR_CSTAR, entry_SYSCALL_compat);
......
...@@ -72,8 +72,7 @@ gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss; ...@@ -72,8 +72,7 @@ gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
#else #else
#include <asm/processor-flags.h> #include <asm/processor-flags.h>
#include <asm/setup.h> #include <asm/setup.h>
#include <asm/proto.h>
asmlinkage int system_call(void);
#endif #endif
/* Must be page-aligned because the real IDT is used in a fixmap. */ /* Must be page-aligned because the real IDT is used in a fixmap. */
...@@ -997,7 +996,7 @@ void __init trap_init(void) ...@@ -997,7 +996,7 @@ void __init trap_init(void)
#endif #endif
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
set_system_trap_gate(IA32_SYSCALL_VECTOR, &system_call); set_system_trap_gate(IA32_SYSCALL_VECTOR, entry_INT80_32);
set_bit(IA32_SYSCALL_VECTOR, used_vectors); set_bit(IA32_SYSCALL_VECTOR, used_vectors);
#endif #endif
......
...@@ -114,7 +114,7 @@ RELOC(xen_sysret32, 1b+1) ...@@ -114,7 +114,7 @@ RELOC(xen_sysret32, 1b+1)
/* Normal 64-bit system call target */ /* Normal 64-bit system call target */
ENTRY(xen_syscall_target) ENTRY(xen_syscall_target)
undo_xen_syscall undo_xen_syscall
jmp system_call_after_swapgs jmp entry_SYSCALL_64_after_swapgs
ENDPROC(xen_syscall_target) ENDPROC(xen_syscall_target)
#ifdef CONFIG_IA32_EMULATION #ifdef CONFIG_IA32_EMULATION
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment