Commit 9b47feb7 authored by Denys Vlasenko, committed by Ingo Molnar

x86/asm/entry: Clean up entry*.S style, final bits

A few bits were missed.
Signed-off-by: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent bace7117
@@ -12,7 +12,7 @@
 * 0(%esp) - %ebx
 * 4(%esp) - %ecx
 * 8(%esp) - %edx
 * C(%esp) - %esi
 * 10(%esp) - %edi
 * 14(%esp) - %ebp
 * 18(%esp) - %eax
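For reference, the offsets documented above are the saved general-purpose registers at the bottom of the 32-bit entry stack frame. A minimal C sketch that simply restates that layout (illustrative only, not kernel code; the struct name is invented and only the seven fields in the comment are shown):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative only: mirrors the saved-register layout documented above. */
struct saved_regs {
	uint32_t ebx;	/*  0(%esp) */
	uint32_t ecx;	/*  4(%esp) */
	uint32_t edx;	/*  8(%esp) */
	uint32_t esi;	/*  C(%esp) */
	uint32_t edi;	/* 10(%esp) */
	uint32_t ebp;	/* 14(%esp) */
	uint32_t eax;	/* 18(%esp) */
};

int main(void)
{
	/* Prints the same offsets the comment lists: esi at 0xc, eax at 0x18. */
	printf("esi at %#zx, eax at %#zx\n",
	       offsetof(struct saved_regs, esi),
	       offsetof(struct saved_regs, eax));
	return 0;
}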
@@ -128,7 +128,7 @@
.macro POP_GS pop=0
98:	popl	%gs
  .if \pop <> 0
	add	$\pop, %esp
  .endif
.endm
.macro POP_GS_EX
@@ -487,8 +487,8 @@ ldt_ss:
	mov	%esp, %edx			/* load kernel esp */
	mov	PT_OLDESP(%esp), %eax		/* load userspace esp */
	mov	%dx, %ax			/* eax: new kernel esp */
	sub	%eax, %edx			/* offset (low word is 0) */
	shr	$16, %edx
	mov	%dl, GDT_ESPFIX_SS + 4		/* bits 16..23 */
	mov	%dh, GDT_ESPFIX_SS + 7		/* bits 24..31 */
	pushl	$__ESPFIX_SS
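The ldt_ss hunk above builds the "new kernel esp" from the low word of the kernel stack pointer and the high word of the userspace one, then stores the resulting offset into bytes 4 and 7 of the ESPFIX GDT entry (base bits 16..23 and 24..31). A minimal C sketch of just that arithmetic, with invented function and variable names:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: the arithmetic performed by the ldt_ss hunk, in C. */
static void espfix_base_bytes(uint32_t kernel_esp, uint32_t user_esp,
			      uint8_t *base_23_16, uint8_t *base_31_24)
{
	/* mov %dx, %ax: low word from the kernel esp, high word from user esp */
	uint32_t new_esp = (user_esp & 0xffff0000u) | (kernel_esp & 0x0000ffffu);

	/* sub %eax, %edx: the offset between the two; its low word is 0 */
	uint32_t offset = kernel_esp - new_esp;

	/* shr $16, %edx, then the two byte stores into the GDT entry */
	offset >>= 16;
	*base_23_16 = (uint8_t)(offset & 0xff);	/* %dl -> descriptor byte 4 */
	*base_31_24 = (uint8_t)(offset >> 8);	/* %dh -> descriptor byte 7 */
}

int main(void)
{
	uint8_t b4, b7;
	espfix_base_bytes(0xc1234567u, 0xbf8ff000u, &b4, &b7);
	printf("base[23:16]=%#x base[31:24]=%#x\n", (unsigned)b4, (unsigned)b7);
	return 0;
}

With these sample inputs the offset is 0x01940000, so 0x94 lands in descriptor byte 4 and 0x01 in byte 7.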
@@ -507,7 +507,7 @@ ENDPROC(entry_INT80_32)
	# perform work that needs to be done immediately before resumption
	ALIGN
work_pending:
	testb	$_TIF_NEED_RESCHED, %cl
	jz	work_notifysig
work_resched:
	call	schedule
@@ -520,7 +520,7 @@ work_resched:
	andl	$_TIF_WORK_MASK, %ecx	# is there any work to be done other
					# than syscall tracing?
	jz	restore_all
	testb	$_TIF_NEED_RESCHED, %cl
	jnz	work_resched
work_notifysig:				# deal with pending signals and
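In C terms, work_pending/work_resched form a simple loop: reschedule for as long as TIF_NEED_RESCHED is set, return to restore_all once no work remains, otherwise fall through to signal handling. A standalone sketch of that control flow (flag values and helper names are placeholders, not the kernel's definitions):

#include <stdio.h>

/* Placeholder bits standing in for the _TIF_* masks. */
#define TIF_NEED_RESCHED  (1u << 0)
#define TIF_SIGPENDING    (1u << 1)
#define TIF_WORK_MASK     (TIF_NEED_RESCHED | TIF_SIGPENDING)

static unsigned int flags = TIF_NEED_RESCHED | TIF_SIGPENDING;

static void schedule(void)
{
	puts("schedule()");
	flags &= ~TIF_NEED_RESCHED;	/* pretend the resched request was consumed */
}

static void work_notifysig(void)
{
	puts("handle pending signals / notify resume");
}

/* Rough C rendering of the work_pending/work_resched control flow above. */
static void work_pending(void)
{
	while (flags & TIF_NEED_RESCHED) {
		schedule();
		if (!(flags & TIF_WORK_MASK))
			return;			/* restore_all */
	}
	work_notifysig();
}

int main(void) { work_pending(); return 0; }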
@@ -537,8 +537,8 @@ work_notifysig:			# deal with pending signals and
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	movb	PT_CS(%esp), %bl
	andb	$SEGMENT_RPL_MASK, %bl
	cmpb	$USER_RPL, %bl
	jb	resume_kernel
	xorl	%edx, %edx
	call	do_notify_resume
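The movb/andb/cmpb sequence above reads the saved CS and compares its requested privilege level against USER_RPL to decide whether the interrupted context was the kernel itself. The same test in C (the two constants are restated here for the example; the real definitions live in the segment headers):

#include <stdio.h>

#define SEGMENT_RPL_MASK 0x3	/* low two bits of a segment selector */
#define USER_RPL         0x3	/* ring 3 */

/* Sketch of the saved-CS check: anything below USER_RPL means we are
 * resuming kernel context, not returning to user space. */
static int returning_to_kernel(unsigned char saved_cs)
{
	return (saved_cs & SEGMENT_RPL_MASK) < USER_RPL;	/* jb resume_kernel */
}

int main(void)
{
	printf("user CS 0x73: %d, kernel CS 0x10: %d\n",
	       returning_to_kernel(0x73), returning_to_kernel(0x10));
	return 0;
}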
@@ -609,7 +609,7 @@ END(sysenter_badsys)
	/* fixup the stack */
	mov	GDT_ESPFIX_SS + 4, %al		/* bits 16..23 */
	mov	GDT_ESPFIX_SS + 7, %ah		/* bits 24..31 */
	shl	$16, %eax
	addl	%esp, %eax			/* the adjusted stack pointer */
	pushl	$__KERNEL_DS
	pushl	%eax
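This "fixup the stack" hunk is the arithmetic inverse of the ldt_ss sketch above: the two stored base bytes are reassembled, shifted back up by 16 bits and added to the current %esp to recover the adjusted stack pointer. A companion C sketch, again with invented names:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: rebuild the stack pointer from the two descriptor
 * base bytes stored earlier. */
static uint32_t adjusted_esp(uint8_t base_23_16, uint8_t base_31_24,
			     uint32_t cur_esp)
{
	uint32_t base_hi = (uint32_t)base_23_16 | ((uint32_t)base_31_24 << 8);
	return (base_hi << 16) + cur_esp;	/* shl $16; addl %esp, %eax */
}

int main(void)
{
	/* Bytes and stack value from the earlier sketch recover 0xc1234567. */
	printf("%#x\n", (unsigned)adjusted_esp(0x94, 0x01, 0xbf8f4567u));
	return 0;
}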
...
@@ -106,8 +106,8 @@ ENTRY(entry_SYSENTER_compat)
	jnz	sysenter_fix_flags
sysenter_flags_fixed:
	orl	$TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
	testl	$_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
	jnz	sysenter_tracesys
sysenter_do_call:
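The two ASM_THREAD_INFO lines mark the task as executing a compat syscall (TS_COMPAT in the thread_info status field) and then test the thread_info flags for syscall-entry work such as tracing, seccomp or audit; if any bit is set, the slow sysenter_tracesys path is taken. A simplified C sketch of that decision (field layout and constant values are placeholders, not the real definitions):

#include <stdbool.h>
#include <stdio.h>

/* Placeholder values; the real masks live in asm/thread_info.h. */
#define TS_COMPAT               0x0002
#define TIF_WORK_SYSCALL_ENTRY  0x0101

struct thread_info_sketch {
	unsigned int flags;
	unsigned int status;
};

/* Mirrors the two instructions above: mark the task as being in a compat
 * syscall, then decide fast path vs. tracing slow path. */
static bool needs_entry_tracing(struct thread_info_sketch *ti)
{
	ti->status |= TS_COMPAT;			/* orl $TS_COMPAT, ... */
	return ti->flags & TIF_WORK_SYSCALL_ENTRY;	/* testl + jnz */
}

int main(void)
{
	struct thread_info_sketch ti = { .flags = 0x0100, .status = 0 };
	printf("slow path: %d\n", needs_entry_tracing(&ti));
	return 0;
}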
@@ -138,7 +138,7 @@ sysexit_from_sys_call:
	 * This code path is still called 'sysexit' because it pairs
	 * with 'sysenter' and it uses the SYSENTER calling convention.
	 */
	andl	$~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
	movl	RIP(%rsp), %ecx			/* User %eip */
	RESTORE_RSI_RDI
	xorl	%edx, %edx			/* Do not leak kernel information */
@@ -229,7 +229,7 @@ sysexit_audit:
#endif
sysenter_fix_flags:
	pushq	$(X86_EFLAGS_IF|X86_EFLAGS_FIXED)
	popfq
	jmp	sysenter_flags_fixed
@@ -325,9 +325,9 @@ ENTRY(entry_SYSCALL_compat)
1:	movl	(%r8), %ebp
	_ASM_EXTABLE(1b, ia32_badarg)
	ASM_CLAC
	orl	$TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
	testl	$_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
	jnz	cstar_tracesys
cstar_do_call:
	/* 32-bit syscall -> 64-bit C ABI argument conversion */
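The load at label 1 reads %ebp from user memory through the pointer in %r8, and the _ASM_EXTABLE(1b, ia32_badarg) entry arranges that a fault on that load resumes at ia32_badarg instead of oopsing, much as get_user() does for C code. A userspace-only sketch of the same pattern (fake_get_user and fetch_user_arg are made-up stand-ins, not kernel APIs):

#include <stdio.h>

/* Stand-in for a faultable user access: returns nonzero on "fault".
 * In the kernel the real mechanism is the exception-table entry
 * emitted by _ASM_EXTABLE. */
static int fake_get_user(unsigned int *dst, const unsigned int *uptr)
{
	if (!uptr)
		return -1;	/* pretend the access faulted */
	*dst = *uptr;
	return 0;
}

/* Sketch of the argument fetch: read a 32-bit value from the user
 * pointer, bailing out (as ia32_badarg would) if it is bad. */
static int fetch_user_arg(const unsigned int *user_ptr, unsigned int *val)
{
	if (fake_get_user(val, user_ptr))
		return -14;	/* -EFAULT */
	return 0;
}

int main(void)
{
	unsigned int arg = 0, stackval = 42;
	printf("%d %u\n", fetch_user_arg(&stackval, &arg), arg);
	printf("%d\n", fetch_user_arg(NULL, &arg));
	return 0;
}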
...