Commit 9b47feb7 authored by Denys Vlasenko, committed by Ingo Molnar

x86/asm/entry: Clean up entry*.S style, final bits

A few bits were missed.
Signed-off-by: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent bace7117
@@ -12,7 +12,7 @@
* 0(%esp) - %ebx
* 4(%esp) - %ecx
* 8(%esp) - %edx
- * C(%esp) - %esi
+ * C(%esp) - %esi
* 10(%esp) - %edi
* 14(%esp) - %ebp
* 18(%esp) - %eax
@@ -128,7 +128,7 @@
.macro POP_GS pop=0
98: popl %gs
.if \pop <> 0
- add $\pop, %esp
+ add $\pop, %esp
.endif
.endm
.macro POP_GS_EX
@@ -487,8 +487,8 @@ ldt_ss:
mov %esp, %edx /* load kernel esp */
mov PT_OLDESP(%esp), %eax /* load userspace esp */
mov %dx, %ax /* eax: new kernel esp */
- sub %eax, %edx /* offset (low word is 0) */
- shr $16, %edx
+ sub %eax, %edx /* offset (low word is 0) */
+ shr $16, %edx
mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
pushl $__ESPFIX_SS
@@ -507,7 +507,7 @@ ENDPROC(entry_INT80_32)
# perform work that needs to be done immediately before resumption
ALIGN
work_pending:
- testb $_TIF_NEED_RESCHED, %cl
+ testb $_TIF_NEED_RESCHED, %cl
jz work_notifysig
work_resched:
call schedule
@@ -520,7 +520,7 @@ work_resched:
andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
# than syscall tracing?
jz restore_all
- testb $_TIF_NEED_RESCHED, %cl
+ testb $_TIF_NEED_RESCHED, %cl
jnz work_resched
work_notifysig: # deal with pending signals and
@@ -537,8 +537,8 @@ work_notifysig: # deal with pending signals and
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE)
movb PT_CS(%esp), %bl
- andb $SEGMENT_RPL_MASK, %bl
- cmpb $USER_RPL, %bl
+ andb $SEGMENT_RPL_MASK, %bl
+ cmpb $USER_RPL, %bl
jb resume_kernel
xorl %edx, %edx
call do_notify_resume
@@ -609,7 +609,7 @@ END(sysenter_badsys)
/* fixup the stack */
mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
- shl $16, %eax
+ shl $16, %eax
addl %esp, %eax /* the adjusted stack pointer */
pushl $__KERNEL_DS
pushl %eax
......
@@ -106,8 +106,8 @@ ENTRY(entry_SYSENTER_compat)
jnz sysenter_fix_flags
sysenter_flags_fixed:
- orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
- testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
+ orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
+ testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
jnz sysenter_tracesys
sysenter_do_call:
@@ -138,7 +138,7 @@ sysexit_from_sys_call:
* This code path is still called 'sysexit' because it pairs
* with 'sysenter' and it uses the SYSENTER calling convention.
*/
- andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
+ andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
movl RIP(%rsp), %ecx /* User %eip */
RESTORE_RSI_RDI
xorl %edx, %edx /* Do not leak kernel information */
@@ -229,7 +229,7 @@ sysexit_audit:
#endif
sysenter_fix_flags:
- pushq $(X86_EFLAGS_IF|X86_EFLAGS_FIXED)
+ pushq $(X86_EFLAGS_IF|X86_EFLAGS_FIXED)
popfq
jmp sysenter_flags_fixed
@@ -325,9 +325,9 @@ ENTRY(entry_SYSCALL_compat)
1: movl (%r8), %ebp
_ASM_EXTABLE(1b, ia32_badarg)
ASM_CLAC
- orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
- testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
- jnz cstar_tracesys
+ orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
+ testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
+ jnz cstar_tracesys
cstar_do_call:
/* 32-bit syscall -> 64-bit C ABI argument conversion */
......