Commit e5862d05 authored by Joerg Roedel, committed by Thomas Gleixner

x86/entry/32: Leave the kernel via trampoline stack

Switch back to the trampoline stack before returning to userspace.
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Pavel Machek <pavel@ucw.cz>
Cc: "H . Peter Anvin" <hpa@zytor.com>
Cc: linux-mm@kvack.org
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: David Laight <David.Laight@aculab.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Eduardo Valentin <eduval@amazon.com>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: aliguori@amazon.com
Cc: daniel.gruss@iaik.tugraz.at
Cc: hughd@google.com
Cc: keescook@google.com
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Waiman Long <llong@redhat.com>
Cc: "David H . Gutteridge" <dhgutteridge@sympatico.ca>
Cc: joro@8bytes.org
Link: https://lkml.kernel.org/r/1531906876-13451-9-git-send-email-joro@8bytes.org
parent 45d7b255
@@ -342,6 +342,60 @@
.Lend_\@:
.endm
/*
* Switch back from the kernel stack to the entry stack.
*
* The %esp register must point to pt_regs on the task stack. It will
* first calculate the size of the stack-frame to copy, depending on
* whether we return to VM86 mode or not. With that it uses 'rep movsl'
* to copy the contents of the stack over to the entry stack.
*
* We must be very careful here, as we can't trust the contents of the
* task-stack once we switched to the entry-stack. When an NMI happens
* while on the entry-stack, the NMI handler will switch back to the top
* of the task stack, overwriting our stack-frame we are about to copy.
* Therefore we switch the stack only after everything is copied over.
*/
.macro SWITCH_TO_ENTRY_STACK
/* On Xen PV the ALTERNATIVE patches in a jump past the whole switch */
ALTERNATIVE "", "jmp .Lend_\@", X86_FEATURE_XENPV
/* Bytes to copy */
movl $PTREGS_SIZE, %ecx
#ifdef CONFIG_VM86
/* Returning to VM86 mode? (EFLAGS.VM set in the saved frame) */
testl $(X86_EFLAGS_VM), PT_EFLAGS(%esp)
jz .Lcopy_pt_regs_\@
/* Additional 4 registers to copy when returning to VM86 mode */
addl $(4 * 4), %ecx
.Lcopy_pt_regs_\@:
#endif
/* Initialize source and destination for movsl */
movl PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi
/* %edi = top of entry stack (TSS sp0) minus frame size = copy destination */
subl %ecx, %edi
/* Source: pt_regs on the task stack (contract: %esp points at pt_regs) */
movl %esp, %esi
/* Save future stack pointer in %ebx */
movl %edi, %ebx
/* Copy over the stack-frame */
/* movsl moves longwords: convert the byte count in %ecx to dwords */
shrl $2, %ecx
/* Forward copy direction for rep movsl */
cld
rep movsl
/*
 * Switch to entry-stack - needs to happen after everything is
 * copied because the NMI handler will overwrite the task-stack
 * when on entry-stack
 */
movl %ebx, %esp
.Lend_\@:
.endm
/*
 * %eax: prev task
 * %edx: next task
@@ -581,25 +635,45 @@ ENTRY(entry_SYSENTER_32)
/* Opportunistic SYSEXIT */
TRACE_IRQS_ON /* User mode traces as IRQs on. */
/*
* Setup entry stack - we keep the pointer in %eax and do the
* switch after almost all user-state is restored.
*/
/* Load entry stack pointer and allocate frame for eflags/eax */
movl PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %eax
subl $(2*4), %eax
/* Copy eflags and eax to entry stack */
movl PT_EFLAGS(%esp), %edi
movl PT_EAX(%esp), %esi
movl %edi, (%eax)
movl %esi, 4(%eax)
/* Restore user registers and segments */
movl PT_EIP(%esp), %edx /* pt_regs->ip */
movl PT_OLDESP(%esp), %ecx /* pt_regs->sp */
1: mov PT_FS(%esp), %fs
PTGS_TO_GS
popl %ebx /* pt_regs->bx */
addl $2*4, %esp /* skip pt_regs->cx and pt_regs->dx */
popl %esi /* pt_regs->si */
popl %edi /* pt_regs->di */
popl %ebp /* pt_regs->bp */
popl %eax /* pt_regs->ax */
/* Switch to entry stack */
movl %eax, %esp
/*
 * Restore all flags except IF. (We restore IF separately because
 * STI gives a one-instruction window in which we won't be interrupted,
 * whereas POPF does not.)
 */
addl $PT_EFLAGS-PT_DS, %esp /* point esp at pt_regs->flags */
btrl $X86_EFLAGS_IF_BIT, (%esp)
popfl
popl %eax
/*
 * Return back to the vDSO, which will pop ecx and edx.
@@ -668,6 +742,7 @@ ENTRY(entry_INT80_32)
restore_all:
TRACE_IRQS_IRET
SWITCH_TO_ENTRY_STACK
.Lrestore_all_notrace:
CHECK_AND_APPLY_ESPFIX
.Lrestore_nocheck:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment