Commit 88c15ec9 authored by Boris Ostrovsky, committed by Ingo Molnar

x86/paravirt: Remove the unused irq_enable_sysexit pv op

As a result of the commit "x86/xen: Avoid fast syscall path for Xen PV
guests", the irq_enable_sysexit pv op is no longer called by Xen PV
guests. Since they were its only user, it can be safely removed.
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Reviewed-by: Borislav Petkov <bp@suse.de>
Acked-by: Andy Lutomirski <luto@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: david.vrabel@citrix.com
Cc: konrad.wilk@oracle.com
Cc: virtualization@lists.linux-foundation.org
Cc: xen-devel@lists.xenproject.org
Link: http://lkml.kernel.org/r/1447970147-1733-3-git-send-email-boris.ostrovsky@oracle.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 5fdf5d37
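For readers unfamiliar with the machinery being deleted below, here is a
minimal userspace sketch of how a patchable paravirt op of this kind is
dispatched: the kernel keeps a table of function pointers that bare metal
fills with native stubs and a Xen PV guest overrides at boot. All names in
this sketch (pv_cpu_ops_sketch and the two stub functions) are hypothetical
simplifications for illustration, not the kernel's actual types.

#include <stdio.h>

/* Hypothetical, simplified model of pv_cpu_ops: one patchable op. */
struct pv_cpu_ops_sketch {
	void (*irq_enable_sysexit)(void);	/* the op this commit removes */
};

/* On bare metal this stub would be the raw "sti; sysexit" sequence. */
static void native_irq_enable_sysexit_sketch(void)
{
	puts("native: sti; sysexit");
}

/* A 32-bit Xen PV guest cannot execute SYSEXIT, so it hooked in
 * xen_sysexit, which faked the return with an iret (see the
 * xen-asm_32.S hunk below). */
static void xen_sysexit_sketch(void)
{
	puts("xen: fake SYSEXIT via iret");
}

int main(void)
{
	struct pv_cpu_ops_sketch ops = {
		.irq_enable_sysexit = native_irq_enable_sysexit_sketch,
	};

	ops.irq_enable_sysexit();		/* bare-metal path */

	/* A Xen PV guest would override the pointer during early boot. */
	ops.irq_enable_sysexit = xen_sysexit_sketch;
	ops.irq_enable_sysexit();		/* guest path */
	return 0;
}

Since the parent commit keeps Xen off the fast syscall path, no guest ever
overrides this pointer anymore, which is why the commit can delete the op,
its native stub, and all of the patching glue in one sweep.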
arch/x86/entry/entry_32.S

@@ -329,7 +329,8 @@ sysenter_past_esp:
 	 * Return back to the vDSO, which will pop ecx and edx.
 	 * Don't bother with DS and ES (they already contain __USER_DS).
 	 */
-	ENABLE_INTERRUPTS_SYSEXIT
+	sti
+	sysexit
 
 .pushsection .fixup, "ax"
 2:	movl	$0, PT_FS(%esp)

@@ -552,11 +553,6 @@ ENTRY(native_iret)
 	iret
 	_ASM_EXTABLE(native_iret, iret_exc)
 END(native_iret)
-
-ENTRY(native_irq_enable_sysexit)
-	sti
-	sysexit
-END(native_irq_enable_sysexit)
 #endif
 
 ENTRY(overflow)
arch/x86/include/asm/paravirt.h

@@ -932,13 +932,6 @@ extern void default_banner(void);
 	push %ecx; push %edx;				\
 	call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);	\
 	pop %edx; pop %ecx
-
-#define ENABLE_INTERRUPTS_SYSEXIT					\
-	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),	\
-		  CLBR_NONE,						\
-		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
-
 #else	/* !CONFIG_X86_32 */
 
 /*
arch/x86/include/asm/paravirt_types.h

@@ -157,15 +157,6 @@ struct pv_cpu_ops {
 	u64 (*read_pmc)(int counter);
 
-#ifdef CONFIG_X86_32
-	/*
-	 * Atomically enable interrupts and return to userspace.  This
-	 * is only used in 32-bit kernels.  64-bit kernels use
-	 * usergs_sysret32 instead.
-	 */
-	void (*irq_enable_sysexit)(void);
-#endif
-
 	/*
 	 * Switch to usermode gs and return to 64-bit usermode using
 	 * sysret.  Only used in 64-bit kernels to return to 64-bit
arch/x86/kernel/asm-offsets.c

@@ -65,9 +65,6 @@ void common(void) {
 	OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable);
 	OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable);
 	OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
-#ifdef CONFIG_X86_32
-	OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
-#endif
 	OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
 	OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
 #endif
arch/x86/kernel/paravirt.c

@@ -162,9 +162,6 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
 		ret = paravirt_patch_ident_64(insnbuf, len);
 	else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
-#ifdef CONFIG_X86_32
-		 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
-#endif
 		 type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret32) ||
 		 type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret64))
 		/* If operation requires a jmp, then jmp */

@@ -220,7 +217,6 @@ static u64 native_steal_clock(int cpu)
 /* These are in entry.S */
 extern void native_iret(void);
-extern void native_irq_enable_sysexit(void);
 extern void native_usergs_sysret32(void);
 extern void native_usergs_sysret64(void);

@@ -379,9 +375,6 @@ __visible struct pv_cpu_ops pv_cpu_ops = {
 	.load_sp0 = native_load_sp0,
-#if defined(CONFIG_X86_32)
-	.irq_enable_sysexit = native_irq_enable_sysexit,
-#endif
 #ifdef CONFIG_X86_64
 #ifdef CONFIG_IA32_EMULATION
 	.usergs_sysret32 = native_usergs_sysret32,
arch/x86/kernel/paravirt_patch_32.c

@@ -5,7 +5,6 @@ DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
 DEF_NATIVE(pv_irq_ops, restore_fl, "push %eax; popf");
 DEF_NATIVE(pv_irq_ops, save_fl, "pushf; pop %eax");
 DEF_NATIVE(pv_cpu_ops, iret, "iret");
-DEF_NATIVE(pv_cpu_ops, irq_enable_sysexit, "sti; sysexit");
 DEF_NATIVE(pv_mmu_ops, read_cr2, "mov %cr2, %eax");
 DEF_NATIVE(pv_mmu_ops, write_cr3, "mov %eax, %cr3");
 DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");

@@ -46,7 +45,6 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
 		PATCH_SITE(pv_irq_ops, restore_fl);
 		PATCH_SITE(pv_irq_ops, save_fl);
 		PATCH_SITE(pv_cpu_ops, iret);
-		PATCH_SITE(pv_cpu_ops, irq_enable_sysexit);
 		PATCH_SITE(pv_mmu_ops, read_cr2);
 		PATCH_SITE(pv_mmu_ops, read_cr3);
 		PATCH_SITE(pv_mmu_ops, write_cr3);
arch/x86/kernel/paravirt_patch_64.c

@@ -13,7 +13,6 @@ DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
 DEF_NATIVE(pv_cpu_ops, clts, "clts");
 DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
-DEF_NATIVE(pv_cpu_ops, irq_enable_sysexit, "swapgs; sti; sysexit");
 DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq");
 DEF_NATIVE(pv_cpu_ops, usergs_sysret32, "swapgs; sysretl");
 DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs");
arch/x86/xen/enlighten.c

@@ -1229,10 +1229,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
 	.iret = xen_iret,
 #ifdef CONFIG_X86_64
-	.usergs_sysret32 = xen_sysret32,
 	.usergs_sysret64 = xen_sysret64,
-#else
-	.irq_enable_sysexit = xen_sysexit,
 #endif
 
 	.load_tr_desc = paravirt_nop,
arch/x86/xen/xen-asm_32.S

@@ -34,20 +34,6 @@ check_events:
 	pop %eax
 	ret
 
-/*
- * We can't use sysexit directly, because we're not running in ring0.
- * But we can easily fake it up using iret.  Assuming xen_sysexit is
- * jumped to with a standard stack frame, we can just strip it back to
- * a standard iret frame and use iret.
- */
-ENTRY(xen_sysexit)
-	movl PT_EAX(%esp), %eax		/* Shouldn't be necessary? */
-	orl $X86_EFLAGS_IF, PT_EFLAGS(%esp)
-	lea PT_EIP(%esp), %esp
-	jmp xen_iret
-ENDPROC(xen_sysexit)
-
 /*
  * This is run where a normal iret would be run, with the same stack setup:
  *	8: eflags
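The comment in the deleted xen_sysexit stub is terse, so a word on why the
iret substitution works: a 32-bit Xen PV kernel does not run in ring 0 and
so cannot execute the ring-0-only SYSEXIT instruction. After
"lea PT_EIP(%esp), %esp", the top of the stack is exactly the five-word
frame that iret consumes, with X86_EFLAGS_IF or'ed into the saved flags so
the return re-enables interrupts just as "sti; sysexit" would have. A sketch
of that frame, assuming the standard 32-bit pt_regs layout (the struct name
is hypothetical):

/* Hypothetical illustration of what remains on the stack once
 * xen_sysexit has stripped pt_regs down to its PT_EIP member. */
struct iret_frame_sketch {
	unsigned long eip;	/* PT_EIP: user-space return address */
	unsigned long cs;	/* user code segment */
	unsigned long eflags;	/* PT_EFLAGS, with X86_EFLAGS_IF set */
	unsigned long esp;	/* PT_OLDESP: user stack pointer */
	unsigned long ss;	/* user stack segment */
};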
arch/x86/xen/xen-ops.h

@@ -139,9 +139,6 @@ DECL_ASM(void, xen_restore_fl_direct, unsigned long);
 /* These are not functions, and cannot be called normally */
 __visible void xen_iret(void);
-#ifdef CONFIG_X86_32
-__visible void xen_sysexit(void);
-#endif
 __visible void xen_sysret32(void);
 __visible void xen_sysret64(void);
 __visible void xen_adjust_exception_frame(void);