Commit 8b87d8ce authored by Peter Zijlstra

x86/entry,xen: Early rewrite of restore_regs_and_return_to_kernel()

By doing an early (boot-time) rewrite of `jmp native_iret` in
restore_regs_and_return_to_kernel(), we can get rid of the last
INTERRUPT_RETURN user and of paravirt_iret.
Suggested-by: Andrew Cooper <Andrew.Cooper3@citrix.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Josh Poimboeuf <jpoimboe@redhat.com>
Link: https://lore.kernel.org/r/20220308154317.815039833@infradead.org
parent 6cf3e4c0
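
For context, the mechanism the diff below introduces: restore_regs_and_return_to_kernel() now ends in a hand-assembled 5-byte `jmp rel32` (opcode byte 0xe9 followed by a 32-bit displacement to .Lnative_iret), and on a Xen PV boot xen_start_kernel() rewrites that displacement with __text_gen_insn() so the jump targets xen_iret instead. Below is a minimal sketch of the rel32 arithmetic in plain C with illustrative names; retarget_jmp32() is not a kernel function.

/*
 * Sketch only: retarget an existing 5-byte "jmp rel32" to a new
 * destination -- the same arithmetic __text_gen_insn() performs for
 * early_xen_iret_patch in the diff below.
 */
#include <stdint.h>
#include <string.h>

#define JMP32_OPCODE	0xe9	/* "jmp rel32", as in the ".byte 0xe9" placeholder */
#define JMP32_SIZE	5	/* one opcode byte plus a 4-byte displacement */

void retarget_jmp32(uint8_t *insn, const void *target)
{
	/*
	 * rel32 is relative to the end of the instruction, which is why the
	 * placeholder encodes its displacement as ".long .Lnative_iret - (. + 4)".
	 */
	int32_t rel = (int32_t)((const uint8_t *)target - (insn + JMP32_SIZE));

	insn[0] = JMP32_OPCODE;
	memcpy(&insn[1], &rel, sizeof(rel));
}
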
@@ -609,7 +609,7 @@ SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
 	/* Restore RDI. */
 	popq	%rdi
 	swapgs
-	jmp	native_iret
+	jmp	.Lnative_iret

 SYM_INNER_LABEL(restore_regs_and_return_to_kernel, SYM_L_GLOBAL)
@@ -626,9 +626,14 @@ SYM_INNER_LABEL(restore_regs_and_return_to_kernel, SYM_L_GLOBAL)
 	 * ARCH_HAS_MEMBARRIER_SYNC_CORE rely on IRET core serialization
 	 * when returning from IPI handler.
 	 */
-	INTERRUPT_RETURN
+#ifdef CONFIG_XEN_PV
+SYM_INNER_LABEL(early_xen_iret_patch, SYM_L_GLOBAL)
+	ANNOTATE_NOENDBR
+	.byte 0xe9
+	.long .Lnative_iret - (. + 4)
+#endif

-SYM_INNER_LABEL_ALIGN(native_iret, SYM_L_GLOBAL)
+.Lnative_iret:
 	UNWIND_HINT_IRET_REGS
 	/*
 	 * Are we returning to a stack segment from the LDT? Note: in
@@ -141,13 +141,8 @@ static __always_inline void arch_local_irq_restore(unsigned long flags)
 #ifdef CONFIG_X86_64
 #ifdef CONFIG_XEN_PV
 #define SWAPGS	ALTERNATIVE "swapgs", "", X86_FEATURE_XENPV
-#define INTERRUPT_RETURN						\
-	ANNOTATE_RETPOLINE_SAFE;					\
-	ALTERNATIVE_TERNARY("jmp *paravirt_iret(%rip);",		\
-		X86_FEATURE_XENPV, "jmp xen_iret;", "jmp native_iret;")
 #else
 #define SWAPGS	swapgs
-#define INTERRUPT_RETURN	jmp native_iret
 #endif
 #endif
 #endif /* !__ASSEMBLY__ */
@@ -272,7 +272,6 @@ struct paravirt_patch_template {
 extern struct pv_info pv_info;
 extern struct paravirt_patch_template pv_ops;
-extern void (*paravirt_iret)(void);

 #define PARAVIRT_PATCH(x)					\
 	(offsetof(struct paravirt_patch_template, x) / sizeof(void *))
@@ -345,7 +345,6 @@ SYM_CODE_START_NOALIGN(vc_boot_ghcb)
 	/* Remove Error Code */
 	addq	$8, %rsp

-	/* Pure iret required here - don't use INTERRUPT_RETURN */
 	iretq
 SYM_CODE_END(vc_boot_ghcb)
 #endif
@@ -426,6 +425,8 @@ SYM_CODE_END(early_idt_handler_common)
  * early_idt_handler_array can't be used because it returns via the
  * paravirtualized INTERRUPT_RETURN and pv-ops don't work that early.
  *
+ * XXX it does, fix this.
+ *
  * This handler will end up in the .init.text section and not be
  * available to boot secondary CPUs.
  */
@@ -132,8 +132,6 @@ void paravirt_set_sched_clock(u64 (*func)(void))
 }

-/* These are in entry.S */
-extern void native_iret(void);

 static struct resource reserve_ioports = {
 	.start = 0,
 	.end = IO_SPACE_LIMIT,
@@ -397,8 +395,6 @@ struct paravirt_patch_template pv_ops = {

 #ifdef CONFIG_PARAVIRT_XXL
 NOKPROBE_SYMBOL(native_load_idt);
-void (*paravirt_iret)(void) = native_iret;
 #endif

 EXPORT_SYMBOL(pv_ops);
@@ -1177,6 +1177,8 @@ static void __init xen_domu_set_legacy_features(void)
 	x86_platform.legacy.rtc = 0;
 }

+extern void early_xen_iret_patch(void);
+
 /* First C function to be called on Xen boot */
 asmlinkage __visible void __init xen_start_kernel(void)
 {
@@ -1187,6 +1189,10 @@ asmlinkage __visible void __init xen_start_kernel(void)
 	if (!xen_start_info)
 		return;

+	__text_gen_insn(&early_xen_iret_patch,
+			JMP32_INSN_OPCODE, &early_xen_iret_patch, &xen_iret,
+			JMP32_INSN_SIZE);
+
 	xen_domain_type = XEN_PV_DOMAIN;
 	xen_start_flags = xen_start_info->flags;
@@ -1195,7 +1201,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
 	/* Install Xen paravirt ops */
 	pv_info = xen_info;
 	pv_ops.cpu = xen_cpu_ops.cpu;
-	paravirt_iret = xen_iret;
 	xen_init_irq_ops();

 	/*
@@ -189,6 +189,7 @@ hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
  */
 SYM_CODE_START(xen_iret)
 	UNWIND_HINT_EMPTY
+	ANNOTATE_NOENDBR
 	pushq $0
 	jmp hypercall_iret
 SYM_CODE_END(xen_iret)