Commit a0ff0611 authored by Thomas Gleixner, committed by Borislav Petkov

x86/fpu: Move KVMs FPU swapping to FPU core

Swapping the host/guest FPU is directly fiddling with FPU internals, which
requires 5 exports. The upcoming support for dynamically enabled states
would require even more.

Implement a swap function in the FPU core code and export that instead.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Cc: kvm@vger.kernel.org
Link: https://lkml.kernel.org/r/20211015011539.076072399@linutronix.de
parent 63cf05a1
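
Before the diff, a minimal sketch (not part of the commit) of how a caller drives the new interface. fpu_swap_kvm_fpu(), its NULL-pointer semantics, and both masks are taken from the KVM call sites changed below; the helper names enter_guest_fpu()/leave_guest_fpu() are hypothetical:

#include <asm/fpu/api.h>	/* new home of fpu_swap_kvm_fpu() */

/*
 * Entering the guest: save the host (qemu user) FPU state and load the
 * guest's. A NULL guest_fpu (protected guest state) turns the call into
 * a save-only operation. PKRU is masked out of the restore because KVM
 * restores it separately in kvm_x86_ops.run().
 */
static void enter_guest_fpu(struct kvm_vcpu *vcpu)	/* hypothetical helper */
{
	fpu_swap_kvm_fpu(vcpu->arch.user_fpu, vcpu->arch.guest_fpu,
			 ~XFEATURE_MASK_PKRU);
}

/*
 * Leaving the guest: save the guest state (skipped when guest_fpu is
 * NULL) and restore every host feature, hence the all-ones mask. The
 * core clamps the mask with xfeatures_mask_fpstate() internally.
 */
static void leave_guest_fpu(struct kvm_vcpu *vcpu)	/* hypothetical helper */
{
	fpu_swap_kvm_fpu(vcpu->arch.guest_fpu, vcpu->arch.user_fpu, ~0ULL);
}

Passing NULL for either side degrades the swap to a pure save or a pure restore, which is how guests with protected state are handled without a branch at the call site.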
--- a/arch/x86/include/asm/fpu/api.h
+++ b/arch/x86/include/asm/fpu/api.h
@@ -12,6 +12,8 @@
 #define _ASM_X86_FPU_API_H
 #include <linux/bottom_half.h>
 
+#include <asm/fpu/types.h>
+
 /*
  * Use kernel_fpu_begin/end() if you intend to use FPU in kernel context. It
  * disables preemption so be careful if you intend to use it for long periods
@@ -108,4 +110,10 @@ extern int cpu_has_xfeatures(u64 xfeatures_mask, const char **feature_name);
 
 static inline void update_pasid(void) { }
 
+/* fpstate-related functions which are exported to KVM */
+extern void fpu_init_fpstate_user(struct fpu *fpu);
+
+/* KVM specific functions */
+extern void fpu_swap_kvm_fpu(struct fpu *save, struct fpu *rstor, u64 restore_mask);
+
 #endif /* _ASM_X86_FPU_API_H */
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -74,14 +74,8 @@ static __always_inline __pure bool use_fxsr(void)
 	return static_cpu_has(X86_FEATURE_FXSR);
 }
 
-/*
- * fpstate handling functions:
- */
-
 extern union fpregs_state init_fpstate;
-
 extern void fpstate_init_user(union fpregs_state *state);
-extern void fpu_init_fpstate_user(struct fpu *fpu);
 
 #ifdef CONFIG_MATH_EMULATION
 extern void fpstate_init_soft(struct swregs_state *soft);
@@ -381,12 +375,7 @@ static inline int os_xrstor_safe(struct xregs_state *xstate, u64 mask)
 	return err;
 }
 
-extern void __restore_fpregs_from_fpstate(union fpregs_state *fpstate, u64 mask);
-
-static inline void restore_fpregs_from_fpstate(union fpregs_state *fpstate)
-{
-	__restore_fpregs_from_fpstate(fpstate, xfeatures_mask_fpstate());
-}
+extern void restore_fpregs_from_fpstate(union fpregs_state *fpstate, u64 mask);
 
 extern bool copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);
 
@@ -467,7 +456,7 @@ static inline void fpregs_restore_userregs(void)
 	 */
 	mask = xfeatures_mask_restore_user() |
 	       xfeatures_mask_supervisor();
-	__restore_fpregs_from_fpstate(&fpu->state, mask);
+	restore_fpregs_from_fpstate(&fpu->state, mask);
 
 	fpregs_activate(fpu);
 	fpu->last_cpu = cpu;
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -124,9 +124,8 @@ void save_fpregs_to_fpstate(struct fpu *fpu)
 	asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->state.fsave));
 	frstor(&fpu->state.fsave);
 }
-EXPORT_SYMBOL(save_fpregs_to_fpstate);
 
-void __restore_fpregs_from_fpstate(union fpregs_state *fpstate, u64 mask)
+void restore_fpregs_from_fpstate(union fpregs_state *fpstate, u64 mask)
 {
 	/*
 	 * AMD K7/K8 and later CPUs up to Zen don't save/restore
@@ -151,7 +150,31 @@ void __restore_fpregs_from_fpstate(union fpregs_state *fpstate, u64 mask)
 			frstor(&fpstate->fsave);
 	}
 }
-EXPORT_SYMBOL_GPL(__restore_fpregs_from_fpstate);
+
+#if IS_ENABLED(CONFIG_KVM)
+void fpu_swap_kvm_fpu(struct fpu *save, struct fpu *rstor, u64 restore_mask)
+{
+	fpregs_lock();
+
+	if (save) {
+		if (test_thread_flag(TIF_NEED_FPU_LOAD)) {
+			memcpy(&save->state, &current->thread.fpu.state,
+			       fpu_kernel_xstate_size);
+		} else {
+			save_fpregs_to_fpstate(save);
+		}
+	}
+
+	if (rstor) {
+		restore_mask &= xfeatures_mask_fpstate();
+		restore_fpregs_from_fpstate(&rstor->state, restore_mask);
+	}
+
+	fpregs_mark_activate();
+	fpregs_unlock();
+}
+EXPORT_SYMBOL_GPL(fpu_swap_kvm_fpu);
+#endif
 
 void kernel_fpu_begin_mask(unsigned int kfpu_mask)
 {
@@ -457,7 +480,6 @@ void fpregs_mark_activate(void)
 	fpu->last_cpu = smp_processor_id();
 	clear_thread_flag(TIF_NEED_FPU_LOAD);
 }
-EXPORT_SYMBOL_GPL(fpregs_mark_activate);
 
 /*
  * x87 math exception handling:
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -136,7 +136,6 @@ static void __init fpu__init_system_generic(void)
  * components into a single, continuous memory block:
  */
 unsigned int fpu_kernel_xstate_size __ro_after_init;
-EXPORT_SYMBOL_GPL(fpu_kernel_xstate_size);
 
 /* Get alignment of the TYPE. */
 #define TYPE_ALIGN(TYPE) offsetof(struct { char x; TYPE test; }, test)
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -65,7 +65,6 @@ static short xsave_cpuid_features[] __initdata = {
 * XSAVE buffer, both supervisor and user xstates.
 */
 u64 xfeatures_mask_all __ro_after_init;
-EXPORT_SYMBOL_GPL(xfeatures_mask_all);
 
 static unsigned int xstate_offsets[XFEATURE_MAX] __ro_after_init =
 	{ [ 0 ... XFEATURE_MAX - 1] = -1};
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -68,7 +68,9 @@
 #include <asm/mce.h>
 #include <asm/pkru.h>
 #include <linux/kernel_stat.h>
-#include <asm/fpu/internal.h> /* Ugh! */
+#include <asm/fpu/api.h>
+#include <asm/fpu/xcr.h>
+#include <asm/fpu/xstate.h>
 #include <asm/pvclock.h>
 #include <asm/div64.h>
 #include <asm/irq_remapping.h>
@@ -9913,58 +9915,27 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-static void kvm_save_current_fpu(struct fpu *fpu)
-{
-	/*
-	 * If the target FPU state is not resident in the CPU registers, just
-	 * memcpy() from current, else save CPU state directly to the target.
-	 */
-	if (test_thread_flag(TIF_NEED_FPU_LOAD))
-		memcpy(&fpu->state, &current->thread.fpu.state,
-		       fpu_kernel_xstate_size);
-	else
-		save_fpregs_to_fpstate(fpu);
-}
-
 /* Swap (qemu) user FPU context for the guest FPU context. */
 static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
 {
-	fpregs_lock();
-
-	kvm_save_current_fpu(vcpu->arch.user_fpu);
-
 	/*
-	 * Guests with protected state can't have it set by the hypervisor,
-	 * so skip trying to set it.
+	 * Guests with protected state have guest_fpu == NULL which makes
+	 * the swap only save the host state. Exclude PKRU from restore as
+	 * it is restored separately in kvm_x86_ops.run().
 	 */
-	if (vcpu->arch.guest_fpu)
-		/* PKRU is separately restored in kvm_x86_ops.run. */
-		__restore_fpregs_from_fpstate(&vcpu->arch.guest_fpu->state,
-					      ~XFEATURE_MASK_PKRU);
-
-	fpregs_mark_activate();
-	fpregs_unlock();
-
+	fpu_swap_kvm_fpu(vcpu->arch.user_fpu, vcpu->arch.guest_fpu,
+			 ~XFEATURE_MASK_PKRU);
 	trace_kvm_fpu(1);
 }
 
 /* When vcpu_run ends, restore user space FPU context. */
 static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 {
-	fpregs_lock();
-
 	/*
-	 * Guests with protected state can't have it read by the hypervisor,
-	 * so skip trying to save it.
+	 * Guests with protected state have guest_fpu == NULL which makes
+	 * swap only restore the host state.
 	 */
-	if (vcpu->arch.guest_fpu)
-		kvm_save_current_fpu(vcpu->arch.guest_fpu);
-
-	restore_fpregs_from_fpstate(&vcpu->arch.user_fpu->state);
-
-	fpregs_mark_activate();
-	fpregs_unlock();
-
+	fpu_swap_kvm_fpu(vcpu->arch.guest_fpu, vcpu->arch.user_fpu, ~0ULL);
 	++vcpu->stat.fpu_reload;
 	trace_kvm_fpu(0);
 }
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -47,7 +47,7 @@ static bool ex_handler_fprestore(const struct exception_table_entry *fixup,
 	WARN_ONCE(1, "Bad FPU state detected at %pB, reinitializing FPU registers.",
 		  (void *)instruction_pointer(regs));
 
-	__restore_fpregs_from_fpstate(&init_fpstate, xfeatures_mask_fpstate());
+	restore_fpregs_from_fpstate(&init_fpstate, xfeatures_mask_fpstate());
 	return true;
 }
 
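A note on the mask arithmetic used at the call sites (a sketch; it assumes PKRU occupies xfeature bit 9, its usual position, and mirrors the clamping fpu_swap_kvm_fpu() performs internally):

/* Illustrative only: how the restore mask is composed and clamped. */
u64 restore_mask = ~XFEATURE_MASK_PKRU;		/* everything except PKRU */
restore_mask &= xfeatures_mask_fpstate();	/* limit to features the fpstate manages */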