Commit 595e4f7e authored by Paul Mackerras, committed by Alexander Graf

KVM: PPC: Book3S HV: Use load/store_fp_state functions in HV guest entry/exit

This modifies kvmppc_load_fp and kvmppc_save_fp to use the generic
FP/VSX and VMX load/store functions instead of open-coding the
FP/VSX/VMX load/store instructions.  Since kvmppc_load/save_fp don't
follow C calling conventions, we make them private symbols within
book3s_hv_rmhandlers.S.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
parent 99dae3ba
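
For reference, the generic helpers this patch switches to were added earlier in this series, when the FP/VSX and VMX register sets were moved into struct thread_fp_state and struct thread_vr_state. A minimal sketch of the FP pair as it stood in arch/powerpc/kernel/fpu.S around this time (the exact macro and comment details here are an approximation for context, not part of this patch):

	_GLOBAL(store_fp_state)
		SAVE_32FPVSRS(0, R4, R3)	/* dump all 32 FP/VSX registers at r3 */
		mffs	fr0
		stfd	fr0,FPSTATE_FPSCR(r3)	/* FPSCR is part of thread_fp_state */
		blr

	_GLOBAL(load_fp_state)
		lfd	fr0,FPSTATE_FPSCR(r3)
		MTFSF_L(fr0)			/* restore FPSCR first */
		REST_32FPVSRS(0, R4, R3)
		blr

Both helpers take a pointer to the state buffer in r3 and assume the caller has already enabled FP (and VSX where present) in the MSR. Making kvmppc_save_fp and kvmppc_load_fp private local labels also drops their ppc64 function-descriptor dot entry points, which is why the call sites below change from "bl .kvmppc_save_fp" to "bl kvmppc_save_fp".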
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -426,10 +426,8 @@ int main(void)
 	DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
 	DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave));
 	DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fp.fpr));
-	DEFINE(VCPU_FPSCR, offsetof(struct kvm_vcpu, arch.fp.fpscr));
 #ifdef CONFIG_ALTIVEC
 	DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr.vr));
-	DEFINE(VCPU_VSCR, offsetof(struct kvm_vcpu, arch.vr.vscr));
 #endif
 	DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
 	DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
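
The VCPU_FPSCR and VCPU_VSCR offsets are no longer needed because the generic helpers locate FPSCR and VSCR relative to the start of the state structures, via their own FPSTATE_FPSCR and VRSTATE_VSCR offsets. Roughly, the structures look like this (a sketch of asm/processor.h from this era, shown for context only):

	struct thread_fp_state {
		u64	fpr[32][TS_FPRWIDTH] __attribute__((aligned(16)));
		u64	fpscr;		/* saved/restored by store/load_fp_state */
	};

	struct thread_vr_state {
		vector128	vr[32] __attribute__((aligned(16)));
		vector128	vscr __attribute__((aligned(16)));
	};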
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1261,7 +1261,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
 	/* save FP state */
 	mr	r3, r9
-	bl	.kvmppc_save_fp
+	bl	kvmppc_save_fp
 
 	/* Increment yield count if they have a VPA */
 	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
@@ -1691,7 +1691,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
 	std	r31, VCPU_GPR(R31)(r3)
 
 	/* save FP state */
-	bl	.kvmppc_save_fp
+	bl	kvmppc_save_fp
 
 	/*
 	 * Take a nap until a decrementer or external interrupt occurs,
@@ -1869,8 +1869,12 @@ kvmppc_read_intr:
 /*
  * Save away FP, VMX and VSX registers.
  * r3 = vcpu pointer
+ * N.B. r30 and r31 are volatile across this function,
+ * thus it is not callable from C.
  */
-_GLOBAL(kvmppc_save_fp)
+kvmppc_save_fp:
+	mflr	r30
+	mr	r31,r3
 	mfmsr	r5
 	ori	r8,r5,MSR_FP
 #ifdef CONFIG_ALTIVEC
@@ -1885,42 +1889,17 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 #endif
 	mtmsrd	r8
 	isync
-#ifdef CONFIG_VSX
-BEGIN_FTR_SECTION
-	reg = 0
-	.rept	32
-	li	r6,reg*16+VCPU_FPRS
-	STXVD2X(reg,R6,R3)
-	reg = reg + 1
-	.endr
-FTR_SECTION_ELSE
-#endif
-	reg = 0
-	.rept	32
-	stfd	reg,reg*8+VCPU_FPRS(r3)
-	reg = reg + 1
-	.endr
-#ifdef CONFIG_VSX
-ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
-#endif
-	mffs	fr0
-	stfd	fr0,VCPU_FPSCR(r3)
-
+	addi	r3,r3,VCPU_FPRS
+	bl	.store_fp_state
 #ifdef CONFIG_ALTIVEC
 BEGIN_FTR_SECTION
-	reg = 0
-	.rept	32
-	li	r6,reg*16+VCPU_VRS
-	stvx	reg,r6,r3
-	reg = reg + 1
-	.endr
-	mfvscr	vr0
-	li	r6,VCPU_VSCR
-	stvx	vr0,r6,r3
+	addi	r3,r31,VCPU_VRS
+	bl	.store_vr_state
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 #endif
 	mfspr	r6,SPRN_VRSAVE
 	stw	r6,VCPU_VRSAVE(r3)
+	mtlr	r30
 	mtmsrd	r5
 	isync
 	blr
@@ -1928,9 +1907,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 /*
  * Load up FP, VMX and VSX registers
  * r4 = vcpu pointer
+ * N.B. r30 and r31 are volatile across this function,
+ * thus it is not callable from C.
  */
-	.globl	kvmppc_load_fp
 kvmppc_load_fp:
+	mflr	r30
+	mr	r31,r4
 	mfmsr	r9
 	ori	r8,r9,MSR_FP
 #ifdef CONFIG_ALTIVEC
@@ -1945,42 +1927,18 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 #endif
 	mtmsrd	r8
 	isync
-	lfd	fr0,VCPU_FPSCR(r4)
-	MTFSF_L(fr0)
-#ifdef CONFIG_VSX
-BEGIN_FTR_SECTION
-	reg = 0
-	.rept	32
-	li	r7,reg*16+VCPU_FPRS
-	LXVD2X(reg,R7,R4)
-	reg = reg + 1
-	.endr
-FTR_SECTION_ELSE
-#endif
-	reg = 0
-	.rept	32
-	lfd	reg,reg*8+VCPU_FPRS(r4)
-	reg = reg + 1
-	.endr
-#ifdef CONFIG_VSX
-ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
-#endif
+	addi	r3,r4,VCPU_FPRS
+	bl	.load_fp_state
 #ifdef CONFIG_ALTIVEC
 BEGIN_FTR_SECTION
-	li	r7,VCPU_VSCR
-	lvx	vr0,r7,r4
-	mtvscr	vr0
-	reg = 0
-	.rept	32
-	li	r7,reg*16+VCPU_VRS
-	lvx	reg,r7,r4
-	reg = reg + 1
-	.endr
+	addi	r3,r31,VCPU_VRS
+	bl	.load_vr_state
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 #endif
 	lwz	r7,VCPU_VRSAVE(r4)
 	mtspr	SPRN_VRSAVE,r7
+	mtlr	r30
+	mr	r4,r31
 	blr
 
 /*
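
The VMX counterparts called above follow the same shape; a sketch of arch/powerpc/kernel/vector.S from this era (again an approximation, not part of this patch):

	_GLOBAL(store_vr_state)
		SAVE_32VRS(0, r4, r3)	/* dump all 32 VMX registers at r3 */
		mfvscr	vr0
		li	r4,VRSTATE_VSCR
		stvx	vr0,r4,r3	/* VSCR is stored alongside the VRs */
		blr

	_GLOBAL(load_vr_state)
		li	r4,VRSTATE_VSCR
		lvx	vr0,r4,r3
		mtvscr	vr0		/* restore VSCR first */
		REST_32VRS(0, r4, r3)
		blr

Because the helpers take the buffer pointer in r3 and use r4 as scratch, kvmppc_save_fp and kvmppc_load_fp stash the vcpu pointer in r31 and the return address in r30 around the calls, which is what makes them non-C-callable. Note that the unchanged VRSAVE context lines still index off r3/r4 after those registers have been repointed at the FP state; a follow-up patch later switched those accesses to use r31.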