Commit 7b490411 authored by Michael Neuling, committed by Alexander Graf

KVM: PPC: Book3S HV: Add new state for transactional memory

Add new state for transactional memory (TM) to kvm_vcpu_arch.  Also add
asm-offset bits that are going to be required.

This also moves the existing TFHAR, TFIAR and TEXASR SPRs into a
CONFIG_PPC_TRANSACTIONAL_MEM section.  This requires some code changes to
ensure we still compile with CONFIG_PPC_TRANSACTIONAL_MEM=N.  Many of the
added #ifdefs are removed in a later patch when the bulk of the TM code is
added.
Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
[agraf: fix merge conflict]
Signed-off-by: Alexander Graf <agraf@suse.de>
parent 7b37a123
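For orientation (not part of the commit itself): the KVM_REG_PPC_TFHAR/TFIAR/TEXASR cases touched in book3s_hv.c below belong to KVM's ONE_REG interface, through which userspace reads and writes these SPRs one at a time. A minimal sketch of how a userspace tool might fetch TFHAR from an already-created vcpu file descriptor; the read_guest_tfhar helper and the vcpu_fd parameter are illustrative only:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>	/* struct kvm_one_reg, KVM_GET_ONE_REG, KVM_REG_PPC_TFHAR */

/* Read the guest's TFHAR SPR via the ONE_REG ioctl.  vcpu_fd is assumed to be
 * a vcpu descriptor obtained earlier with KVM_CREATE_VCPU. */
static int read_guest_tfhar(int vcpu_fd, uint64_t *tfhar)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_TFHAR,
		.addr = (uint64_t)(unsigned long)tfhar,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}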
arch/powerpc/include/asm/kvm_host.h
@@ -475,9 +475,6 @@ struct kvm_vcpu_arch {
 	ulong ppr;
 	ulong pspb;
 	ulong fscr;
-	ulong tfhar;
-	ulong tfiar;
-	ulong texasr;
 	ulong ebbhr;
 	ulong ebbrr;
 	ulong bescr;
@@ -526,6 +523,27 @@ struct kvm_vcpu_arch {
 	u64 siar;
 	u64 sdar;
 	u64 sier;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	u64 tfhar;
+	u64 texasr;
+	u64 tfiar;
+	u32 cr_tm;
+	u64 lr_tm;
+	u64 ctr_tm;
+	u64 amr_tm;
+	u64 ppr_tm;
+	u64 dscr_tm;
+	u64 tar_tm;
+	ulong gpr_tm[32];
+	struct thread_fp_state fp_tm;
+	struct thread_vr_state vr_tm;
+	u32 vrsave_tm; /* also USPRG0 */
+#endif
 #ifdef CONFIG_KVM_EXIT_TIMING
 	struct mutex exit_timing_lock;
arch/powerpc/kernel/asm-offsets.c
@@ -521,9 +521,6 @@ int main(void)
 	DEFINE(VCPU_PPR, offsetof(struct kvm_vcpu, arch.ppr));
 	DEFINE(VCPU_FSCR, offsetof(struct kvm_vcpu, arch.fscr));
 	DEFINE(VCPU_PSPB, offsetof(struct kvm_vcpu, arch.pspb));
-	DEFINE(VCPU_TFHAR, offsetof(struct kvm_vcpu, arch.tfhar));
-	DEFINE(VCPU_TFIAR, offsetof(struct kvm_vcpu, arch.tfiar));
-	DEFINE(VCPU_TEXASR, offsetof(struct kvm_vcpu, arch.texasr));
 	DEFINE(VCPU_EBBHR, offsetof(struct kvm_vcpu, arch.ebbhr));
 	DEFINE(VCPU_EBBRR, offsetof(struct kvm_vcpu, arch.ebbrr));
 	DEFINE(VCPU_BESCR, offsetof(struct kvm_vcpu, arch.bescr));
@@ -545,6 +542,22 @@ int main(void)
 	DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige));
 	DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv));
 	DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb));
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	DEFINE(VCPU_TFHAR, offsetof(struct kvm_vcpu, arch.tfhar));
+	DEFINE(VCPU_TFIAR, offsetof(struct kvm_vcpu, arch.tfiar));
+	DEFINE(VCPU_TEXASR, offsetof(struct kvm_vcpu, arch.texasr));
+	DEFINE(VCPU_GPR_TM, offsetof(struct kvm_vcpu, arch.gpr_tm));
+	DEFINE(VCPU_FPRS_TM, offsetof(struct kvm_vcpu, arch.fp_tm.fpr));
+	DEFINE(VCPU_VRS_TM, offsetof(struct kvm_vcpu, arch.vr_tm.vr));
+	DEFINE(VCPU_VRSAVE_TM, offsetof(struct kvm_vcpu, arch.vrsave_tm));
+	DEFINE(VCPU_CR_TM, offsetof(struct kvm_vcpu, arch.cr_tm));
+	DEFINE(VCPU_LR_TM, offsetof(struct kvm_vcpu, arch.lr_tm));
+	DEFINE(VCPU_CTR_TM, offsetof(struct kvm_vcpu, arch.ctr_tm));
+	DEFINE(VCPU_AMR_TM, offsetof(struct kvm_vcpu, arch.amr_tm));
+	DEFINE(VCPU_PPR_TM, offsetof(struct kvm_vcpu, arch.ppr_tm));
+	DEFINE(VCPU_DSCR_TM, offsetof(struct kvm_vcpu, arch.dscr_tm));
+	DEFINE(VCPU_TAR_TM, offsetof(struct kvm_vcpu, arch.tar_tm));
+#endif
 #ifdef CONFIG_PPC_BOOK3S_64
 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
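A side note on the asm-offsets.c hunk above: the DEFINE() entries exist so that the assembly in book3s_hv_rmhandlers.S can address the new fields through constant offsets (e.g. ld r5, VCPU_TFHAR(r4)); the build turns each DEFINE() into a #define in the generated asm-offsets.h. A standalone sketch of the idea, using stand-in struct layouts rather than the real kvm_vcpu definition:

/* Stand-in structs for illustration only; the real layout is struct kvm_vcpu /
 * kvm_vcpu_arch in arch/powerpc/include/asm/kvm_host.h. */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct vcpu_arch {
	uint64_t tfhar;
	uint64_t texasr;
	uint64_t tfiar;
};

struct vcpu {
	int id;
	struct vcpu_arch arch;
};

int main(void)
{
	/* asm-offsets.c emits values like these into asm-offsets.h as
	 * #defines (VCPU_TFHAR, VCPU_TEXASR, VCPU_TFIAR), which the .S
	 * code then uses as load/store displacements. */
	printf("VCPU_TFHAR  = %zu\n", offsetof(struct vcpu, arch.tfhar));
	printf("VCPU_TEXASR = %zu\n", offsetof(struct vcpu, arch.texasr));
	printf("VCPU_TFIAR  = %zu\n", offsetof(struct vcpu, arch.tfiar));
	return 0;
}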
arch/powerpc/kvm/book3s_hv.c
@@ -875,6 +875,7 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 	case KVM_REG_PPC_IAMR:
 		*val = get_reg_val(id, vcpu->arch.iamr);
 		break;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 	case KVM_REG_PPC_TFHAR:
 		*val = get_reg_val(id, vcpu->arch.tfhar);
 		break;
@@ -884,6 +885,7 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 	case KVM_REG_PPC_TEXASR:
 		*val = get_reg_val(id, vcpu->arch.texasr);
 		break;
+#endif
 	case KVM_REG_PPC_FSCR:
 		*val = get_reg_val(id, vcpu->arch.fscr);
 		break;
@@ -1033,6 +1035,7 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 	case KVM_REG_PPC_IAMR:
 		vcpu->arch.iamr = set_reg_val(id, *val);
 		break;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 	case KVM_REG_PPC_TFHAR:
 		vcpu->arch.tfhar = set_reg_val(id, *val);
 		break;
@@ -1042,6 +1045,7 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 	case KVM_REG_PPC_TEXASR:
 		vcpu->arch.texasr = set_reg_val(id, *val);
 		break;
+#endif
 	case KVM_REG_PPC_FSCR:
 		vcpu->arch.fscr = set_reg_val(id, *val);
 		break;
arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -701,13 +701,15 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 	ld	r6, VCPU_VTB(r4)
 	mtspr	SPRN_IC, r5
 	mtspr	SPRN_VTB, r6
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 	ld	r5, VCPU_TFHAR(r4)
 	ld	r6, VCPU_TFIAR(r4)
 	ld	r7, VCPU_TEXASR(r4)
-	ld	r8, VCPU_EBBHR(r4)
 	mtspr	SPRN_TFHAR, r5
 	mtspr	SPRN_TFIAR, r6
 	mtspr	SPRN_TEXASR, r7
+#endif
+	ld	r8, VCPU_EBBHR(r4)
 	mtspr	SPRN_EBBHR, r8
 	ld	r5, VCPU_EBBRR(r4)
 	ld	r6, VCPU_BESCR(r4)
@@ -1118,13 +1120,15 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 	std	r5, VCPU_IC(r9)
 	std	r6, VCPU_VTB(r9)
 	std	r7, VCPU_TAR(r9)
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 	mfspr	r5, SPRN_TFHAR
 	mfspr	r6, SPRN_TFIAR
 	mfspr	r7, SPRN_TEXASR
-	mfspr	r8, SPRN_EBBHR
 	std	r5, VCPU_TFHAR(r9)
 	std	r6, VCPU_TFIAR(r9)
 	std	r7, VCPU_TEXASR(r9)
+#endif
+	mfspr	r8, SPRN_EBBHR
 	std	r8, VCPU_EBBHR(r9)
 	mfspr	r5, SPRN_EBBRR
 	mfspr	r6, SPRN_BESCR
@@ -1497,6 +1501,73 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 1:	addi	r8,r8,16
 	.endr
+	/* Save DEC */
+	mfspr	r5,SPRN_DEC
+	mftb	r6
+	extsw	r5,r5
+	add	r5,r5,r6
+	std	r5,VCPU_DEC_EXPIRES(r9)
+BEGIN_FTR_SECTION
+	b	8f
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+	/* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
+	mfmsr	r8
+	li	r0, 1
+	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
+	mtmsrd	r8
+	/* Save POWER8-specific registers */
+	mfspr	r5, SPRN_IAMR
+	mfspr	r6, SPRN_PSPB
+	mfspr	r7, SPRN_FSCR
+	std	r5, VCPU_IAMR(r9)
+	stw	r6, VCPU_PSPB(r9)
+	std	r7, VCPU_FSCR(r9)
+	mfspr	r5, SPRN_IC
+	mfspr	r6, SPRN_VTB
+	mfspr	r7, SPRN_TAR
+	std	r5, VCPU_IC(r9)
+	std	r6, VCPU_VTB(r9)
+	std	r7, VCPU_TAR(r9)
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	mfspr	r5, SPRN_TFHAR
+	mfspr	r6, SPRN_TFIAR
+	mfspr	r7, SPRN_TEXASR
+	std	r5, VCPU_TFHAR(r9)
+	std	r6, VCPU_TFIAR(r9)
+	std	r7, VCPU_TEXASR(r9)
+#endif
+	mfspr	r8, SPRN_EBBHR
+	std	r8, VCPU_EBBHR(r9)
+	mfspr	r5, SPRN_EBBRR
+	mfspr	r6, SPRN_BESCR
+	mfspr	r7, SPRN_CSIGR
+	mfspr	r8, SPRN_TACR
+	std	r5, VCPU_EBBRR(r9)
+	std	r6, VCPU_BESCR(r9)
+	std	r7, VCPU_CSIGR(r9)
+	std	r8, VCPU_TACR(r9)
+	mfspr	r5, SPRN_TCSCR
+	mfspr	r6, SPRN_ACOP
+	mfspr	r7, SPRN_PID
+	mfspr	r8, SPRN_WORT
+	std	r5, VCPU_TCSCR(r9)
+	std	r6, VCPU_ACOP(r9)
+	stw	r7, VCPU_GUEST_PID(r9)
+	std	r8, VCPU_WORT(r9)
+8:
+	/* Save and reset AMR and UAMOR before turning on the MMU */
+BEGIN_FTR_SECTION
+	mfspr	r5,SPRN_AMR
+	mfspr	r6,SPRN_UAMOR
+	std	r5,VCPU_AMR(r9)
+	std	r6,VCPU_UAMOR(r9)
+	li	r6,0
+	mtspr	SPRN_AMR,r6
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 	/* Unset guest mode */
 	li	r0, KVM_GUEST_MODE_NONE
 	stb	r0, HSTATE_IN_GUEST(r13)
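One aside on the exit path added above: as the in-line comment notes, TFHAR/TFIAR/TEXASR can only be accessed once MSR[TM] is set, which is why the new code does mfmsr/rldimi/mtmsrd before the mfspr instructions. The rldimi with MSR_TM_LG simply ORs the TM bit into the MSR image. A tiny standalone illustration of that bit operation; MSR_TM_LG is hard-coded to 32 here only to keep the example self-contained (the real definition lives in asm/reg.h):

#include <stdio.h>

#define MSR_TM_LG 32	/* assumed bit position of MSR[TM], per asm/reg.h */

int main(void)
{
	unsigned long long msr = 0x9000000000001033ULL;	/* arbitrary example MSR image */

	/* Equivalent of: li r0,1; rldimi r8,r0,MSR_TM_LG,63-MSR_TM_LG */
	msr |= 1ULL << MSR_TM_LG;

	printf("MSR with TM enabled: %#llx\n", msr);
	return 0;
}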