Commit 50bf72d0 authored by Paul Mackerras, committed by Ben Hutchings

KVM: PPC: Book3S HV: Save/restore XER in checkpointed register state

commit 0d808df0 upstream.

When switching from/to a guest that has a transaction in progress,
we need to save/restore the checkpointed register state.  Although
XER is part of the CPU state that gets checkpointed, the code that
does this saving and restoring doesn't save/restore XER.

This fixes it by saving and restoring the XER.  To allow userspace
to read/write the checkpointed XER value, we also add a new ONE_REG
specifier.

The visible effect of this bug is that the guest may see its XER
value being corrupted when it uses transactions.
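
For illustration, a minimal sketch of how userspace (e.g. a VMM saving TM
state for migration) might read the new checkpointed-XER register through
the ONE_REG interface; vcpu_fd is assumed to come from KVM_CREATE_VCPU and
error handling is omitted:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>	/* pulls in the powerpc asm/kvm.h with the TM IDs */

/* Read a vCPU's checkpointed XER via KVM_GET_ONE_REG.
 * Returns 0 on success, -1 (with errno set) on failure. */
static int get_tm_xer(int vcpu_fd, uint64_t *xer)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_TM_XER,	/* the ID added by this patch */
		.addr = (uintptr_t)xer,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}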

Fixes: e4e38121 ("KVM: PPC: Book3S HV: Add transactional memory support")
Fixes: 0a8eccef ("KVM: PPC: Book3S HV: Add missing code for transaction reclaim on guest exit")
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Reviewed-by: Thomas Huth <thuth@redhat.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
[bwh: Backported to 3.16: adjust context, spacing]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
parent 0a295bf1
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -1891,6 +1891,7 @@ registers, find a list below:
   PPC   | KVM_REG_PPC_TM_VSCR   | 32
   PPC   | KVM_REG_PPC_TM_DSCR   | 64
   PPC   | KVM_REG_PPC_TM_TAR    | 64
+  PPC   | KVM_REG_PPC_TM_XER    | 64
 
 ARM registers are mapped using the lower 32 bits. The upper 16 of that
 is the register group type, or coprocessor number:
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -532,6 +532,7 @@ struct kvm_vcpu_arch {
 	u64 tfiar;
 	u32 cr_tm;
+	u64 xer_tm;
 	u64 lr_tm;
 	u64 ctr_tm;
 	u64 amr_tm;
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -580,6 +580,7 @@ struct kvm_get_htab_header {
 #define KVM_REG_PPC_TM_VSCR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U32 | 0x67)
 #define KVM_REG_PPC_TM_DSCR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x68)
 #define KVM_REG_PPC_TM_TAR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x69)
+#define KVM_REG_PPC_TM_XER	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x6a)
 
 /* PPC64 eXternal Interrupt Controller Specification */
 #define KVM_DEV_XICS_GRP_SOURCES	1	/* 64-bit source attributes */
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -572,6 +572,7 @@ int main(void)
 	DEFINE(VCPU_VRS_TM, offsetof(struct kvm_vcpu, arch.vr_tm.vr));
 	DEFINE(VCPU_VRSAVE_TM, offsetof(struct kvm_vcpu, arch.vrsave_tm));
 	DEFINE(VCPU_CR_TM, offsetof(struct kvm_vcpu, arch.cr_tm));
+	DEFINE(VCPU_XER_TM, offsetof(struct kvm_vcpu, arch.xer_tm));
 	DEFINE(VCPU_LR_TM, offsetof(struct kvm_vcpu, arch.lr_tm));
 	DEFINE(VCPU_CTR_TM, offsetof(struct kvm_vcpu, arch.ctr_tm));
 	DEFINE(VCPU_AMR_TM, offsetof(struct kvm_vcpu, arch.amr_tm));
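
The DEFINE() entry above is what lets the assembly changes further down
refer to the new field as VCPU_XER_TM. A minimal sketch of that mechanism,
modeled on the kernel's include/linux/kbuild.h; the struct, field offset,
and symbol name here are invented for the demo:

#include <stddef.h>

struct vcpu_demo {		/* stand-in for struct kvm_vcpu */
	char pad[256];
	unsigned long xer_tm;
};

/* Emit "->SYM <value>" into the generated assembly; the kernel build
 * scrapes such lines into #defines in asm-offsets.h, which .S files
 * include so they can write e.g. "std r11, VCPU_XER_TM(r9)". */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

int main(void)
{
	DEFINE(VCPU_XER_TM_DEMO, offsetof(struct vcpu_demo, xer_tm));
	return 0;
}

Like the kernel's asm-offsets.c, such a file is only ever compiled to
assembly (gcc -S) and never assembled; grep the .s output for "->" to see
the scraped constants.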
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -988,6 +988,9 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 	case KVM_REG_PPC_TM_CR:
 		*val = get_reg_val(id, vcpu->arch.cr_tm);
 		break;
+	case KVM_REG_PPC_TM_XER:
+		*val = get_reg_val(id, vcpu->arch.xer_tm);
+		break;
 	case KVM_REG_PPC_TM_LR:
 		*val = get_reg_val(id, vcpu->arch.lr_tm);
 		break;
@@ -1201,6 +1204,9 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 	case KVM_REG_PPC_TM_CR:
 		vcpu->arch.cr_tm = set_reg_val(id, *val);
 		break;
+	case KVM_REG_PPC_TM_XER:
+		vcpu->arch.xer_tm = set_reg_val(id, *val);
+		break;
 	case KVM_REG_PPC_TM_LR:
 		vcpu->arch.lr_tm = set_reg_val(id, *val);
 		break;
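
The cases above wire KVM_REG_PPC_TM_XER into the generic ONE_REG get/set
dispatch. A companion sketch to the read example in the commit message,
showing how userspace might write a saved value back on the destination
host (same assumptions: vcpu_fd from KVM_CREATE_VCPU, no error handling):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Write a previously saved checkpointed XER back into a vCPU,
 * e.g. when restoring TM state after a live migration. */
static int set_tm_xer(int vcpu_fd, uint64_t xer)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_TM_XER,
		.addr = (uintptr_t)&xer,
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}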
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -2356,11 +2356,13 @@ kvmppc_save_tm:
 	mfctr	r7
 	mfspr	r8, SPRN_AMR
 	mfspr	r10, SPRN_TAR
+	mfxer	r11
 	std	r5, VCPU_LR_TM(r9)
 	stw	r6, VCPU_CR_TM(r9)
 	std	r7, VCPU_CTR_TM(r9)
 	std	r8, VCPU_AMR_TM(r9)
 	std	r10, VCPU_TAR_TM(r9)
+	std	r11, VCPU_XER_TM(r9)
 
 	/* Restore r12 as trap number. */
 	lwz	r12, VCPU_TRAP(r9)
@@ -2453,11 +2455,13 @@ kvmppc_restore_tm:
 	ld	r7, VCPU_CTR_TM(r4)
 	ld	r8, VCPU_AMR_TM(r4)
 	ld	r9, VCPU_TAR_TM(r4)
+	ld	r10, VCPU_XER_TM(r4)
 	mtlr	r5
 	mtcr	r6
 	mtctr	r7
 	mtspr	SPRN_AMR, r8
 	mtspr	SPRN_TAR, r9
+	mtxer	r10
 
 	/*
 	 * Load up PPR and DSCR values but don't put them in the actual SPRs