Commit 7e4a145e authored by Athira Rajeev's avatar Athira Rajeev Committed by Michael Ellerman

KVM: PPC: Book3S HV: Cleanup updates for kvm vcpu MMCR

Currently `kvm_vcpu_arch` stores all Monitor Mode Control registers
in a flat array in order: mmcr0, mmcr1, mmcra, mmcr2, mmcrs
Split this to give mmcra and mmcrs their own entries in vcpu and
use a flat array for mmcr0 to mmcr2. This patch implements this
cleanup to make the code easier to read.
Signed-off-by: default avatarAthira Rajeev <atrajeev@linux.vnet.ibm.com>
[mpe: Fix MMCRA/MMCR2 uapi breakage as noted by paulus]
Signed-off-by: default avatarMichael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/1594996707-3727-3-git-send-email-atrajeev@linux.vnet.ibm.com
parent 78d76819
...@@ -637,7 +637,9 @@ struct kvm_vcpu_arch { ...@@ -637,7 +637,9 @@ struct kvm_vcpu_arch {
u32 ccr1; u32 ccr1;
u32 dbsr; u32 dbsr;
u64 mmcr[5]; u64 mmcr[3]; /* MMCR0, MMCR1, MMCR2 */
u64 mmcra;
u64 mmcrs;
u32 pmc[8]; u32 pmc[8];
u32 spmc[2]; u32 spmc[2];
u64 siar; u64 siar;
......
...@@ -559,6 +559,8 @@ int main(void) ...@@ -559,6 +559,8 @@ int main(void)
OFFSET(VCPU_IRQ_PENDING, kvm_vcpu, arch.irq_pending); OFFSET(VCPU_IRQ_PENDING, kvm_vcpu, arch.irq_pending);
OFFSET(VCPU_DBELL_REQ, kvm_vcpu, arch.doorbell_request); OFFSET(VCPU_DBELL_REQ, kvm_vcpu, arch.doorbell_request);
OFFSET(VCPU_MMCR, kvm_vcpu, arch.mmcr); OFFSET(VCPU_MMCR, kvm_vcpu, arch.mmcr);
OFFSET(VCPU_MMCRA, kvm_vcpu, arch.mmcra);
OFFSET(VCPU_MMCRS, kvm_vcpu, arch.mmcrs);
OFFSET(VCPU_PMC, kvm_vcpu, arch.pmc); OFFSET(VCPU_PMC, kvm_vcpu, arch.pmc);
OFFSET(VCPU_SPMC, kvm_vcpu, arch.spmc); OFFSET(VCPU_SPMC, kvm_vcpu, arch.spmc);
OFFSET(VCPU_SIAR, kvm_vcpu, arch.siar); OFFSET(VCPU_SIAR, kvm_vcpu, arch.siar);
......
...@@ -1679,10 +1679,19 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, ...@@ -1679,10 +1679,19 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_UAMOR: case KVM_REG_PPC_UAMOR:
*val = get_reg_val(id, vcpu->arch.uamor); *val = get_reg_val(id, vcpu->arch.uamor);
break; break;
case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS: case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCR1:
i = id - KVM_REG_PPC_MMCR0; i = id - KVM_REG_PPC_MMCR0;
*val = get_reg_val(id, vcpu->arch.mmcr[i]); *val = get_reg_val(id, vcpu->arch.mmcr[i]);
break; break;
case KVM_REG_PPC_MMCR2:
*val = get_reg_val(id, vcpu->arch.mmcr[2]);
break;
case KVM_REG_PPC_MMCRA:
*val = get_reg_val(id, vcpu->arch.mmcra);
break;
case KVM_REG_PPC_MMCRS:
*val = get_reg_val(id, vcpu->arch.mmcrs);
break;
case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8: case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
i = id - KVM_REG_PPC_PMC1; i = id - KVM_REG_PPC_PMC1;
*val = get_reg_val(id, vcpu->arch.pmc[i]); *val = get_reg_val(id, vcpu->arch.pmc[i]);
...@@ -1900,10 +1909,19 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, ...@@ -1900,10 +1909,19 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_UAMOR: case KVM_REG_PPC_UAMOR:
vcpu->arch.uamor = set_reg_val(id, *val); vcpu->arch.uamor = set_reg_val(id, *val);
break; break;
case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS: case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCR1:
i = id - KVM_REG_PPC_MMCR0; i = id - KVM_REG_PPC_MMCR0;
vcpu->arch.mmcr[i] = set_reg_val(id, *val); vcpu->arch.mmcr[i] = set_reg_val(id, *val);
break; break;
case KVM_REG_PPC_MMCR2:
vcpu->arch.mmcr[2] = set_reg_val(id, *val);
break;
case KVM_REG_PPC_MMCRA:
vcpu->arch.mmcra = set_reg_val(id, *val);
break;
case KVM_REG_PPC_MMCRS:
vcpu->arch.mmcrs = set_reg_val(id, *val);
break;
case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8: case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
i = id - KVM_REG_PPC_PMC1; i = id - KVM_REG_PPC_PMC1;
vcpu->arch.pmc[i] = set_reg_val(id, *val); vcpu->arch.pmc[i] = set_reg_val(id, *val);
......
...@@ -3428,7 +3428,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG) ...@@ -3428,7 +3428,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
mtspr SPRN_PMC6, r9 mtspr SPRN_PMC6, r9
ld r3, VCPU_MMCR(r4) ld r3, VCPU_MMCR(r4)
ld r5, VCPU_MMCR + 8(r4) ld r5, VCPU_MMCR + 8(r4)
ld r6, VCPU_MMCR + 16(r4) ld r6, VCPU_MMCRA(r4)
ld r7, VCPU_SIAR(r4) ld r7, VCPU_SIAR(r4)
ld r8, VCPU_SDAR(r4) ld r8, VCPU_SDAR(r4)
mtspr SPRN_MMCR1, r5 mtspr SPRN_MMCR1, r5
...@@ -3436,14 +3436,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG) ...@@ -3436,14 +3436,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
mtspr SPRN_SIAR, r7 mtspr SPRN_SIAR, r7
mtspr SPRN_SDAR, r8 mtspr SPRN_SDAR, r8
BEGIN_FTR_SECTION BEGIN_FTR_SECTION
ld r5, VCPU_MMCR + 24(r4) ld r5, VCPU_MMCR + 16(r4)
ld r6, VCPU_SIER(r4) ld r6, VCPU_SIER(r4)
mtspr SPRN_MMCR2, r5 mtspr SPRN_MMCR2, r5
mtspr SPRN_SIER, r6 mtspr SPRN_SIER, r6
BEGIN_FTR_SECTION_NESTED(96) BEGIN_FTR_SECTION_NESTED(96)
lwz r7, VCPU_PMC + 24(r4) lwz r7, VCPU_PMC + 24(r4)
lwz r8, VCPU_PMC + 28(r4) lwz r8, VCPU_PMC + 28(r4)
ld r9, VCPU_MMCR + 32(r4) ld r9, VCPU_MMCRS(r4)
mtspr SPRN_SPMC1, r7 mtspr SPRN_SPMC1, r7
mtspr SPRN_SPMC2, r8 mtspr SPRN_SPMC2, r8
mtspr SPRN_MMCRS, r9 mtspr SPRN_MMCRS, r9
...@@ -3551,9 +3551,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) ...@@ -3551,9 +3551,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
mfspr r8, SPRN_SDAR mfspr r8, SPRN_SDAR
std r4, VCPU_MMCR(r9) std r4, VCPU_MMCR(r9)
std r5, VCPU_MMCR + 8(r9) std r5, VCPU_MMCR + 8(r9)
std r6, VCPU_MMCR + 16(r9) std r6, VCPU_MMCRA(r9)
BEGIN_FTR_SECTION BEGIN_FTR_SECTION
std r10, VCPU_MMCR + 24(r9) std r10, VCPU_MMCR + 16(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
std r7, VCPU_SIAR(r9) std r7, VCPU_SIAR(r9)
std r8, VCPU_SDAR(r9) std r8, VCPU_SDAR(r9)
...@@ -3578,7 +3578,7 @@ BEGIN_FTR_SECTION_NESTED(96) ...@@ -3578,7 +3578,7 @@ BEGIN_FTR_SECTION_NESTED(96)
mfspr r8, SPRN_MMCRS mfspr r8, SPRN_MMCRS
stw r6, VCPU_PMC + 24(r9) stw r6, VCPU_PMC + 24(r9)
stw r7, VCPU_PMC + 28(r9) stw r7, VCPU_PMC + 28(r9)
std r8, VCPU_MMCR + 32(r9) std r8, VCPU_MMCRS(r9)
lis r4, 0x8000 lis r4, 0x8000
mtspr SPRN_MMCRS, r4 mtspr SPRN_MMCRS, r4
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96) END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment