Commit 401e1ae3 authored by Nicholas Piggin's avatar Nicholas Piggin Committed by Michael Ellerman

KVM: PPC: Book3S HV P9: Factor PMU save/load into context switch functions

Rather than guest/host save/restore functions, implement context switch
functions that take care of details like the VPA update for nested.

The reason to split these kind of helpers into explicit save/load
functions is mainly to schedule SPR access nicely, but PMU is a special
case where the load requires mtSPR (to stop counters) and other
difficulties, so there's less possibility to schedule those nicely. The
SPR accesses also have side-effects if the PMU is running, and in later
changes we keep the host PMU running as long as possible so this code
can be better profiled, which also complicates scheduling.
Signed-off-by: default avatarNicholas Piggin <npiggin@gmail.com>
Reviewed-by: default avatarAthira Jajeev <atrajeev@linux.vnet.ibm.com>
Signed-off-by: default avatarMichael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20211123095231.1036501-15-npiggin@gmail.com
parent 57dc0eed
......@@ -3830,7 +3830,8 @@ static void freeze_pmu(unsigned long mmcr0, unsigned long mmcra)
isync();
}
static void save_p9_host_pmu(struct p9_host_os_sprs *host_os_sprs)
static void switch_pmu_to_guest(struct kvm_vcpu *vcpu,
struct p9_host_os_sprs *host_os_sprs)
{
if (ppc_get_pmu_inuse()) {
/*
......@@ -3864,10 +3865,21 @@ static void save_p9_host_pmu(struct p9_host_os_sprs *host_os_sprs)
host_os_sprs->sier3 = mfspr(SPRN_SIER3);
}
}
}
static void load_p9_guest_pmu(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_PSERIES
if (kvmhv_on_pseries()) {
barrier();
if (vcpu->arch.vpa.pinned_addr) {
struct lppaca *lp = vcpu->arch.vpa.pinned_addr;
get_lppaca()->pmcregs_in_use = lp->pmcregs_in_use;
} else {
get_lppaca()->pmcregs_in_use = 1;
}
barrier();
}
#endif
/* load guest */
mtspr(SPRN_PMC1, vcpu->arch.pmc[0]);
mtspr(SPRN_PMC2, vcpu->arch.pmc[1]);
mtspr(SPRN_PMC3, vcpu->arch.pmc[2]);
......@@ -3892,7 +3904,8 @@ static void load_p9_guest_pmu(struct kvm_vcpu *vcpu)
/* No isync necessary because we're starting counters */
}
static void save_p9_guest_pmu(struct kvm_vcpu *vcpu)
static void switch_pmu_to_host(struct kvm_vcpu *vcpu,
struct p9_host_os_sprs *host_os_sprs)
{
struct lppaca *lp;
int save_pmu = 1;
......@@ -3935,10 +3948,15 @@ static void save_p9_guest_pmu(struct kvm_vcpu *vcpu)
} else {
freeze_pmu(mfspr(SPRN_MMCR0), mfspr(SPRN_MMCRA));
}
}
static void load_p9_host_pmu(struct p9_host_os_sprs *host_os_sprs)
{
#ifdef CONFIG_PPC_PSERIES
if (kvmhv_on_pseries()) {
barrier();
get_lppaca()->pmcregs_in_use = ppc_get_pmu_inuse();
barrier();
}
#endif
if (ppc_get_pmu_inuse()) {
mtspr(SPRN_PMC1, host_os_sprs->pmc1);
mtspr(SPRN_PMC2, host_os_sprs->pmc2);
......@@ -4071,8 +4089,6 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
save_p9_host_os_sprs(&host_os_sprs);
save_p9_host_pmu(&host_os_sprs);
kvmppc_subcore_enter_guest();
vc->entry_exit_map = 1;
......@@ -4089,19 +4105,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
kvmppc_restore_tm_hv(vcpu, vcpu->arch.shregs.msr, true);
#ifdef CONFIG_PPC_PSERIES
if (kvmhv_on_pseries()) {
barrier();
if (vcpu->arch.vpa.pinned_addr) {
struct lppaca *lp = vcpu->arch.vpa.pinned_addr;
get_lppaca()->pmcregs_in_use = lp->pmcregs_in_use;
} else {
get_lppaca()->pmcregs_in_use = 1;
}
barrier();
}
#endif
load_p9_guest_pmu(vcpu);
switch_pmu_to_guest(vcpu, &host_os_sprs);
msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
load_fp_state(&vcpu->arch.fp);
......@@ -4230,14 +4234,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
vcpu->arch.vpa.dirty = 1;
}
save_p9_guest_pmu(vcpu);
#ifdef CONFIG_PPC_PSERIES
if (kvmhv_on_pseries()) {
barrier();
get_lppaca()->pmcregs_in_use = ppc_get_pmu_inuse();
barrier();
}
#endif
switch_pmu_to_host(vcpu, &host_os_sprs);
vc->entry_exit_map = 0x101;
vc->in_guest = 0;
......@@ -4246,8 +4243,6 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
mtspr(SPRN_SPRG_VDSO_WRITE, local_paca->sprg_vdso);
load_p9_host_pmu(&host_os_sprs);
kvmppc_subcore_exit_guest();
return trap;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment