Commit afd80d85 authored by Paolo Bonzini, committed by Gleb Natapov

pmu: prepare for migration support

In order to migrate the PMU state correctly, we need to restore the
values of MSR_CORE_PERF_GLOBAL_STATUS (a read-only register) and
MSR_CORE_PERF_GLOBAL_OVF_CTRL (which has side effects when written).
We also need to write the full 40-bit value of the performance counter,
which would only be possible with a v3 architectural PMU's full-width
counter MSRs.

To distinguish host-initiated writes from the guest's, pass the
full struct msr_data to kvm_pmu_set_msr.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
parent e1e2e605
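
For context, the distinction between host and guest writes relies on the host_initiated flag carried in struct msr_data (the fields used by this patch are host_initiated, index and data, as introduced earlier in this series). The sketch below is purely illustrative; the helper kvm_host_restore_pmu_msr is hypothetical and not part of this patch. It only shows how a host-side write would be flagged so that kvm_pmu_set_msr can accept it:

    /* Fields of struct msr_data as used by this patch. */
    struct msr_data {
            bool host_initiated;    /* true for userspace/ioctl writes, false for guest WRMSR */
            u32 index;              /* MSR number, e.g. MSR_CORE_PERF_GLOBAL_STATUS */
            u64 data;               /* value to write */
    };

    /* Hypothetical helper: mark a restore write as host-initiated. */
    static int kvm_host_restore_pmu_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
    {
            struct msr_data msr = {
                    .host_initiated = true,
                    .index = index,
                    .data = data,
            };

            return kvm_pmu_set_msr(vcpu, &msr);
    }

With host_initiated set, the patch lets MSR_CORE_PERF_GLOBAL_STATUS be written even though it is read-only to the guest, skips the status-clearing side effect of MSR_CORE_PERF_GLOBAL_OVF_CTRL, and stores counter values without sign-extending them from 32 bits.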
@@ -1030,7 +1030,7 @@ void kvm_pmu_reset(struct kvm_vcpu *vcpu);
 void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu);
 bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr);
 int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
-int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
+int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
 int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
 void kvm_handle_pmu_event(struct kvm_vcpu *vcpu);
 void kvm_deliver_pmi(struct kvm_vcpu *vcpu);
@@ -360,10 +360,12 @@ int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
         return 1;
 }
 
-int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
+int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
         struct kvm_pmu *pmu = &vcpu->arch.pmu;
         struct kvm_pmc *pmc;
+        u32 index = msr_info->index;
+        u64 data = msr_info->data;
 
         switch (index) {
         case MSR_CORE_PERF_FIXED_CTR_CTRL:
@@ -375,6 +377,10 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
                 }
                 break;
         case MSR_CORE_PERF_GLOBAL_STATUS:
+                if (msr_info->host_initiated) {
+                        pmu->global_status = data;
+                        return 0;
+                }
                 break; /* RO MSR */
         case MSR_CORE_PERF_GLOBAL_CTRL:
                 if (pmu->global_ctrl == data)
@@ -386,6 +392,7 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
                 break;
         case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
                 if (!(data & (pmu->global_ctrl_mask & ~(3ull<<62)))) {
+                        if (!msr_info->host_initiated)
                                 pmu->global_status &= ~data;
                         pmu->global_ovf_ctrl = data;
                         return 0;
@@ -394,6 +401,7 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
         default:
                 if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
                     (pmc = get_fixed_pmc(pmu, index))) {
+                        if (!msr_info->host_initiated)
                                 data = (s64)(s32)data;
                         pmc->counter += data - read_pmc(pmc);
                         return 0;
@@ -2040,7 +2040,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
         case MSR_P6_EVNTSEL0:
         case MSR_P6_EVNTSEL1:
                 if (kvm_pmu_msr(vcpu, msr))
-                        return kvm_pmu_set_msr(vcpu, msr, data);
+                        return kvm_pmu_set_msr(vcpu, msr_info);
 
                 if (pr || data != 0)
                         vcpu_unimpl(vcpu, "disabled perfctr wrmsr: "
@@ -2086,7 +2086,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                 if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
                         return xen_hvm_config(vcpu, data);
                 if (kvm_pmu_msr(vcpu, msr))
-                        return kvm_pmu_set_msr(vcpu, msr, data);
+                        return kvm_pmu_set_msr(vcpu, msr_info);
                 if (!ignore_msrs) {
                         vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
                                     msr, data);
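
For completeness, here is a rough sketch of how a userspace VMM could exercise the new host-initiated path when restoring PMU state after migration. This is not part of the commit; the MSR constants and the restore_pmu_msrs helper are illustrative assumptions, while KVM_SET_MSRS and the kvm_msrs/kvm_msr_entry structures are the existing KVM vCPU ioctl interface:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    #define MSR_CORE_PERF_GLOBAL_STATUS 0x38e   /* read-only from the guest's point of view */
    #define MSR_IA32_PERFCTR0           0xc1

    /* Restore two PMU MSRs on a vCPU; KVM_SET_MSRS writes are host-initiated. */
    static int restore_pmu_msrs(int vcpu_fd, __u64 global_status, __u64 pmc0)
    {
            struct {
                    struct kvm_msrs hdr;
                    struct kvm_msr_entry entries[2];
            } msrs;

            memset(&msrs, 0, sizeof(msrs));
            msrs.hdr.nmsrs = 2;
            msrs.entries[0].index = MSR_CORE_PERF_GLOBAL_STATUS;
            msrs.entries[0].data  = global_status;  /* accepted only because the write is host-initiated */
            msrs.entries[1].index = MSR_IA32_PERFCTR0;
            msrs.entries[1].data  = pmc0;           /* full 40-bit counter value, not sign-extended */

            /* KVM_SET_MSRS returns the number of MSRs successfully written. */
            return ioctl(vcpu_fd, KVM_SET_MSRS, &msrs) == 2 ? 0 : -1;
    }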