Commit cc9cfddb authored by Vitaly Kuznetsov, committed by Paolo Bonzini

KVM: x86: hyper-v: Track Hyper-V TSC page status

Create infrastructure for tracking the Hyper-V TSC page status, i.e. whether
it was updated from the guest or host side, or whether we failed to set it up
(e.g. because the guest wrote garbage to HV_X64_MSR_REFERENCE_TSC) and there
is no need to retry.
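To make the lifecycle concrete, here is a minimal, self-contained userspace
sketch of the resulting state machine. The enum mirrors the patch below;
msr_write() and setup() are illustrative stand-ins for kvm_hv_set_msr_pw()
and kvm_hv_setup_tsc_page(), not kernel code.

#include <stdbool.h>
#include <stdio.h>

enum hv_tsc_page_status {
	HV_TSC_PAGE_UNSET = 0,
	HV_TSC_PAGE_GUEST_CHANGED,
	HV_TSC_PAGE_HOST_CHANGED,
	HV_TSC_PAGE_SET,
	HV_TSC_PAGE_UPDATING,
	HV_TSC_PAGE_BROKEN,
};

/* MSR write: enable bit set -> update pending; cleared -> unset. */
static enum hv_tsc_page_status msr_write(bool host, bool enable)
{
	if (!enable)
		return HV_TSC_PAGE_UNSET;
	return host ? HV_TSC_PAGE_HOST_CHANGED : HV_TSC_PAGE_GUEST_CHANGED;
}

/* Setup: bail out early for UNSET/BROKEN; otherwise SET on success,
 * BROKEN on any failure. */
static enum hv_tsc_page_status setup(enum hv_tsc_page_status s, bool ok)
{
	if (s == HV_TSC_PAGE_UNSET || s == HV_TSC_PAGE_BROKEN)
		return s;
	return ok ? HV_TSC_PAGE_SET : HV_TSC_PAGE_BROKEN;
}

int main(void)
{
	/* Guest enables the TSC page, then setup succeeds. */
	enum hv_tsc_page_status s = msr_write(false, true);
	s = setup(s, true);
	printf("status: %d (HV_TSC_PAGE_SET = %d)\n", s, HV_TSC_PAGE_SET);
	return 0;
}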

Also, in the hypothetical situation when we are in 'always catchup' mode for
the TSC, we can now avoid contending for 'hv->hv_lock' on every guest entry
by setting the state to HV_TSC_PAGE_BROKEN once compute_tsc_page_parameters()
returns false.
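Concretely, the first kvm_hv_setup_tsc_page() hunk below checks the status
before acquiring the mutex:

	if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
	    hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET)
		return;

	mutex_lock(&hv->hv_lock);

Since HV_TSC_PAGE_BROKEN is only left behind when the MSR is written again
(see the kvm_hv_set_msr_pw() hunk), the hot path stays lock-free once the
page is known to be broken.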

Check for HV_TSC_PAGE_SET state instead of '!hv->tsc_ref.tsc_sequence' in
get_time_ref_counter() to properly handle the situation when we failed to
write the updated TSC page values to the guest.
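The reader side thus reduces to a single status comparison (mirroring the
get_time_ref_counter() hunk below):

	if (hv->hv_tsc_page_status != HV_TSC_PAGE_SET)
		return div_u64(get_kvmclock_ns(kvm), 100);

get_kvmclock_ns() returns nanoseconds, while the Hyper-V reference counter
ticks in 100ns units, hence the division by 100.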
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20210316143736.964151-4-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent e880c6ea
@@ -884,12 +884,29 @@ struct kvm_hv_syndbg {
 	u64 options;
 };
 
+/* Current state of Hyper-V TSC page clocksource */
+enum hv_tsc_page_status {
+	/* TSC page was not set up or disabled */
+	HV_TSC_PAGE_UNSET = 0,
+	/* TSC page MSR was written by the guest, update pending */
+	HV_TSC_PAGE_GUEST_CHANGED,
+	/* TSC page MSR was written by KVM userspace, update pending */
+	HV_TSC_PAGE_HOST_CHANGED,
+	/* TSC page was properly set up and is currently active */
+	HV_TSC_PAGE_SET,
+	/* TSC page is currently being updated and therefore is inactive */
+	HV_TSC_PAGE_UPDATING,
+	/* TSC page was set up with an inaccessible GPA */
+	HV_TSC_PAGE_BROKEN,
+};
+
 /* Hyper-V emulation context */
 struct kvm_hv {
 	struct mutex hv_lock;
 	u64 hv_guest_os_id;
 	u64 hv_hypercall;
 	u64 hv_tsc_page;
+	enum hv_tsc_page_status hv_tsc_page_status;
 
 	/* Hyper-v based guest crash (NT kernel bugcheck) parameters */
 	u64 hv_crash_param[HV_X64_MSR_CRASH_PARAMS];
@@ -520,10 +520,10 @@ static u64 get_time_ref_counter(struct kvm *kvm)
 	u64 tsc;
 
 	/*
-	 * The guest has not set up the TSC page or the clock isn't
-	 * stable, fall back to get_kvmclock_ns.
+	 * Fall back to get_kvmclock_ns() when TSC page hasn't been set up,
+	 * is broken, disabled or being updated.
 	 */
-	if (!hv->tsc_ref.tsc_sequence)
+	if (hv->hv_tsc_page_status != HV_TSC_PAGE_SET)
 		return div_u64(get_kvmclock_ns(kvm), 100);
 
 	vcpu = kvm_get_vcpu(kvm, 0);
@@ -1087,7 +1087,8 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
 	BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
 	BUILD_BUG_ON(offsetof(struct ms_hyperv_tsc_page, tsc_sequence) != 0);
 
-	if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
+	if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
+	    hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET)
 		return;
 
 	mutex_lock(&hv->hv_lock);
@@ -1101,7 +1102,7 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
 	 */
 	if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn),
 				    &tsc_seq, sizeof(tsc_seq))))
-		goto out_unlock;
+		goto out_err;
 
 	/*
 	 * While we're computing and writing the parameters, force the
@@ -1110,15 +1111,15 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
 	hv->tsc_ref.tsc_sequence = 0;
 	if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
 			    &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
-		goto out_unlock;
+		goto out_err;
 
 	if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref))
-		goto out_unlock;
+		goto out_err;
 
 	/* Ensure sequence is zero before writing the rest of the struct. */
 	smp_wmb();
 	if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
-		goto out_unlock;
+		goto out_err;
 
 	/*
 	 * Now switch to the TSC page mechanism by writing the sequence.
@@ -1131,8 +1132,15 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
 	smp_wmb();
 
 	hv->tsc_ref.tsc_sequence = tsc_seq;
-	kvm_write_guest(kvm, gfn_to_gpa(gfn),
-			&hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence));
+	if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
+			    &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
+		goto out_err;
+
+	hv->hv_tsc_page_status = HV_TSC_PAGE_SET;
+	goto out_unlock;
+
+out_err:
+	hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;
 out_unlock:
 	mutex_unlock(&hv->hv_lock);
 }
@@ -1142,7 +1150,8 @@ void kvm_hv_invalidate_tsc_page(struct kvm *kvm)
 	struct kvm_hv *hv = to_kvm_hv(kvm);
 	u64 gfn;
 
-	if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
+	if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
+	    hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET)
 		return;
 
 	mutex_lock(&hv->hv_lock);
@@ -1150,11 +1159,16 @@ void kvm_hv_invalidate_tsc_page(struct kvm *kvm)
 	if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
 		goto out_unlock;
 
+	/* Preserve HV_TSC_PAGE_GUEST_CHANGED/HV_TSC_PAGE_HOST_CHANGED states */
+	if (hv->hv_tsc_page_status == HV_TSC_PAGE_SET)
+		hv->hv_tsc_page_status = HV_TSC_PAGE_UPDATING;
+
 	gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
 
 	hv->tsc_ref.tsc_sequence = 0;
-	kvm_write_guest(kvm, gfn_to_gpa(gfn),
-			&hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence));
+	if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
+			    &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
+		hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;
 
 out_unlock:
 	mutex_unlock(&hv->hv_lock);
@@ -1216,8 +1230,15 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
 	}
 	case HV_X64_MSR_REFERENCE_TSC:
 		hv->hv_tsc_page = data;
-		if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE)
+		if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE) {
+			if (!host)
+				hv->hv_tsc_page_status = HV_TSC_PAGE_GUEST_CHANGED;
+			else
+				hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED;
 			kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
+		} else {
+			hv->hv_tsc_page_status = HV_TSC_PAGE_UNSET;
+		}
 		break;
 	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
 		return kvm_hv_msr_set_crash_data(kvm,