Commit a826faf1 authored by Ladi Prosek, committed by Radim Krčmář

KVM: x86: make backwards_tsc_observed a per-VM variable

The backwards_tsc_observed global introduced in commit 16a96021 is never
reset to false. If a VM happens to be running while the host is suspended
(a common source of the TSC jumping backwards), master clock will never
be enabled again for any VM. In contrast, if no VM is running while the
host is suspended, master clock is unaffected. This is inconsistent and
unnecessarily strict. Let's track the backwards_tsc_observed variable
separately and let each VM start with a clean slate.

Real world impact: My Windows VMs get slower after my laptop undergoes a
suspend/resume cycle. The only way to get the perf back is unloading and
reloading the kvm module.
Signed-off-by: Ladi Prosek <lprosek@redhat.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
parent 286de8f6
arch/x86/include/asm/kvm_host.h
@@ -803,6 +803,7 @@ struct kvm_arch {
 	int audit_point;
 #endif
 
+	bool backwards_tsc_observed;
 	bool boot_vcpu_runs_old_kvmclock;
 	u32 bsp_vcpu_id;
arch/x86/kvm/x86.c
@@ -134,8 +134,6 @@ module_param(lapic_timer_advance_ns, uint, S_IRUGO | S_IWUSR);
 static bool __read_mostly vector_hashing = true;
 module_param(vector_hashing, bool, S_IRUGO);
 
-static bool __read_mostly backwards_tsc_observed = false;
-
 #define KVM_NR_SHARED_MSRS 16
 struct kvm_shared_msrs_global {
@@ -1719,7 +1717,7 @@ static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
 					&ka->master_cycle_now);
 
 	ka->use_master_clock = host_tsc_clocksource && vcpus_matched
-				&& !backwards_tsc_observed
+				&& !ka->backwards_tsc_observed
 				&& !ka->boot_vcpu_runs_old_kvmclock;
 
 	if (ka->use_master_clock)
@@ -7835,8 +7833,8 @@ int kvm_arch_hardware_enable(void)
 	 */
 	if (backwards_tsc) {
 		u64 delta_cyc = max_tsc - local_tsc;
-		backwards_tsc_observed = true;
 		list_for_each_entry(kvm, &vm_list, vm_list) {
+			kvm->arch.backwards_tsc_observed = true;
 			kvm_for_each_vcpu(i, vcpu, kvm) {
 				vcpu->arch.tsc_offset_adjustment += delta_cyc;
 				vcpu->arch.last_host_tsc = local_tsc;
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment