Commit 6b6fcd28 authored by Paolo Bonzini

kvm: x86: abstract locking around pvclock_update_vm_gtod_copy

Updates to the kvmclock parameters need to do a complicated dance of
KVM_REQ_MCLOCK_INPROGRESS and KVM_REQ_CLOCK_UPDATE in addition to taking
pvclock_gtod_sync_lock.  Place that in two functions that can be called
on all of master clock update, KVM_SET_CLOCK, and Hyper-V reenlightenment.
Reviewed-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 3e44dce4
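
For context, the calling convention the two new helpers establish looks like
this; a minimal sketch condensed from the diff below (the comments summarize
the helper bodies and are not verbatim kernel code):

	kvm_start_pvclock_update(kvm);		/* KVM_REQ_MCLOCK_INPROGRESS kicks
						 * every vCPU out of the guest, then
						 * pvclock_gtod_sync_lock is taken */
	pvclock_update_vm_gtod_copy(kvm);	/* recompute the masterclock copy
						 * under the lock */
	/* ... a caller such as KVM_SET_CLOCK may adjust more state here ... */
	kvm_end_pvclock_update(kvm);		/* drop the lock, request
						 * KVM_REQ_CLOCK_UPDATE on each vCPU,
						 * clear MCLOCK_INPROGRESS so guest
						 * entries may resume */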
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1862,7 +1862,6 @@ u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier);
 unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
-void kvm_make_mclock_inprogress_request(struct kvm *kvm);
 void kvm_make_scan_ioapic_request(struct kvm *kvm);
 void kvm_make_scan_ioapic_request_mask(struct kvm *kvm,
 				       unsigned long *vcpu_bitmap);
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2743,35 +2743,42 @@ static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
 #endif
 }
 
-void kvm_make_mclock_inprogress_request(struct kvm *kvm)
+static void kvm_make_mclock_inprogress_request(struct kvm *kvm)
 {
 	kvm_make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
 }
 
-static void kvm_gen_update_masterclock(struct kvm *kvm)
+static void kvm_start_pvclock_update(struct kvm *kvm)
 {
-#ifdef CONFIG_X86_64
-	int i;
-	struct kvm_vcpu *vcpu;
 	struct kvm_arch *ka = &kvm->arch;
-	unsigned long flags;
-
-	kvm_hv_invalidate_tsc_page(kvm);
 
 	kvm_make_mclock_inprogress_request(kvm);
 
 	/* no guest entries from this point */
-	spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
-	pvclock_update_vm_gtod_copy(kvm);
-	spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
+	spin_lock_irq(&ka->pvclock_gtod_sync_lock);
+}
+
+static void kvm_end_pvclock_update(struct kvm *kvm)
+{
+	struct kvm_arch *ka = &kvm->arch;
+	struct kvm_vcpu *vcpu;
+	int i;
 
+	spin_unlock_irq(&ka->pvclock_gtod_sync_lock);
 	kvm_for_each_vcpu(i, vcpu, kvm)
 		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
 
 	/* guest entries allowed */
 	kvm_for_each_vcpu(i, vcpu, kvm)
 		kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);
-#endif
+}
+
+static void kvm_update_masterclock(struct kvm *kvm)
+{
+	kvm_hv_invalidate_tsc_page(kvm);
+	kvm_start_pvclock_update(kvm);
+	pvclock_update_vm_gtod_copy(kvm);
+	kvm_end_pvclock_update(kvm);
 }
 
 u64 get_kvmclock_ns(struct kvm *kvm)
@@ -6067,12 +6074,10 @@ long kvm_arch_vm_ioctl(struct file *filp,
 			goto out;
 
 		r = 0;
-		/*
-		 * TODO: userspace has to take care of races with VCPU_RUN, so
-		 * kvm_gen_update_masterclock() can be cut down to locked
-		 * pvclock_update_vm_gtod_copy().
-		 */
-		kvm_gen_update_masterclock(kvm);
+
+		kvm_hv_invalidate_tsc_page(kvm);
+		kvm_start_pvclock_update(kvm);
+		pvclock_update_vm_gtod_copy(kvm);
 
 		/*
 		 * This pairs with kvm_guest_time_update(): when masterclock is
@@ -6081,15 +6086,12 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		 * is slightly ahead) here we risk going negative on unsigned
 		 * 'system_time' when 'user_ns.clock' is very small.
 		 */
-		spin_lock_irq(&ka->pvclock_gtod_sync_lock);
 		if (kvm->arch.use_master_clock)
 			now_ns = ka->master_kernel_ns;
 		else
 			now_ns = get_kvmclock_base_ns();
 		ka->kvmclock_offset = user_ns.clock - now_ns;
-		spin_unlock_irq(&ka->pvclock_gtod_sync_lock);
-
-		kvm_make_all_cpus_request(kvm, KVM_REQ_CLOCK_UPDATE);
+		kvm_end_pvclock_update(kvm);
 		break;
 	}
 	case KVM_GET_CLOCK: {
@@ -8102,14 +8104,13 @@ static void tsc_khz_changed(void *data)
 static void kvm_hyperv_tsc_notifier(void)
 {
 	struct kvm *kvm;
-	struct kvm_vcpu *vcpu;
 	int cpu;
-	unsigned long flags;
 
 	mutex_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list)
 		kvm_make_mclock_inprogress_request(kvm);
 
+	/* no guest entries from this point */
 	hyperv_stop_tsc_emulation();
 
 	/* TSC frequency always matches when on Hyper-V */
@@ -8120,16 +8121,11 @@ static void kvm_hyperv_tsc_notifier(void)
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		struct kvm_arch *ka = &kvm->arch;
 
-		spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
+		spin_lock_irq(&ka->pvclock_gtod_sync_lock);
 		pvclock_update_vm_gtod_copy(kvm);
-		spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
-
-		kvm_for_each_vcpu(cpu, vcpu, kvm)
-			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
-
-		kvm_for_each_vcpu(cpu, vcpu, kvm)
-			kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);
+		kvm_end_pvclock_update(kvm);
 	}
+
 	mutex_unlock(&kvm_lock);
 }
 #endif
@@ -9406,7 +9402,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
 			__kvm_migrate_timers(vcpu);
 		if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu))
-			kvm_gen_update_masterclock(vcpu->kvm);
+			kvm_update_masterclock(vcpu->kvm);
 		if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu))
 			kvm_gen_kvmclock_update(vcpu);
 		if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {