Commit 010fd37f authored by Wanpeng Li, committed by Paolo Bonzini

KVM: LAPIC: Reduce world switch latency caused by timer_advance_ns

All the checks in lapic_timer_int_injected() and __kvm_wait_lapic_expire(),
as well as the calls to these functions themselves, waste CPU cycles when the
timer mode is not tscdeadline. We observe ~1.3% world switch time overhead
with kvm-unit-tests/vmexit.flat vmcall testing on an AMD server. This patch
reduces the world switch latency caused by the timer_advance_ns feature when
the timer mode is not tscdeadline by simply moving the check against
apic->lapic_timer.expired_tscdeadline much earlier.

Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
Message-Id: <1599731444-3525-7-git-send-email-wanpengli@tencent.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 68ca7663
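
For orientation, here is a minimal sketch of how kvm_wait_lapic_expire() reads
after this change, reconstructed from the LAPIC hunks below (illustrative only,
not compilable on its own): the cheap field tests are consolidated inside the
helper, so the SVM/VMX vcpu-run paths and apic_timer_expired() no longer need
open-coded guards around it.

void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
{
	/*
	 * Bail out early on cheap field tests: waiting only makes sense
	 * when the in-kernel LAPIC armed a tscdeadline expiry and the
	 * timer_advance_ns feature is enabled; the remaining
	 * lapic_timer_int_injected() check runs only after these pass.
	 */
	if (lapic_in_kernel(vcpu) &&
	    vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
	    vcpu->arch.apic->lapic_timer.timer_advance_ns &&
	    lapic_timer_int_injected(vcpu))
		__kvm_wait_lapic_expire(vcpu);
}
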
@@ -1582,9 +1582,6 @@ static void __kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
 	struct kvm_lapic *apic = vcpu->arch.apic;
 	u64 guest_tsc, tsc_deadline;
 
-	if (apic->lapic_timer.expired_tscdeadline == 0)
-		return;
-
 	tsc_deadline = apic->lapic_timer.expired_tscdeadline;
 	apic->lapic_timer.expired_tscdeadline = 0;
 	guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
@@ -1599,7 +1596,10 @@ static void __kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
 
 void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
 {
-	if (lapic_timer_int_injected(vcpu))
+	if (lapic_in_kernel(vcpu) &&
+	    vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
+	    vcpu->arch.apic->lapic_timer.timer_advance_ns &&
+	    lapic_timer_int_injected(vcpu))
 		__kvm_wait_lapic_expire(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_wait_lapic_expire);
@@ -1635,8 +1635,7 @@ static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn)
 	}
 
 	if (kvm_use_posted_timer_interrupt(apic->vcpu)) {
-		if (apic->lapic_timer.timer_advance_ns)
-			__kvm_wait_lapic_expire(vcpu);
+		kvm_wait_lapic_expire(vcpu);
 		kvm_apic_inject_pending_timer_irqs(apic);
 		return;
 	}
@@ -3454,8 +3454,6 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 	clgi();
 	kvm_load_guest_xsave_state(vcpu);
 
-	if (lapic_in_kernel(vcpu) &&
-		vcpu->arch.apic->lapic_timer.timer_advance_ns)
-		kvm_wait_lapic_expire(vcpu);
+	kvm_wait_lapic_expire(vcpu);
 
 	/*
@@ -6800,8 +6800,6 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	if (enable_preemption_timer)
 		vmx_update_hv_timer(vcpu);
 
-	if (lapic_in_kernel(vcpu) &&
-		vcpu->arch.apic->lapic_timer.timer_advance_ns)
-		kvm_wait_lapic_expire(vcpu);
+	kvm_wait_lapic_expire(vcpu);
 
 	/*
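
Taken together, the call sites collapse as sketched below (assembled from the
hunks above with surrounding code elided, so this is illustrative rather than
compilable): the posted-timer path in apic_timer_expired() and the SVM/VMX
vcpu-run paths now rely on the guard inside kvm_wait_lapic_expire() instead of
open-coding it.

	/*
	 * apic_timer_expired(), posted-interrupt path: the open-coded
	 * timer_advance_ns test is gone; kvm_wait_lapic_expire() decides
	 * on its own whether busy-waiting is worthwhile.
	 */
	if (kvm_use_posted_timer_interrupt(apic->vcpu)) {
		kvm_wait_lapic_expire(vcpu);
		kvm_apic_inject_pending_timer_irqs(apic);
		return;
	}

	/*
	 * svm_vcpu_run() / vmx_vcpu_run(): likewise a bare call, with no
	 * lapic_in_kernel()/timer_advance_ns check at the call site.
	 */
	kvm_wait_lapic_expire(vcpu);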