Commit 199b5763 authored by Longpeng(Mike), committed by Paolo Bonzini

KVM: add spinlock optimization framework

If a vcpu exits while the guest is spinning on a user-mode spinlock,
the lock holder may have been preempted in either user mode or kernel
mode.  (Note that not all architectures trap spin loops in user mode;
currently only AMD x86 and ARM/ARM64 do.)

But if a vcpu exits while spinning in kernel mode, the holder must
have been preempted in kernel mode, so a vcpu that is itself running
in kernel mode is the more likely candidate for the lock holder and
should be preferred as the yield target.

This introduces kvm_arch_vcpu_in_kernel(), which reports whether a
preempted vcpu was running in kernel mode.  kvm_vcpu_on_spin()'s new
argument says the same of the spinning vcpu.
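
For illustration, an architecture that can query the guest's privilege
level might later implement the hook along these lines (a hypothetical
sketch only; this commit itself adds stub implementations that return
false on every architecture):

	/* Hypothetical x86-style implementation: CPL 0 means kernel mode. */
	bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
	{
		return kvm_x86_ops->get_cpl(vcpu) == 0;
	}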
Signed-off-by: Longpeng(Mike) <longpeng2@huawei.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 1b4d56b8
@@ -67,7 +67,7 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	if (kvm_vcpu_get_hsr(vcpu) & HSR_WFI_IS_WFE) {
 		trace_kvm_wfx(*vcpu_pc(vcpu), true);
 		vcpu->stat.wfe_exit_stat++;
-		kvm_vcpu_on_spin(vcpu);
+		kvm_vcpu_on_spin(vcpu, false);
 	} else {
 		trace_kvm_wfx(*vcpu_pc(vcpu), false);
 		vcpu->stat.wfi_exit_stat++;

@@ -84,7 +84,7 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	if (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
 		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
 		vcpu->stat.wfe_exit_stat++;
-		kvm_vcpu_on_spin(vcpu);
+		kvm_vcpu_on_spin(vcpu, false);
 	} else {
 		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
 		vcpu->stat.wfi_exit_stat++;

@@ -98,6 +98,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 	return !!(vcpu->arch.pending_exceptions);
 }

+bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
+{
+	return false;
+}
+
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 {
 	return 1;

@@ -58,6 +58,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 	return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
 }

+bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
+{
+	return false;
+}
+
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 {
 	return 1;

@@ -150,7 +150,7 @@ static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
 {
 	VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
 	vcpu->stat.diagnose_44++;
-	kvm_vcpu_on_spin(vcpu);
+	kvm_vcpu_on_spin(vcpu, false);
 	return 0;
 }

@@ -2447,6 +2447,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 	return kvm_s390_vcpu_has_irq(vcpu, 0);
 }

+bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
+{
+	return false;
+}
+
 void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
 {
 	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);

@@ -1274,7 +1274,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)

 	switch (code) {
 	case HVCALL_NOTIFY_LONG_SPIN_WAIT:
-		kvm_vcpu_on_spin(vcpu);
+		kvm_vcpu_on_spin(vcpu, false);
 		break;
 	case HVCALL_POST_MESSAGE:
 	case HVCALL_SIGNAL_EVENT:

@@ -3749,7 +3749,7 @@ static int interrupt_window_interception(struct vcpu_svm *svm)

 static int pause_interception(struct vcpu_svm *svm)
 {
-	kvm_vcpu_on_spin(&(svm->vcpu));
+	kvm_vcpu_on_spin(&svm->vcpu, false);
 	return 1;
 }

@@ -6781,7 +6781,7 @@ static int handle_pause(struct kvm_vcpu *vcpu)
 	if (ple_gap)
 		grow_ple_window(vcpu);

-	kvm_vcpu_on_spin(vcpu);
+	kvm_vcpu_on_spin(vcpu, false);
 	return kvm_skip_emulated_instruction(vcpu);
 }

@@ -8432,6 +8432,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 	return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
 }

+bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
+{
+	return false;
+}
+
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 {
 	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;

@@ -720,7 +720,7 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
 bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
 int kvm_vcpu_yield_to(struct kvm_vcpu *target);
-void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
+void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible);
 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);

@@ -800,6 +800,7 @@ int kvm_arch_hardware_setup(void);
 void kvm_arch_hardware_unsetup(void);
 void kvm_arch_check_processor_compat(void *rtn);
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
+bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);

 #ifndef __KVM_HAVE_ARCH_VM_ALLOC

@@ -416,6 +416,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 		&& !v->arch.power_off && !v->arch.pause);
 }

+bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
+{
+	return false;
+}
+
 /* Just ensure a guest exit from a particular CPU */
 static void exit_vm_noop(void *info)
 {

@@ -2317,7 +2317,7 @@ static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
 #endif
 }

-void kvm_vcpu_on_spin(struct kvm_vcpu *me)
+void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
 {
 	struct kvm *kvm = me->kvm;
 	struct kvm_vcpu *vcpu;

@@ -2348,6 +2348,8 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
 			continue;
 		if (swait_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
 			continue;
+		if (yield_to_kernel_mode && !kvm_arch_vcpu_in_kernel(vcpu))
+			continue;
 		if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
 			continue;
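
Usage note: every caller converted above passes false, so behavior is
unchanged by this commit.  An architecture whose spin-loop exits can
only come from kernel mode (for example, Intel's pause-loop exiting
fires only at CPL 0) could opt in by passing true; a hypothetical
sketch for the VMX PAUSE handler, not part of this commit:

	static int handle_pause(struct kvm_vcpu *vcpu)
	{
		if (ple_gap)
			grow_ple_window(vcpu);

		/*
		 * Hypothetical: PLE exits here come from kernel-mode spins,
		 * so restrict directed yield to vcpus still in kernel mode.
		 */
		kvm_vcpu_on_spin(vcpu, true);
		return kvm_skip_emulated_instruction(vcpu);
	}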