Commit c08ac06a authored by Takuya Yoshikawa, committed by Paolo Bonzini

KVM: Use cond_resched() directly and remove useless kvm_resched()

Since commit 15ad7146 ("KVM: Use the scheduler preemption notifiers
to make kvm preemptible"), all that remains in kvm_resched() is a
cond_resched() call guarded by an extra need_resched() check, which was
originally there to avoid descheduling VCPUs unnecessarily.  That guard is
now meaningless, because cond_resched() performs the same check itself, so
call cond_resched() directly and remove kvm_resched().
Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 6bb05ef7
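
As an illustration (not part of the commit), the following minimal userspace sketch shows why the need_resched() guard removed here added nothing: cond_resched() itself checks whether a reschedule is pending and returns immediately otherwise, so wrapping it in an extra need_resched() test is redundant. The mock_* helpers and the resched_pending flag are hypothetical stand-ins for the kernel's need_resched()/cond_resched() and the TIF_NEED_RESCHED flag.

/* Illustrative userspace model only; not kernel code. */
#include <stdbool.h>
#include <stdio.h>

static bool resched_pending;		/* stand-in for TIF_NEED_RESCHED */

static bool mock_need_resched(void)
{
	return resched_pending;
}

static void mock_cond_resched(void)
{
	/* like cond_resched(): a no-op when no reschedule is pending */
	if (!mock_need_resched())
		return;
	resched_pending = false;	/* pretend schedule() ran */
	printf("rescheduled\n");
}

/* The old helper: an extra check in front of a call that checks anyway. */
static void old_kvm_resched(void)
{
	if (!mock_need_resched())
		return;
	mock_cond_resched();
}

int main(void)
{
	resched_pending = false;
	old_kvm_resched();		/* no-op */
	mock_cond_resched();		/* also a no-op: identical behaviour */

	resched_pending = true;
	mock_cond_resched();		/* reschedules, just like the old path */
	return 0;
}

Running it prints "rescheduled" only for the final call, which is why the callers below can simply use cond_resched() directly.
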
@@ -702,7 +702,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 out:
 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 	if (r > 0) {
-		kvm_resched(vcpu);
+		cond_resched();
 		idx = srcu_read_lock(&vcpu->kvm->srcu);
 		goto again;
 	}
@@ -1348,7 +1348,7 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
 	kvm_guest_exit();
 	preempt_enable();
-	kvm_resched(vcpu);
+	cond_resched();
 	spin_lock(&vc->lock);
 	now = get_tb();
@@ -6125,7 +6125,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 		}
 		if (need_resched()) {
 			srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
-			kvm_resched(vcpu);
+			cond_resched();
 			vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
 		}
 	}
@@ -583,7 +583,6 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu);
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
 bool kvm_vcpu_yield_to(struct kvm_vcpu *target);
 void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
-void kvm_resched(struct kvm_vcpu *vcpu);
 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
@@ -1710,14 +1710,6 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
 EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
 #endif /* !CONFIG_S390 */
 
-void kvm_resched(struct kvm_vcpu *vcpu)
-{
-	if (!need_resched())
-		return;
-	cond_resched();
-}
-EXPORT_SYMBOL_GPL(kvm_resched);
-
 bool kvm_vcpu_yield_to(struct kvm_vcpu *target)
 {
 	struct pid *pid;