Commit 8455d79e authored by Paul Mackerras, committed by Alexander Graf

KVM: PPC: Book3S HV: Run virtual core whenever any vcpus in it can run

Currently the Book3S HV code implements a policy on multi-threaded
processors (i.e. POWER7) that requires all of the active vcpus in a
virtual core to be ready to run before we run the virtual core.
However, that causes problems on reset, because reset stops all vcpus
except vcpu 0, and can also reduce throughput since all four threads
in a virtual core have to wait whenever any one of them hits a
hypervisor page fault.

This relaxes the policy, allowing the virtual core to run as soon as
any vcpu in it is runnable.  With this, the KVMPPC_VCPU_STOPPED state
and the KVMPPC_VCPU_BUSY_IN_HOST state have been combined into a single
KVMPPC_VCPU_NOTREADY state, since we no longer need to distinguish
between them.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
parent 2f12f034
...@@ -563,9 +563,8 @@ struct kvm_vcpu_arch { ...@@ -563,9 +563,8 @@ struct kvm_vcpu_arch {
}; };
/* Values for vcpu->arch.state */ /* Values for vcpu->arch.state */
#define KVMPPC_VCPU_STOPPED 0 #define KVMPPC_VCPU_NOTREADY 0
#define KVMPPC_VCPU_BUSY_IN_HOST 1 #define KVMPPC_VCPU_RUNNABLE 1
#define KVMPPC_VCPU_RUNNABLE 2
/* Values for vcpu->arch.io_gpr */ /* Values for vcpu->arch.io_gpr */
#define KVM_MMIO_REG_MASK 0x001f #define KVM_MMIO_REG_MASK 0x001f
......
...@@ -776,10 +776,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) ...@@ -776,10 +776,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
kvmppc_mmu_book3s_hv_init(vcpu); kvmppc_mmu_book3s_hv_init(vcpu);
/* vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
* We consider the vcpu stopped until we see the first run ioctl for it.
*/
vcpu->arch.state = KVMPPC_VCPU_STOPPED;
init_waitqueue_head(&vcpu->arch.cpu_run); init_waitqueue_head(&vcpu->arch.cpu_run);
...@@ -866,9 +863,8 @@ static void kvmppc_remove_runnable(struct kvmppc_vcore *vc, ...@@ -866,9 +863,8 @@ static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
{ {
if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
return; return;
vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
--vc->n_runnable; --vc->n_runnable;
++vc->n_busy;
list_del(&vcpu->arch.run_list); list_del(&vcpu->arch.run_list);
} }
...@@ -1169,7 +1165,6 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc) ...@@ -1169,7 +1165,6 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{ {
int n_ceded; int n_ceded;
int prev_state;
struct kvmppc_vcore *vc; struct kvmppc_vcore *vc;
struct kvm_vcpu *v, *vn; struct kvm_vcpu *v, *vn;
...@@ -1186,7 +1181,6 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) ...@@ -1186,7 +1181,6 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
vcpu->arch.ceded = 0; vcpu->arch.ceded = 0;
vcpu->arch.run_task = current; vcpu->arch.run_task = current;
vcpu->arch.kvm_run = kvm_run; vcpu->arch.kvm_run = kvm_run;
prev_state = vcpu->arch.state;
vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
list_add_tail(&vcpu->arch.run_list, &vc->runnable_threads); list_add_tail(&vcpu->arch.run_list, &vc->runnable_threads);
++vc->n_runnable; ++vc->n_runnable;
...@@ -1196,35 +1190,26 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) ...@@ -1196,35 +1190,26 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
* If the vcore is already running, we may be able to start * If the vcore is already running, we may be able to start
* this thread straight away and have it join in. * this thread straight away and have it join in.
*/ */
if (prev_state == KVMPPC_VCPU_STOPPED) { if (!signal_pending(current)) {
if (vc->vcore_state == VCORE_RUNNING && if (vc->vcore_state == VCORE_RUNNING &&
VCORE_EXIT_COUNT(vc) == 0) { VCORE_EXIT_COUNT(vc) == 0) {
vcpu->arch.ptid = vc->n_runnable - 1; vcpu->arch.ptid = vc->n_runnable - 1;
kvmppc_create_dtl_entry(vcpu, vc); kvmppc_create_dtl_entry(vcpu, vc);
kvmppc_start_thread(vcpu); kvmppc_start_thread(vcpu);
} else if (vc->vcore_state == VCORE_SLEEPING) {
wake_up(&vc->wq);
} }
} else if (prev_state == KVMPPC_VCPU_BUSY_IN_HOST) }
--vc->n_busy;
while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
!signal_pending(current)) { !signal_pending(current)) {
if (vc->n_busy || vc->vcore_state != VCORE_INACTIVE) { if (vc->vcore_state != VCORE_INACTIVE) {
spin_unlock(&vc->lock); spin_unlock(&vc->lock);
kvmppc_wait_for_exec(vcpu, TASK_INTERRUPTIBLE); kvmppc_wait_for_exec(vcpu, TASK_INTERRUPTIBLE);
spin_lock(&vc->lock); spin_lock(&vc->lock);
continue; continue;
} }
vc->runner = vcpu;
n_ceded = 0;
list_for_each_entry(v, &vc->runnable_threads, arch.run_list)
if (!v->arch.pending_exceptions)
n_ceded += v->arch.ceded;
if (n_ceded == vc->n_runnable)
kvmppc_vcore_blocked(vc);
else
kvmppc_run_core(vc);
list_for_each_entry_safe(v, vn, &vc->runnable_threads, list_for_each_entry_safe(v, vn, &vc->runnable_threads,
arch.run_list) { arch.run_list) {
kvmppc_core_prepare_to_enter(v); kvmppc_core_prepare_to_enter(v);
...@@ -1236,10 +1221,20 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) ...@@ -1236,10 +1221,20 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
wake_up(&v->arch.cpu_run); wake_up(&v->arch.cpu_run);
} }
} }
if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
break;
vc->runner = vcpu;
n_ceded = 0;
list_for_each_entry(v, &vc->runnable_threads, arch.run_list)
if (!v->arch.pending_exceptions)
n_ceded += v->arch.ceded;
if (n_ceded == vc->n_runnable)
kvmppc_vcore_blocked(vc);
else
kvmppc_run_core(vc);
vc->runner = NULL; vc->runner = NULL;
} }
if (signal_pending(current)) {
while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
(vc->vcore_state == VCORE_RUNNING || (vc->vcore_state == VCORE_RUNNING ||
vc->vcore_state == VCORE_EXITING)) { vc->vcore_state == VCORE_EXITING)) {
...@@ -1247,12 +1242,19 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) ...@@ -1247,12 +1242,19 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
kvmppc_wait_for_exec(vcpu, TASK_UNINTERRUPTIBLE); kvmppc_wait_for_exec(vcpu, TASK_UNINTERRUPTIBLE);
spin_lock(&vc->lock); spin_lock(&vc->lock);
} }
if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
kvmppc_remove_runnable(vc, vcpu); kvmppc_remove_runnable(vc, vcpu);
vcpu->stat.signal_exits++; vcpu->stat.signal_exits++;
kvm_run->exit_reason = KVM_EXIT_INTR; kvm_run->exit_reason = KVM_EXIT_INTR;
vcpu->arch.ret = -EINTR; vcpu->arch.ret = -EINTR;
} }
if (vc->n_runnable && vc->vcore_state == VCORE_INACTIVE) {
/* Wake up some vcpu to run the core */
v = list_first_entry(&vc->runnable_threads,
struct kvm_vcpu, arch.run_list);
wake_up(&v->arch.cpu_run);
} }
spin_unlock(&vc->lock); spin_unlock(&vc->lock);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment