Commit 898b25b2 authored by Paul Mackerras

KVM: PPC: Book3S HV: Simplify dynamic micro-threading code

Since commit b009031f ("KVM: PPC: Book3S HV: Take out virtual
core piggybacking code", 2016-09-15), we have at most one
vcore per subcore.  Previously, the fact that there might be more
than one vcore per subcore meant that we had the notion of a
"master vcore", which was the vcore that controlled thread 0 of
the subcore.  We also needed a list per subcore in the core_info
struct to record which vcores belonged to each subcore.  Now that
there can only be one vcore in the subcore, we can replace the
list with a simple pointer and get rid of the notion of the
master vcore (and in fact treat every vcore as a master vcore).

We can also get rid of the subcore_vm[] field in the core_info
struct since it is never read.

Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
parent 2ed4f9dd
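
In outline, the data-structure change is the following (a simplified sketch;
fields this patch does not touch are elided):

/* Before: a per-subcore vcore list, plus a subcore_vm[] that was never read. */
struct core_info {
	/* ... */
	struct kvm *subcore_vm[MAX_SUBCORES];
	struct list_head vcs[MAX_SUBCORES];	/* vcores sharing this subcore */
};

/* After: at most one vcore per subcore, so a single pointer suffices. */
struct core_info {
	/* ... */
	struct kvmppc_vcore *vc[MAX_SUBCORES];	/* one vcore per subcore, or NULL */
};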
@@ -86,7 +86,6 @@ struct kvmppc_vcore {
 	u16 last_cpu;
 	u8 vcore_state;
 	u8 in_guest;
-	struct kvmppc_vcore *master_vcore;
 	struct kvm_vcpu *runnable_threads[MAX_SMT_THREADS];
 	struct list_head preempt_list;
 	spinlock_t lock;
...
@@ -81,7 +81,7 @@ struct kvm_split_mode {
 	u8 subcore_size;
 	u8 do_nap;
 	u8 napped[MAX_SMT_THREADS];
-	struct kvmppc_vcore *master_vcs[MAX_SUBCORES];
+	struct kvmppc_vcore *vc[MAX_SUBCORES];
 };

 /*
...
@@ -2171,7 +2171,6 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc)
 {
 	int cpu;
 	struct paca_struct *tpaca;
-	struct kvmppc_vcore *mvc = vc->master_vcore;
 	struct kvm *kvm = vc->kvm;

 	cpu = vc->pcpu;
@@ -2181,7 +2180,7 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc)
 		vcpu->arch.timer_running = 0;
 	}
 	cpu += vcpu->arch.ptid;
-	vcpu->cpu = mvc->pcpu;
+	vcpu->cpu = vc->pcpu;
 	vcpu->arch.thread_cpu = cpu;

 	/*
@@ -2207,10 +2206,10 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc)
 	}
 	tpaca = &paca[cpu];
 	tpaca->kvm_hstate.kvm_vcpu = vcpu;
-	tpaca->kvm_hstate.ptid = cpu - mvc->pcpu;
+	tpaca->kvm_hstate.ptid = cpu - vc->pcpu;
 	/* Order stores to hstate.kvm_vcpu etc. before store to kvm_vcore */
 	smp_wmb();
-	tpaca->kvm_hstate.kvm_vcore = mvc;
+	tpaca->kvm_hstate.kvm_vcore = vc;
 	if (cpu != smp_processor_id())
 		kvmppc_ipi_thread(cpu);
 }
@@ -2339,8 +2338,7 @@ struct core_info {
 	int max_subcore_threads;
 	int total_threads;
 	int subcore_threads[MAX_SUBCORES];
-	struct kvm *subcore_vm[MAX_SUBCORES];
-	struct list_head vcs[MAX_SUBCORES];
+	struct kvmppc_vcore *vc[MAX_SUBCORES];
 };

 /*
@@ -2351,17 +2349,12 @@ static int subcore_thread_map[MAX_SUBCORES] = { 0, 4, 2, 6 };
 static void init_core_info(struct core_info *cip, struct kvmppc_vcore *vc)
 {
-	int sub;
-
 	memset(cip, 0, sizeof(*cip));
 	cip->n_subcores = 1;
 	cip->max_subcore_threads = vc->num_threads;
 	cip->total_threads = vc->num_threads;
 	cip->subcore_threads[0] = vc->num_threads;
-	cip->subcore_vm[0] = vc->kvm;
-	for (sub = 0; sub < MAX_SUBCORES; ++sub)
-		INIT_LIST_HEAD(&cip->vcs[sub]);
-	list_add_tail(&vc->preempt_list, &cip->vcs[0]);
+	cip->vc[0] = vc;
 }

 static bool subcore_config_ok(int n_subcores, int n_threads)
@@ -2381,9 +2374,8 @@ static bool subcore_config_ok(int n_subcores, int n_threads)
 	return n_subcores * roundup_pow_of_two(n_threads) <= MAX_SMT_THREADS;
 }

-static void init_master_vcore(struct kvmppc_vcore *vc)
+static void init_vcore_to_run(struct kvmppc_vcore *vc)
 {
-	vc->master_vcore = vc;
 	vc->entry_exit_map = 0;
 	vc->in_guest = 0;
 	vc->napping_threads = 0;
@@ -2408,9 +2400,9 @@ static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip)
 	++cip->n_subcores;
 	cip->total_threads += vc->num_threads;
 	cip->subcore_threads[sub] = vc->num_threads;
-	cip->subcore_vm[sub] = vc->kvm;
-	init_master_vcore(vc);
-	list_move_tail(&vc->preempt_list, &cip->vcs[sub]);
+	cip->vc[sub] = vc;
+	init_vcore_to_run(vc);
+	list_del_init(&vc->preempt_list);

 	return true;
 }
@@ -2515,7 +2507,6 @@ static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
 			wake_up(&vcpu->arch.cpu_run);
 		}
 	}
-	list_del_init(&vc->preempt_list);
 	if (!is_master) {
 		if (still_running > 0) {
 			kvmppc_vcore_preempt(vc);
@@ -2587,7 +2578,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 	int i;
 	int srcu_idx;
 	struct core_info core_info;
-	struct kvmppc_vcore *pvc, *vcnext;
+	struct kvmppc_vcore *pvc;
 	struct kvm_split_mode split_info, *sip;
 	int split, subcore_size, active;
 	int sub;
@@ -2610,7 +2601,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 	/*
	 * Initialize *vc.
	 */
-	init_master_vcore(vc);
+	init_vcore_to_run(vc);
 	vc->preempt_tb = TB_NIL;

 	/*
@@ -2670,9 +2661,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 		split_info.ldbar = mfspr(SPRN_LDBAR);
 		split_info.subcore_size = subcore_size;
 		for (sub = 0; sub < core_info.n_subcores; ++sub)
-			split_info.master_vcs[sub] =
-				list_first_entry(&core_info.vcs[sub],
-					struct kvmppc_vcore, preempt_list);
+			split_info.vc[sub] = core_info.vc[sub];
 		/* order writes to split_info before kvm_split_mode pointer */
 		smp_wmb();
 	}
@@ -2704,24 +2693,23 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 		thr = subcore_thread_map[sub];
 		thr0_done = false;
 		active |= 1 << thr;
-		list_for_each_entry(pvc, &core_info.vcs[sub], preempt_list) {
-			pvc->pcpu = pcpu + thr;
-			for_each_runnable_thread(i, vcpu, pvc) {
-				kvmppc_start_thread(vcpu, pvc);
-				kvmppc_create_dtl_entry(vcpu, pvc);
-				trace_kvm_guest_enter(vcpu);
-				if (!vcpu->arch.ptid)
-					thr0_done = true;
-				active |= 1 << (thr + vcpu->arch.ptid);
-			}
-			/*
-			 * We need to start the first thread of each subcore
-			 * even if it doesn't have a vcpu.
-			 */
-			if (pvc->master_vcore == pvc && !thr0_done)
-				kvmppc_start_thread(NULL, pvc);
-			thr += pvc->num_threads;
-		}
+		pvc = core_info.vc[sub];
+		pvc->pcpu = pcpu + thr;
+		for_each_runnable_thread(i, vcpu, pvc) {
+			kvmppc_start_thread(vcpu, pvc);
+			kvmppc_create_dtl_entry(vcpu, pvc);
+			trace_kvm_guest_enter(vcpu);
+			if (!vcpu->arch.ptid)
+				thr0_done = true;
+			active |= 1 << (thr + vcpu->arch.ptid);
+		}
+		/*
+		 * We need to start the first thread of each subcore
+		 * even if it doesn't have a vcpu.
+		 */
+		if (!thr0_done)
+			kvmppc_start_thread(NULL, pvc);
+		thr += pvc->num_threads;
 	}
/* /*
...@@ -2748,8 +2736,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) ...@@ -2748,8 +2736,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
trace_kvmppc_run_core(vc, 0); trace_kvmppc_run_core(vc, 0);
for (sub = 0; sub < core_info.n_subcores; ++sub) for (sub = 0; sub < core_info.n_subcores; ++sub)
list_for_each_entry(pvc, &core_info.vcs[sub], preempt_list) spin_unlock(&core_info.vc[sub]->lock);
spin_unlock(&pvc->lock);
guest_enter(); guest_enter();
@@ -2802,10 +2789,10 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 	smp_mb();
 	guest_exit();

-	for (sub = 0; sub < core_info.n_subcores; ++sub)
-		list_for_each_entry_safe(pvc, vcnext, &core_info.vcs[sub],
-					 preempt_list)
-			post_guest_process(pvc, pvc == vc);
+	for (sub = 0; sub < core_info.n_subcores; ++sub) {
+		pvc = core_info.vc[sub];
+		post_guest_process(pvc, pvc == vc);
+	}

 	spin_lock(&vc->lock);
 	preempt_enable();
@@ -3026,15 +3013,14 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	 */
 	if (!signal_pending(current)) {
 		if (vc->vcore_state == VCORE_PIGGYBACK) {
-			struct kvmppc_vcore *mvc = vc->master_vcore;
-			if (spin_trylock(&mvc->lock)) {
-				if (mvc->vcore_state == VCORE_RUNNING &&
-				    !VCORE_IS_EXITING(mvc)) {
+			if (spin_trylock(&vc->lock)) {
+				if (vc->vcore_state == VCORE_RUNNING &&
+				    !VCORE_IS_EXITING(vc)) {
 					kvmppc_create_dtl_entry(vcpu, vc);
 					kvmppc_start_thread(vcpu, vc);
 					trace_kvm_guest_enter(vcpu);
 				}
-				spin_unlock(&mvc->lock);
+				spin_unlock(&vc->lock);
 			}
 		} else if (vc->vcore_state == VCORE_RUNNING &&
 			   !VCORE_IS_EXITING(vc)) {
...
@@ -307,7 +307,7 @@ void kvmhv_commence_exit(int trap)
 		return;

 	for (i = 0; i < MAX_SUBCORES; ++i) {
-		vc = sip->master_vcs[i];
+		vc = sip->vc[i];
 		if (!vc)
 			break;
 		do {
...