Commit 4faa7f05 authored by Paul Mackerras's avatar Paul Mackerras Committed by Greg Kroah-Hartman

KVM: PPC: Book3S HV: Check for MMU ready on piggybacked virtual cores

commit d28eafc5 upstream.

When we are running multiple vcores on the same physical core, they
could be from different VMs and so it is possible that one of the
VMs could have its arch.mmu_ready flag cleared (for example by a
concurrent HPT resize) when we go to run it on a physical core.
We currently check the arch.mmu_ready flag for the primary vcore
but not the flags for the other vcores that will be run alongside
it.  This adds that check, and also a check when we select the
secondary vcores from the preempted vcores list.

Cc: stable@vger.kernel.org # v4.14+
Fixes: 38c53af8 ("KVM: PPC: Book3S HV: Fix exclusion between HPT resizing and other HPT updates")
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 577a5119
@@ -2550,7 +2550,7 @@ static void collect_piggybacks(struct core_info *cip, int target_threads)
 		if (!spin_trylock(&pvc->lock))
 			continue;
 		prepare_threads(pvc);
-		if (!pvc->n_runnable) {
+		if (!pvc->n_runnable || !pvc->kvm->arch.mmu_ready) {
 			list_del_init(&pvc->preempt_list);
 			if (pvc->runner == NULL) {
 				pvc->vcore_state = VCORE_INACTIVE;
@@ -2571,15 +2571,20 @@ static void collect_piggybacks(struct core_info *cip, int target_threads)
 	spin_unlock(&lp->lock);
 }
 
-static bool recheck_signals(struct core_info *cip)
+static bool recheck_signals_and_mmu(struct core_info *cip)
 {
 	int sub, i;
 	struct kvm_vcpu *vcpu;
+	struct kvmppc_vcore *vc;
 
-	for (sub = 0; sub < cip->n_subcores; ++sub)
-		for_each_runnable_thread(i, vcpu, cip->vc[sub])
+	for (sub = 0; sub < cip->n_subcores; ++sub) {
+		vc = cip->vc[sub];
+		if (!vc->kvm->arch.mmu_ready)
+			return true;
+		for_each_runnable_thread(i, vcpu, vc)
 			if (signal_pending(vcpu->arch.run_task))
 				return true;
+	}
 	return false;
 }
@@ -2800,7 +2805,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 	local_irq_disable();
 	hard_irq_disable();
 	if (lazy_irq_pending() || need_resched() ||
-	    recheck_signals(&core_info) || !vc->kvm->arch.mmu_ready) {
+	    recheck_signals_and_mmu(&core_info)) {
 		local_irq_enable();
 		vc->vcore_state = VCORE_INACTIVE;
 		/* Unlock all except the primary vcore */
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment