Commit 818fab44 authored by Paul Mackerras, committed by Greg Kroah-Hartman

KVM: PPC: Book3S HV: Use new mutex to synchronize MMU setup

[ Upstream commit 0d4ee88d ]

Currently the HV KVM code uses kvm->lock in conjunction with a flag,
kvm->arch.mmu_ready, to synchronize MMU setup and hold off vcpu
execution until the MMU-related data structures are ready.  However,
this means that kvm->lock is being taken inside vcpu->mutex, which
is contrary to Documentation/virtual/kvm/locking.txt and results in
lockdep warnings.

To fix this, we add a new mutex, kvm->arch.mmu_setup_lock, which nests
inside the vcpu mutexes, and is taken in the places where kvm->lock
was taken that are related to MMU setup.

Additionally we take the new mutex in the vcpu creation code at the
point where we are creating a new vcore, in order to provide mutual
exclusion with kvmppc_update_lpcr() and ensure that an update to
kvm->arch.lpcr doesn't get missed, which could otherwise lead to a
stale vcore->lpcr value.

Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
parent 560e20f2
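
For illustration, here is a minimal userspace sketch of the lock nesting the patch establishes, using POSIX mutexes in place of the kernel's struct mutex; demo_vm, demo_vcpu and demo_vcpu_run() are hypothetical stand-ins for the KVM structures, not the actual kernel code. The point it shows is that MMU setup is serialized by a lock that is defined to nest inside the per-vcpu mutex, so the documented ordering (kvm->lock taken outside vcpu->mutex) is never inverted.

#include <pthread.h>
#include <stdio.h>

/* Stand-ins for struct kvm / struct kvm_vcpu, illustrative only. */
struct demo_vm {
	pthread_mutex_t lock;		/* analogue of kvm->lock */
	pthread_mutex_t mmu_setup_lock;	/* analogue of kvm->arch.mmu_setup_lock */
	int mmu_ready;			/* analogue of kvm->arch.mmu_ready */
};

struct demo_vcpu {
	pthread_mutex_t mutex;		/* analogue of vcpu->mutex */
	struct demo_vm *vm;
};

/*
 * Old scheme (problematic): the VM-wide lock was taken here, inside the
 * vcpu mutex, inverting the documented order "kvm->lock outside
 * vcpu->mutex".  New scheme: a dedicated MMU-setup mutex that is defined
 * to nest inside the vcpu mutex, so no ordering rule is violated.
 */
static void demo_vcpu_run(struct demo_vcpu *vcpu)
{
	struct demo_vm *vm = vcpu->vm;

	pthread_mutex_lock(&vcpu->mutex);		/* outer: per-vcpu mutex */
	if (!vm->mmu_ready) {
		pthread_mutex_lock(&vm->mmu_setup_lock);	/* inner lock */
		if (!vm->mmu_ready) {	/* re-check under the lock */
			/* ... build HPT or radix structures here ... */
			vm->mmu_ready = 1;
		}
		pthread_mutex_unlock(&vm->mmu_setup_lock);
	}
	/* ... enter the guest ... */
	pthread_mutex_unlock(&vcpu->mutex);
}

int main(void)
{
	struct demo_vm vm = { .mmu_ready = 0 };
	struct demo_vcpu vcpu = { .vm = &vm };

	pthread_mutex_init(&vm.lock, NULL);
	pthread_mutex_init(&vm.mmu_setup_lock, NULL);
	pthread_mutex_init(&vcpu.mutex, NULL);

	demo_vcpu_run(&vcpu);
	printf("mmu_ready = %d\n", vm.mmu_ready);
	return 0;
}
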
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -317,6 +317,7 @@ struct kvm_arch {
 #endif
 	struct kvmppc_ops *kvm_ops;
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+	struct mutex mmu_setup_lock;	/* nests inside vcpu mutexes */
 	u64 l1_ptcr;
 	int max_nested_lpid;
 	struct kvm_nested_guest *nested_guests[KVM_MAX_NESTED_GUESTS];
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -63,7 +63,7 @@ struct kvm_resize_hpt {
 	struct work_struct work;
 	u32 order;
 
-	/* These fields protected by kvm->lock */
+	/* These fields protected by kvm->arch.mmu_setup_lock */
 
 	/* Possible values and their usage:
 	 *  <0 an error occurred during allocation,
@@ -73,7 +73,7 @@ struct kvm_resize_hpt {
 	int error;
 
 	/* Private to the work thread, until error != -EBUSY,
-	 * then protected by kvm->lock.
+	 * then protected by kvm->arch.mmu_setup_lock.
 	 */
 	struct kvm_hpt_info hpt;
 };
@@ -139,7 +139,7 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order)
 	long err = -EBUSY;
 	struct kvm_hpt_info info;
 
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.mmu_setup_lock);
 	if (kvm->arch.mmu_ready) {
 		kvm->arch.mmu_ready = 0;
 		/* order mmu_ready vs. vcpus_running */
@@ -183,7 +183,7 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order)
 		/* Ensure that each vcpu will flush its TLB on next entry. */
 		cpumask_setall(&kvm->arch.need_tlb_flush);
 
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.mmu_setup_lock);
 	return err;
 }
@@ -1447,7 +1447,7 @@ static void resize_hpt_pivot(struct kvm_resize_hpt *resize)
 
 static void resize_hpt_release(struct kvm *kvm, struct kvm_resize_hpt *resize)
 {
-	if (WARN_ON(!mutex_is_locked(&kvm->lock)))
+	if (WARN_ON(!mutex_is_locked(&kvm->arch.mmu_setup_lock)))
 		return;
 
 	if (!resize)
@@ -1474,14 +1474,14 @@ static void resize_hpt_prepare_work(struct work_struct *work)
 	if (WARN_ON(resize->error != -EBUSY))
 		return;
 
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.mmu_setup_lock);
 
 	/* Request is still current? */
 	if (kvm->arch.resize_hpt == resize) {
 		/* We may request large allocations here:
-		 * do not sleep with kvm->lock held for a while.
+		 * do not sleep with kvm->arch.mmu_setup_lock held for a while.
 		 */
-		mutex_unlock(&kvm->lock);
+		mutex_unlock(&kvm->arch.mmu_setup_lock);
 
 		resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n",
 				 resize->order);
@@ -1494,9 +1494,9 @@ static void resize_hpt_prepare_work(struct work_struct *work)
 		if (WARN_ON(err == -EBUSY))
 			err = -EINPROGRESS;
 
-		mutex_lock(&kvm->lock);
+		mutex_lock(&kvm->arch.mmu_setup_lock);
 		/* It is possible that kvm->arch.resize_hpt != resize
-		 * after we grab kvm->lock again.
+		 * after we grab kvm->arch.mmu_setup_lock again.
 		 */
 	}
@@ -1505,7 +1505,7 @@ static void resize_hpt_prepare_work(struct work_struct *work)
 	if (kvm->arch.resize_hpt != resize)
 		resize_hpt_release(kvm, resize);
 
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.mmu_setup_lock);
 }
 
 long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
@@ -1522,7 +1522,7 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
 	if (shift && ((shift < 18) || (shift > 46)))
 		return -EINVAL;
 
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.mmu_setup_lock);
 
 	resize = kvm->arch.resize_hpt;
@@ -1565,7 +1565,7 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
 		ret = 100; /* estimated time in ms */
 
 out:
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.mmu_setup_lock);
 	return ret;
 }
@@ -1588,7 +1588,7 @@ long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
 	if (shift && ((shift < 18) || (shift > 46)))
 		return -EINVAL;
 
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.mmu_setup_lock);
 
 	resize = kvm->arch.resize_hpt;
@@ -1625,7 +1625,7 @@ long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
 	smp_mb();
 
 out_no_hpt:
 	resize_hpt_release(kvm, resize);
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.mmu_setup_lock);
 	return ret;
 }
@@ -1868,7 +1868,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
 		return -EINVAL;
 
 	/* lock out vcpus from running while we're doing this */
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.mmu_setup_lock);
 	mmu_ready = kvm->arch.mmu_ready;
 	if (mmu_ready) {
 		kvm->arch.mmu_ready = 0;	/* temporarily */
@@ -1876,7 +1876,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
 		smp_mb();
 		if (atomic_read(&kvm->arch.vcpus_running)) {
 			kvm->arch.mmu_ready = 1;
-			mutex_unlock(&kvm->lock);
+			mutex_unlock(&kvm->arch.mmu_setup_lock);
 			return -EBUSY;
 		}
 	}
@@ -1963,7 +1963,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
 	/* Order HPTE updates vs. mmu_ready */
 	smp_wmb();
 	kvm->arch.mmu_ready = mmu_ready;
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.mmu_setup_lock);
 
 	if (err)
 		return err;
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2257,11 +2257,17 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
 			pr_devel("KVM: collision on id %u", id);
 			vcore = NULL;
 		} else if (!vcore) {
+			/*
+			 * Take mmu_setup_lock for mutual exclusion
+			 * with kvmppc_update_lpcr().
+			 */
 			err = -ENOMEM;
 			vcore = kvmppc_vcore_create(kvm,
 					id & ~(kvm->arch.smt_mode - 1));
+			mutex_lock(&kvm->arch.mmu_setup_lock);
 			kvm->arch.vcores[core] = vcore;
 			kvm->arch.online_vcores++;
+			mutex_unlock(&kvm->arch.mmu_setup_lock);
 		}
 	}
 	mutex_unlock(&kvm->lock);
@@ -3821,7 +3827,7 @@ static int kvmhv_setup_mmu(struct kvm_vcpu *vcpu)
 	int r = 0;
 	struct kvm *kvm = vcpu->kvm;
 
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.mmu_setup_lock);
 	if (!kvm->arch.mmu_ready) {
 		if (!kvm_is_radix(kvm))
 			r = kvmppc_hv_setup_htab_rma(vcpu);
@@ -3831,7 +3837,7 @@ static int kvmhv_setup_mmu(struct kvm_vcpu *vcpu)
 			kvm->arch.mmu_ready = 1;
 		}
 	}
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.mmu_setup_lock);
 	return r;
 }
@@ -4439,7 +4445,8 @@ static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm,
 
 /*
  * Update LPCR values in kvm->arch and in vcores.
- * Caller must hold kvm->lock.
+ * Caller must hold kvm->arch.mmu_setup_lock (for mutual exclusion
+ * of kvm->arch.lpcr update).
  */
 void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask)
 {
@@ -4491,7 +4498,7 @@ void kvmppc_setup_partition_table(struct kvm *kvm)
 
 /*
  * Set up HPT (hashed page table) and RMA (real-mode area).
- * Must be called with kvm->lock held.
+ * Must be called with kvm->arch.mmu_setup_lock held.
  */
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 {
@@ -4579,7 +4586,10 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 	goto out_srcu;
 }
 
-/* Must be called with kvm->lock held and mmu_ready = 0 and no vcpus running */
+/*
+ * Must be called with kvm->arch.mmu_setup_lock held and
+ * mmu_ready = 0 and no vcpus running.
+ */
 int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
 {
 	if (nesting_enabled(kvm))
@@ -4596,7 +4606,10 @@ int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
 	return 0;
 }
 
-/* Must be called with kvm->lock held and mmu_ready = 0 and no vcpus running */
+/*
+ * Must be called with kvm->arch.mmu_setup_lock held and
+ * mmu_ready = 0 and no vcpus running.
+ */
 int kvmppc_switch_mmu_to_radix(struct kvm *kvm)
 {
 	int err;
@@ -4701,6 +4714,8 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
 	char buf[32];
 	int ret;
 
+	mutex_init(&kvm->arch.mmu_setup_lock);
+
 	/* Allocate the guest's logical partition ID */
 	lpid = kvmppc_alloc_lpid();
@@ -5226,7 +5241,7 @@ static int kvmhv_configure_mmu(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg)
 	if (kvmhv_on_pseries() && !radix)
 		return -EINVAL;
 
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.mmu_setup_lock);
 	if (radix != kvm_is_radix(kvm)) {
 		if (kvm->arch.mmu_ready) {
 			kvm->arch.mmu_ready = 0;
@@ -5254,7 +5269,7 @@ static int kvmhv_configure_mmu(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg)
 	err = 0;
 
 out_unlock:
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.mmu_setup_lock);
 	return err;
 }