Commit ded13fc1 authored by Paul Mackerras

KVM: PPC: Book3S HV: Fix migration and HPT resizing of HPT guests on radix hosts

This fixes two errors that prevent a guest using the HPT MMU from
successfully migrating to a POWER9 host in radix MMU mode, or resizing
its HPT when running on a radix host.

The first bug was that commit 8dc6cca5 ("KVM: PPC: Book3S HV:
Don't rely on host's page size information", 2017-09-11) missed two
uses of hpte_base_page_size(), one in the HPT rehashing code and
one in kvm_htab_write() (which is used on the destination side in
migrating a HPT guest).  Instead we use kvmppc_hpte_base_page_shift().
Having the shift count means that we can use left and right shifts
instead of multiplication and division in a few places.

Along the way, this adds a check in kvm_htab_write() to ensure that the
page size encoding in the incoming HPTEs is recognized, and if not
return an EINVAL error to userspace.

The second bug was that kvm_htab_write was performing some but not all
of the functions of kvmhv_setup_mmu(), resulting in the destination VM
being left in radix mode as far as the hardware is concerned.  The
simplest fix for now is make kvm_htab_write() call
kvmppc_setup_partition_table() like kvmppc_hv_setup_htab_rma() does.
In future it would be better to refactor the code more extensively
to remove the duplication.

Fixes: 8dc6cca5 ("KVM: PPC: Book3S HV: Don't rely on host's page size information")
Fixes: 7a84084c ("KVM: PPC: Book3S HV: Set partition table rather than SDR1 on POWER9")
Reported-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
Tested-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
parent cf9b0772
@@ -180,6 +180,7 @@ extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
 			struct iommu_group *grp);
 extern int kvmppc_switch_mmu_to_hpt(struct kvm *kvm);
 extern int kvmppc_switch_mmu_to_radix(struct kvm *kvm);
+extern void kvmppc_setup_partition_table(struct kvm *kvm);
 extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
 				struct kvm_create_spapr_tce_64 *args);
......
@@ -1238,8 +1238,9 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
 	unsigned long vpte, rpte, guest_rpte;
 	int ret;
 	struct revmap_entry *rev;
-	unsigned long apsize, psize, avpn, pteg, hash;
+	unsigned long apsize, avpn, pteg, hash;
 	unsigned long new_idx, new_pteg, replace_vpte;
+	int pshift;

 	hptep = (__be64 *)(old->virt + (idx << 4));
@@ -1298,8 +1299,8 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
 		goto out;

 	rpte = be64_to_cpu(hptep[1]);
-	psize = hpte_base_page_size(vpte, rpte);
-	avpn = HPTE_V_AVPN_VAL(vpte) & ~((psize - 1) >> 23);
+	pshift = kvmppc_hpte_base_page_shift(vpte, rpte);
+	avpn = HPTE_V_AVPN_VAL(vpte) & ~(((1ul << pshift) - 1) >> 23);
 	pteg = idx / HPTES_PER_GROUP;
 	if (vpte & HPTE_V_SECONDARY)
 		pteg = ~pteg;
@@ -1311,20 +1312,20 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
 		offset = (avpn & 0x1f) << 23;
 		vsid = avpn >> 5;
 		/* We can find more bits from the pteg value */
-		if (psize < (1ULL << 23))
-			offset |= ((vsid ^ pteg) & old_hash_mask) * psize;
-		hash = vsid ^ (offset / psize);
+		if (pshift < 23)
+			offset |= ((vsid ^ pteg) & old_hash_mask) << pshift;
+		hash = vsid ^ (offset >> pshift);
 	} else {
 		unsigned long offset, vsid;

 		/* We only have 40 - 23 bits of seg_off in avpn */
 		offset = (avpn & 0x1ffff) << 23;
 		vsid = avpn >> 17;
-		if (psize < (1ULL << 23))
-			offset |= ((vsid ^ (vsid << 25) ^ pteg) & old_hash_mask) * psize;
-		hash = vsid ^ (vsid << 25) ^ (offset / psize);
+		if (pshift < 23)
+			offset |= ((vsid ^ (vsid << 25) ^ pteg) & old_hash_mask) << pshift;
+		hash = vsid ^ (vsid << 25) ^ (offset >> pshift);
 	}
 	new_pteg = hash & new_hash_mask;
@@ -1801,6 +1802,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
 	ssize_t nb;
 	long int err, ret;
 	int mmu_ready;
+	int pshift;

 	if (!access_ok(VERIFY_READ, buf, count))
 		return -EFAULT;
@@ -1855,6 +1857,9 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
 			err = -EINVAL;
 			if (!(v & HPTE_V_VALID))
 				goto out;
+			pshift = kvmppc_hpte_base_page_shift(v, r);
+			if (pshift <= 0)
+				goto out;
 			lbuf += 2;
 			nb += HPTE_SIZE;
@@ -1869,14 +1874,18 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
 				goto out;
 			}
 			if (!mmu_ready && is_vrma_hpte(v)) {
-				unsigned long psize = hpte_base_page_size(v, r);
-				unsigned long senc = slb_pgsize_encoding(psize);
-				unsigned long lpcr;
+				unsigned long senc, lpcr;

+				senc = slb_pgsize_encoding(1ul << pshift);
 				kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
 					(VRMA_VSID << SLB_VSID_SHIFT_1T);
-				lpcr = senc << (LPCR_VRMASD_SH - 4);
-				kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
+				if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
+					lpcr = senc << (LPCR_VRMASD_SH - 4);
+					kvmppc_update_lpcr(kvm, lpcr,
+							   LPCR_VRMASD);
+				} else {
+					kvmppc_setup_partition_table(kvm);
+				}
 				mmu_ready = 1;
 			}
 			++i;
......
@@ -120,7 +120,6 @@ MODULE_PARM_DESC(h_ipi_redirect, "Redirect H_IPI wakeup to a free host core");

 static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
-static void kvmppc_setup_partition_table(struct kvm *kvm);

 static inline struct kvm_vcpu *next_runnable_thread(struct kvmppc_vcore *vc,
 						    int *ip)
@@ -3574,7 +3573,7 @@ static void kvmppc_mmu_destroy_hv(struct kvm_vcpu *vcpu)
 	return;
 }

-static void kvmppc_setup_partition_table(struct kvm *kvm)
+void kvmppc_setup_partition_table(struct kvm *kvm)
 {
 	unsigned long dw0, dw1;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment