Commit a64fd707 authored by Paul Mackerras, committed by Alexander Graf

KVM: PPC: Book3S HV: Reset reverse-map chains when resetting the HPT

With HV-style KVM, we maintain reverse-mapping lists that enable us to
find all the HPT (hashed page table) entries that reference each guest
physical page, with the heads of the lists in the memslot->arch.rmap
arrays.  When we reset the HPT (i.e. when we reboot the VM), we clear
out all the HPT entries but we were not clearing out the reverse
mapping lists.  The result is that as we create new HPT entries, the
lists get corrupted, which can easily lead to loops, resulting in the
host kernel hanging when it tries to traverse those lists.
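
As a rough, self-contained illustration of the failure mode (the rmap[]/revmap[] layout, the RMAP_PRESENT bit and the index mask below are demo assumptions, not the kernel's actual encoding): a chain head that survives a reset points into zeroed entries, so a walker never gets back to where it started.

#include <stdio.h>

#define NHPTE		8
#define RMAP_PRESENT	(1UL << 63)	/* demo flag: head field holds a valid index */
#define RMAP_INDEX	0xffffUL	/* demo mask: low bits index the first HPTE  */

struct revmap_entry { unsigned long forw, back; };

static struct revmap_entry revmap[NHPTE];
static unsigned long rmap[1];		/* chain heads, one per guest page (demo: one page) */

/* Link HPTE i at the head of guest page 0's circular list. */
static void chain_insert(unsigned long i)
{
	if (!(rmap[0] & RMAP_PRESENT)) {
		revmap[i].forw = revmap[i].back = i;	/* singleton chain */
	} else {
		unsigned long head = rmap[0] & RMAP_INDEX;
		unsigned long tail = revmap[head].back;

		revmap[i].forw = head;
		revmap[i].back = tail;
		revmap[tail].forw = i;
		revmap[head].back = i;
	}
	rmap[0] = RMAP_PRESENT | i;
}

int main(void)
{
	chain_insert(3);
	chain_insert(5);		/* healthy chain: 5 <-> 3 */

	/* Old reset path: HPT entries zeroed, but rmap[0] keeps its stale head. */
	for (int i = 0; i < NHPTE; i++)
		revmap[i].forw = revmap[i].back = 0;

	/*
	 * A walker starting from the stale head now visits 5 -> 0 -> 0 -> ...
	 * forever, because zeroed entry 0 has forw pointing at itself.
	 */
	unsigned long head = rmap[0] & RMAP_INDEX, i = head;
	for (int steps = 0; steps < 10; steps++) {
		printf("visit HPTE %lu\n", i);
		i = revmap[i].forw;
		if (i == head)
			return 0;	/* a healthy circular chain closes here */
	}
	printf("walk never returned to head: the list loops\n");
	return 1;
}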

This fixes the problem by zeroing out all the reverse mapping lists
when we zero out the HPT.  This incidentally means that we are also
zeroing our record of the referenced and changed bits (not the bits
in the Linux PTEs, used by the Linux MM subsystem, but the bits used
by the KVM_GET_DIRTY_LOG ioctl, and those used by kvm_age_hva() and
kvm_test_age_hva()).
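
A minimal sketch of why the two resets coincide, again with assumed, demo-only names: if the per-page rmap word carries both the chain head and the referenced/changed bits, a single memset over the array necessarily clears both.

#include <string.h>

#define RMAP_REFERENCED	(1UL << 62)	/* demo bit: backs kvm_age_hva()/kvm_test_age_hva() */
#define RMAP_CHANGED	(1UL << 61)	/* demo bit: backs the KVM_GET_DIRTY_LOG ioctl      */

static unsigned long rmap[16];		/* per-page word: chain head plus R/C bits */

static void rmap_reset(void)
{
	/* One memset drops the chain heads and the R/C record together. */
	memset(rmap, 0, sizeof(rmap));
}

int main(void)
{
	rmap[2] = RMAP_REFERENCED | RMAP_CHANGED | 5;	/* page 2: head 5, R+C set */
	rmap_reset();
	return rmap[2] != 0;				/* 0: everything cleared   */
}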

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
parent a2932923
arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -46,6 +46,7 @@
 static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
 			long pte_index, unsigned long pteh,
 			unsigned long ptel, unsigned long *pte_idx_ret);
+static void kvmppc_rmap_reset(struct kvm *kvm);
 
 long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
 {
@@ -143,6 +144,10 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp)
 	order = kvm->arch.hpt_order;
 	/* Set the entire HPT to 0, i.e. invalid HPTEs */
 	memset((void *)kvm->arch.hpt_virt, 0, 1ul << order);
+	/*
+	 * Reset all the reverse-mapping chains for all memslots
+	 */
+	kvmppc_rmap_reset(kvm);
 	/*
 	 * Set the whole last_vcpu array to an invalid vcpu number.
 	 * This ensures that each vcpu will flush its TLB on next entry.
@@ -772,6 +777,25 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	goto out_put;
 }
 
+static void kvmppc_rmap_reset(struct kvm *kvm)
+{
+	struct kvm_memslots *slots;
+	struct kvm_memory_slot *memslot;
+	int srcu_idx;
+
+	srcu_idx = srcu_read_lock(&kvm->srcu);
+	slots = kvm->memslots;
+	kvm_for_each_memslot(memslot, slots) {
+		/*
+		 * This assumes it is acceptable to lose reference and
+		 * change bits across a reset.
+		 */
+		memset(memslot->arch.rmap, 0,
+		       memslot->npages * sizeof(*memslot->arch.rmap));
+	}
+	srcu_read_unlock(&kvm->srcu, srcu_idx);
+}
+
 static int kvm_handle_hva_range(struct kvm *kvm,
 				unsigned long start,
 				unsigned long end,