Commit e88643ba authored by James Hogan

KVM: MIPS/MMU: Use generic dirty log & protect helper

MIPS hasn't properly supported dirty page logging up to this point: pages in
slots with dirty logging enabled are never made clean again, and tlbmod
exceptions from writes to clean pages have been assumed to be due to guest TLB
protection and passed unconditionally to the guest.

Use the generic dirty logging helper kvm_get_dirty_log_protect() to
properly implement kvm_vm_ioctl_get_dirty_log(), as ARM already does.
This uses xchg to clear the dirty bits when reading them, rather
than wiping them out afterwards with a memset, which would potentially
wipe recently set bits that weren't caught by kvm_get_dirty_log(). It
also makes the pages clean again using the
kvm_arch_mmu_enable_log_dirty_pt_masked() architecture callback so that
further writes after the shadow memslot is flushed will trigger tlbmod
exceptions and dirty handling.
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
parent f0c0c330
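
For context only (not part of the original commit): a minimal userspace sketch of
how the dirty log served by this code path is typically retrieved through the
KVM_GET_DIRTY_LOG ioctl. The helper name, vm_fd, slot number, and bitmap buffer
are assumptions standing in for whatever the VMM set up earlier.

#include <string.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Hypothetical helper: vm_fd is the VM file descriptor, and the memslot
 * numbered 'slot' was registered with KVM_MEM_LOG_DIRTY_PAGES.  'bitmap'
 * must be large enough to hold one bit per page in that slot.
 */
static int get_dirty_bitmap(int vm_fd, uint32_t slot, void *bitmap)
{
	struct kvm_dirty_log log;

	memset(&log, 0, sizeof(log));
	log.slot = slot;
	log.dirty_bitmap = bitmap;

	/*
	 * With this commit, the MIPS implementation fills the bitmap via
	 * kvm_get_dirty_log_protect(), clearing the in-kernel dirty bits
	 * and write protecting the reported pages so that later guest
	 * writes fault (tlbmod) and are logged again.
	 */
	return ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
}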
@@ -20,6 +20,7 @@ config KVM
 	select EXPORT_UASM
 	select PREEMPT_NOTIFIERS
 	select ANON_INODES
+	select KVM_GENERIC_DIRTYLOG_READ_PROTECT
 	select KVM_MMIO
 	select SRCU
 	---help---
@@ -1086,42 +1086,46 @@ long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
 	return r;
 }
 
-/* Get (and clear) the dirty memory log for a memory slot. */
+/**
+ * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
+ * @kvm: kvm instance
+ * @log: slot id and address to which we copy the log
+ *
+ * Steps 1-4 below provide general overview of dirty page logging. See
+ * kvm_get_dirty_log_protect() function description for additional details.
+ *
+ * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we
+ * always flush the TLB (step 4) even if previous step failed and the dirty
+ * bitmap may be corrupt. Regardless of previous outcome the KVM logging API
+ * does not preclude user space subsequent dirty log read. Flushing TLB ensures
+ * writes will be marked dirty for next log read.
+ *
+ *   1. Take a snapshot of the bit and clear it if needed.
+ *   2. Write protect the corresponding page.
+ *   3. Copy the snapshot to the userspace.
+ *   4. Flush TLB's if needed.
+ */
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 {
 	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
-	unsigned long ga, ga_end;
-	int is_dirty = 0;
+	bool is_dirty = false;
 	int r;
-	unsigned long n;
 
 	mutex_lock(&kvm->slots_lock);
 
-	r = kvm_get_dirty_log(kvm, log, &is_dirty);
-	if (r)
-		goto out;
+	r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);
 
-	/* If nothing is dirty, don't bother messing with page tables. */
 	if (is_dirty) {
 		slots = kvm_memslots(kvm);
 		memslot = id_to_memslot(slots, log->slot);
 
-		ga = memslot->base_gfn << PAGE_SHIFT;
-		ga_end = ga + (memslot->npages << PAGE_SHIFT);
-
-		kvm_info("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,
-			 ga_end);
-
-		n = kvm_dirty_bitmap_bytes(memslot);
-		memset(memslot->dirty_bitmap, 0, n);
+		/* Let implementation handle TLB/GVA invalidation */
+		kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot);
 	}
 
-	r = 0;
-out:
 	mutex_unlock(&kvm->slots_lock);
 	return r;
 }
 
 long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
@@ -428,6 +428,28 @@ int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn)
 			      end_gfn << PAGE_SHIFT);
 }
 
+/**
+ * kvm_arch_mmu_enable_log_dirty_pt_masked() - write protect dirty pages
+ * @kvm:	The KVM pointer
+ * @slot:	The memory slot associated with mask
+ * @gfn_offset:	The gfn offset in memory slot
+ * @mask:	The mask of dirty pages at offset 'gfn_offset' in this memory
+ *		slot to be write protected
+ *
+ * Walks bits set in mask write protects the associated pte's. Caller must
+ * acquire @kvm->mmu_lock.
+ */
+void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
+		struct kvm_memory_slot *slot,
+		gfn_t gfn_offset, unsigned long mask)
+{
+	gfn_t base_gfn = slot->base_gfn + gfn_offset;
+	gfn_t start = base_gfn + __ffs(mask);
+	gfn_t end = base_gfn + __fls(mask);
+
+	kvm_mips_mkclean_gpa_pt(kvm, start, end);
+}
+
 /**
  * kvm_mips_map_page() - Map a guest physical page.
  * @vcpu:  VCPU pointer.
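
For illustration only (not from the commit): a small standalone sketch of how the
mask passed to kvm_arch_mmu_enable_log_dirty_pt_masked() is reduced to a single
gfn range. The helper names and sample values are made up, and
__builtin_ctzll/__builtin_clzll stand in for the kernel's __ffs()/__fls().

#include <stdio.h>

/* Stand-ins for the kernel's __ffs()/__fls() on a non-zero 64-bit mask. */
static unsigned int lowest_set(unsigned long long mask)
{
	return __builtin_ctzll(mask);		/* index of lowest set bit */
}

static unsigned int highest_set(unsigned long long mask)
{
	return 63 - __builtin_clzll(mask);	/* index of highest set bit */
}

int main(void)
{
	unsigned long long base_gfn = 0x40000;	/* slot->base_gfn + gfn_offset */
	unsigned long long mask = 0xf0;		/* pages 4..7 of this 64-page chunk dirty */

	unsigned long long start = base_gfn + lowest_set(mask);
	unsigned long long end = base_gfn + highest_set(mask);

	/*
	 * The commit cleans (write protects) the whole [start, end] span with
	 * one kvm_mips_mkclean_gpa_pt() call, even if bits in between are
	 * clear, which is sufficient for dirty logging correctness.
	 */
	printf("mkclean gfns 0x%llx..0x%llx\n", start, end);
	return 0;
}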