Commit a6a0b05d authored by Ben Gardon, committed by Paolo Bonzini

kvm: x86/mmu: Support dirty logging for the TDP MMU

Dirty logging is a key feature of the KVM MMU and must be supported by
the TDP MMU. Add support for both the write protection and PML dirty
logging modes.

Tested by running kvm-unit-tests and KVM selftests on an Intel Haswell
machine. This series introduced no new failures.

This series can be viewed in Gerrit at:
	https://linux-review.googlesource.com/c/virt/kvm/kvm/+/2538

Signed-off-by: Ben Gardon <bgardon@google.com>
Message-Id: <20201014182700.2888246-16-bgardon@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 1d8dd6b3
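
The two modes named in the commit message differ in which SPTE bit is cleared when dirty state is harvested. A minimal sketch of that distinction, assuming KVM's usual SPTE masks (PT_WRITABLE_MASK, shadow_dirty_mask) are visible; this is illustrative only, not the body of the collapsed tdp_mmu.c diff:

/*
 * Illustrative sketch: dirty logging either write-protects the SPTE,
 * so the next guest write faults and gets logged by software, or
 * clears the D bit, so hardware (PML) records the next write instead.
 */
static u64 clear_dirty_for_logging(u64 spte, bool wrprot)
{
	if (wrprot)
		return spte & ~PT_WRITABLE_MASK;	/* write-protection mode */
	return spte & ~shadow_dirty_mask;		/* PML / D-bit mode */
}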
arch/x86/kvm/mmu/mmu.c

@@ -1223,6 +1223,9 @@ static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
 {
 	struct kvm_rmap_head *rmap_head;
 
+	if (kvm->arch.tdp_mmu_enabled)
+		kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
+				slot->base_gfn + gfn_offset, mask, true);
 	while (mask) {
 		rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
 					  PG_LEVEL_4K, slot);
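
The rmap loop that follows the new call decodes the (gfn_offset, mask) pair the same way the TDP MMU hook must: each set bit in the 64-bit mask selects one 4K page. A hedged sketch of that shared decoding (handle_gfn() is a hypothetical per-page handler, not a KVM function):

static void handle_gfn(gfn_t gfn);	/* hypothetical per-page handler */

/* Sketch: bit i of 'mask' covers the 4K page at slot->base_gfn + gfn_offset + i. */
static void walk_dirty_mask(struct kvm_memory_slot *slot,
			    gfn_t gfn_offset, unsigned long mask)
{
	gfn_t base = slot->base_gfn + gfn_offset;

	while (mask) {
		handle_gfn(base + __ffs(mask));	/* lowest set bit first */
		mask &= mask - 1;		/* clear that bit */
	}
}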
@@ -1249,6 +1252,9 @@ void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
 {
 	struct kvm_rmap_head *rmap_head;
 
+	if (kvm->arch.tdp_mmu_enabled)
+		kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
+				slot->base_gfn + gfn_offset, mask, false);
 	while (mask) {
 		rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
 					  PG_LEVEL_4K, slot);
@@ -5473,6 +5479,8 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
 	spin_lock(&kvm->mmu_lock);
 	flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect,
 				start_level, KVM_MAX_HUGEPAGE_LEVEL, false);
+	if (kvm->arch.tdp_mmu_enabled)
+		flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, PG_LEVEL_4K);
 	spin_unlock(&kvm->mmu_lock);
 
 	/*
@@ -5561,6 +5569,8 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
 	spin_lock(&kvm->mmu_lock);
 	flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false);
+	if (kvm->arch.tdp_mmu_enabled)
+		flush |= kvm_tdp_mmu_clear_dirty_slot(kvm, memslot);
 	spin_unlock(&kvm->mmu_lock);
 
 	/*
@@ -5582,6 +5592,8 @@ void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
 	spin_lock(&kvm->mmu_lock);
 	flush = slot_handle_large_level(kvm, memslot, slot_rmap_write_protect,
 					false);
+	if (kvm->arch.tdp_mmu_enabled)
+		flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, PG_LEVEL_2M);
 	spin_unlock(&kvm->mmu_lock);
 
 	if (flush)
@@ -5596,6 +5608,8 @@ void kvm_mmu_slot_set_dirty(struct kvm *kvm,
 	spin_lock(&kvm->mmu_lock);
 	flush = slot_handle_all_level(kvm, memslot, __rmap_set_dirty, false);
+	if (kvm->arch.tdp_mmu_enabled)
+		flush |= kvm_tdp_mmu_slot_set_dirty(kvm, memslot);
 	spin_unlock(&kvm->mmu_lock);
 
 	if (flush)
...
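
Every slot-wide hook above follows the same shape: run the legacy rmap walk, OR in the TDP MMU result, and issue a single remote TLB flush afterwards, so enabling the TDP MMU never adds a second flush. Schematically (legacy_walk() and tdp_walk() are placeholders for the slot_handle_*() and kvm_tdp_mmu_*() calls, not real functions):

static bool legacy_walk(struct kvm *kvm, struct kvm_memory_slot *memslot);	/* placeholder */
static bool tdp_walk(struct kvm *kvm, struct kvm_memory_slot *memslot);	/* placeholder */

static void slot_op_pattern(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	bool flush;

	spin_lock(&kvm->mmu_lock);
	flush = legacy_walk(kvm, memslot);
	if (kvm->arch.tdp_mmu_enabled)
		flush |= tdp_walk(kvm, memslot);
	spin_unlock(&kvm->mmu_lock);

	if (flush)
		kvm_flush_remote_tlbs(kvm);	/* one flush covers both MMUs */
}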
arch/x86/kvm/mmu/tdp_iter.h

@@ -41,11 +41,14 @@ struct tdp_iter {
  * Iterates over every SPTE mapping the GFN range [start, end) in a
  * preorder traversal.
  */
-#define for_each_tdp_pte(iter, root, root_level, start, end) \
-	for (tdp_iter_start(&iter, root, root_level, PG_LEVEL_4K, start); \
+#define for_each_tdp_pte_min_level(iter, root, root_level, min_level, start, end) \
+	for (tdp_iter_start(&iter, root, root_level, min_level, start); \
 	     iter.valid && iter.gfn < end; \
 	     tdp_iter_next(&iter))
 
+#define for_each_tdp_pte(iter, root, root_level, start, end) \
+	for_each_tdp_pte_min_level(iter, root, root_level, PG_LEVEL_4K, start, end)
+
 u64 *spte_to_child_pt(u64 pte, int level);
 
 void tdp_iter_start(struct tdp_iter *iter, u64 *root_pt, int root_level,
...
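
The new min_level parameter exists so callers such as kvm_mmu_slot_largepage_remove_write_access() can skip 4K leaves entirely. A hypothetical walk over 2M-and-larger SPTEs, with root_pt and root_level as passed to tdp_iter_start() above (the loop body is a placeholder):

static void wrprot_large_sketch(u64 *root_pt, int root_level,
				gfn_t start, gfn_t end)
{
	struct tdp_iter iter;

	/* Visit only SPTEs at PG_LEVEL_2M and above in [start, end). */
	for_each_tdp_pte_min_level(iter, root_pt, root_level,
				   PG_LEVEL_2M, start, end) {
		if (!is_shadow_present_pte(iter.old_spte))
			continue;
		/* ...write-protect iter.old_spte here... */
	}
}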
arch/x86/kvm/mmu/tdp_mmu.c

[This diff is collapsed in the original view; it implements the walkers declared below.]
arch/x86/kvm/mmu/tdp_mmu.h

@@ -28,4 +28,14 @@ int kvm_tdp_mmu_test_age_hva(struct kvm *kvm, unsigned long hva);
 int kvm_tdp_mmu_set_spte_hva(struct kvm *kvm, unsigned long address,
 			     pte_t *host_ptep);
 
+bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
+			     int min_level);
+bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
+				  struct kvm_memory_slot *slot);
+void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
+				       struct kvm_memory_slot *slot,
+				       gfn_t gfn, unsigned long mask,
+				       bool wrprot);
+bool kvm_tdp_mmu_slot_set_dirty(struct kvm *kvm, struct kvm_memory_slot *slot);
+
 #endif /* __KVM_X86_MMU_TDP_MMU_H */
include/linux/kvm_host.h

@@ -798,6 +798,7 @@ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
 bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
+void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, gfn_t gfn);
 void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
 
 struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
...
...@@ -143,8 +143,6 @@ static void hardware_disable_all(void); ...@@ -143,8 +143,6 @@ static void hardware_disable_all(void);
static void kvm_io_bus_destroy(struct kvm_io_bus *bus); static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, gfn_t gfn);
__visible bool kvm_rebooting; __visible bool kvm_rebooting;
EXPORT_SYMBOL_GPL(kvm_rebooting); EXPORT_SYMBOL_GPL(kvm_rebooting);
@@ -2645,8 +2643,7 @@ int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
 }
 EXPORT_SYMBOL_GPL(kvm_clear_guest);
 
-static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot,
-				    gfn_t gfn)
+void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, gfn_t gfn)
 {
 	if (memslot && memslot->dirty_bitmap) {
 		unsigned long rel_gfn = gfn - memslot->base_gfn;

@@ -2654,6 +2651,7 @@ static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot,
 		set_bit_le(rel_gfn, memslot->dirty_bitmap);
 	}
 }
+EXPORT_SYMBOL_GPL(mark_page_dirty_in_slot);
 
 void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
 {
...
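
mark_page_dirty_in_slot() is made non-static, declared in kvm_host.h, and exported so the TDP MMU code can log a write it observes directly, without repeating a gfn-to-memslot lookup. A hypothetical call site, assuming KVM's existing is_dirty_spte() helper (the function name is illustrative, not from this diff):

static void handle_dirty_log_sketch(struct kvm_memory_slot *slot, gfn_t gfn,
				    u64 old_spte, u64 new_spte)
{
	/* When the SPTE change newly makes the page dirty, log it. */
	if (is_dirty_spte(new_spte) && !is_dirty_spte(old_spte))
		mark_page_dirty_in_slot(slot, gfn);
}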