Commit 6b73a960 authored by Marcelo Tosatti

Revert "KVM: MMU: lazily drop large spte"

This reverts commit caf6900f.

It is causing migration failures, reference
https://bugzilla.kernel.org/show_bug.cgi?id=54061.
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent ed55705d
@@ -1105,7 +1105,8 @@ static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
 
 /*
  * Write-protect on the specified @sptep, @pt_protect indicates whether
- * spte write-protection is caused by protecting shadow page table.
+ * spte writ-protection is caused by protecting shadow page table.
+ * @flush indicates whether tlb need be flushed.
  *
  * Note: write protection is difference between drity logging and spte
  * protection:
@@ -1114,9 +1115,10 @@ static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
  * - for spte protection, the spte can be writable only after unsync-ing
  *   shadow page.
  *
- * Return true if tlb need be flushed.
+ * Return true if the spte is dropped.
  */
-static bool spte_write_protect(struct kvm *kvm, u64 *sptep, bool pt_protect)
+static bool
+spte_write_protect(struct kvm *kvm, u64 *sptep, bool *flush, bool pt_protect)
 {
         u64 spte = *sptep;
 
@@ -1126,11 +1128,17 @@ static bool spte_write_protect(struct kvm *kvm, u64 *sptep, bool pt_protect)
 
         rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);
 
+        if (__drop_large_spte(kvm, sptep)) {
+                *flush |= true;
+                return true;
+        }
+
         if (pt_protect)
                 spte &= ~SPTE_MMU_WRITEABLE;
         spte = spte & ~PT_WRITABLE_MASK;
 
-        return mmu_spte_update(sptep, spte);
+        *flush |= mmu_spte_update(sptep, spte);
+        return false;
 }
 
 static bool __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp,
@@ -1142,8 +1150,11 @@ static bool __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp,
 
         for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
                 BUG_ON(!(*sptep & PT_PRESENT_MASK));
+                if (spte_write_protect(kvm, sptep, &flush, pt_protect)) {
+                        sptep = rmap_get_first(*rmapp, &iter);
+                        continue;
+                }
 
-                flush |= spte_write_protect(kvm, sptep, pt_protect);
                 sptep = rmap_get_next(&iter);
         }
 
@@ -2581,8 +2592,6 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
                         break;
                 }
 
-                drop_large_spte(vcpu, iterator.sptep);
-
                 if (!is_shadow_present_pte(*iterator.sptep)) {
                         u64 base_addr = iterator.addr;
 
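For context on the loop shape being restored in __rmap_write_protect: spte_write_protect() again reports when it has dropped a large spte, and the caller restarts from rmap_get_first() because the dropped entry has been unlinked from the rmap chain and the current iterator can no longer be advanced safely. The stand-alone sketch below models that restart-on-removal pattern with a plain linked list; it is illustrative only, and every identifier in it (node, write_protect, the bit encoding) is a made-up stand-in rather than kernel code.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy model of an rmap chain: a singly-linked list of "sptes". */
struct node {
        unsigned long spte;
        struct node *next;
};

/*
 * Model of spte_write_protect(): if the entry is "large" (bit 1 set here),
 * unlink and free it and report true so the caller restarts its walk;
 * otherwise just clear the "writable" bit (bit 0) and report false.
 */
static bool write_protect(struct node **head, struct node *n)
{
        if (n->spte & 0x2) {
                struct node **pp;

                for (pp = head; *pp; pp = &(*pp)->next) {
                        if (*pp == n) {
                                *pp = n->next;  /* unlink: iterator is now stale */
                                free(n);
                                return true;
                        }
                }
        }

        n->spte &= ~0x1UL;
        return false;
}

int main(void)
{
        struct node *head = NULL, *n;
        unsigned long vals[] = { 0x1, 0x3, 0x5, 0x7 };  /* 0x3 and 0x7 are "large" */

        for (size_t i = 0; i < 4; i++) {
                n = malloc(sizeof(*n));
                n->spte = vals[i];
                n->next = head;
                head = n;
        }

        /*
         * Mirror of the restored loop: whenever an entry was dropped,
         * restart from the head instead of advancing the stale iterator.
         */
        for (n = head; n;) {
                if (write_protect(&head, n)) {
                        n = head;       /* rmap_get_first() equivalent */
                        continue;
                }
                n = n->next;            /* rmap_get_next() equivalent */
        }

        for (n = head; n; n = n->next)
                printf("spte %#lx\n", n->spte);

        while (head) {                  /* cleanup */
                n = head->next;
                free(head);
                head = n;
        }
        return 0;
}

Restarting from the head is the simplest way to keep the walk valid after an unlink; the restored kernel loop takes the same approach rather than trying to repair the iterator in place.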