Commit 9647c14c authored by Izik Eidus, committed by Avi Kivity

KVM: MMU: Keep a reverse mapping of non-writable translations

The current kvm mmu only reverse maps writable translations.  This is used
to write-protect a page in case it becomes a pagetable.

But with swapping support, we need a reverse mapping of read-only pages as
well:  when we evict a page, we need to remove any mapping to it, whether
writable or not.
Signed-off-by: Izik Eidus <izike@qumranet.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
parent 98348e95
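The effect on is_rmap_pte() can be seen in isolation before reading the diff. What follows is a minimal userspace sketch, not kernel code: the two mask values match the x86 pte bit layout, but shadow_trap_nonpresent_pte and shadow_notrap_nonpresent_pte are given placeholder values here, standing in for kvm's real sentinel constants. A present but read-only pte is rejected by the old predicate and accepted by the new one, which is exactly what lets it enter the reverse map.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;

#define PT_PRESENT_MASK  (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << 1)

/* placeholder sentinels; kvm uses values that can never be valid sptes */
static const u64 shadow_trap_nonpresent_pte   = 0;
static const u64 shadow_notrap_nonpresent_pte = 0x63;

/* before the patch: only present, writable translations were rmap'ed */
static int is_rmap_pte_old(u64 pte)
{
	return (pte & (PT_WRITABLE_MASK | PT_PRESENT_MASK))
		== (PT_WRITABLE_MASK | PT_PRESENT_MASK);
}

/* after the patch: any real translation is rmap'ed, writable or not */
static int is_rmap_pte_new(u64 pte)
{
	return pte != shadow_trap_nonpresent_pte
		&& pte != shadow_notrap_nonpresent_pte;
}

int main(void)
{
	u64 readonly_pte = PT_PRESENT_MASK;	/* present, not writable */

	/* prints "old: 0  new: 1" */
	printf("old: %d  new: %d\n",
	       is_rmap_pte_old(readonly_pte), is_rmap_pte_new(readonly_pte));
	return 0;
}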
@@ -211,8 +211,8 @@ static int is_io_pte(unsigned long pte)
 
 static int is_rmap_pte(u64 pte)
 {
-	return (pte & (PT_WRITABLE_MASK | PT_PRESENT_MASK))
-		== (PT_WRITABLE_MASK | PT_PRESENT_MASK);
+	return pte != shadow_trap_nonpresent_pte
+		&& pte != shadow_notrap_nonpresent_pte;
 }
 
 static void set_shadow_pte(u64 *sptep, u64 spte)
@@ -488,7 +488,6 @@ static void rmap_write_protect(struct kvm *kvm, u64 gfn)
 {
 	unsigned long *rmapp;
 	u64 *spte;
-	u64 *prev_spte;
 
 	gfn = unalias_gfn(kvm, gfn);
 	rmapp = gfn_to_rmap(kvm, gfn);
@@ -497,13 +496,11 @@ static void rmap_write_protect(struct kvm *kvm, u64 gfn)
 	while (spte) {
 		BUG_ON(!spte);
 		BUG_ON(!(*spte & PT_PRESENT_MASK));
-		BUG_ON(!(*spte & PT_WRITABLE_MASK));
 		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
-		prev_spte = spte;
-		spte = rmap_next(kvm, rmapp, spte);
-		rmap_remove(kvm, prev_spte);
-		set_shadow_pte(prev_spte, *prev_spte & ~PT_WRITABLE_MASK);
+		if (is_writeble_pte(*spte))
+			set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
 		kvm_flush_remote_tlbs(kvm);
+		spte = rmap_next(kvm, rmapp, spte);
 	}
 }
 
@@ -908,14 +905,18 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
 		table = __va(table_addr);
 
 		if (level == 1) {
+			int was_rmapped;
+
 			pte = table[index];
+			was_rmapped = is_rmap_pte(pte);
 			if (is_shadow_present_pte(pte) && is_writeble_pte(pte))
 				return 0;
 			mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
 			page_header_update_slot(vcpu->kvm, table, v);
 			table[index] = p | PT_PRESENT_MASK | PT_WRITABLE_MASK |
 								PT_USER_MASK;
-			rmap_add(vcpu, &table[index], v >> PAGE_SHIFT);
+			if (!was_rmapped)
+				rmap_add(vcpu, &table[index], v >> PAGE_SHIFT);
 			return 0;
 		}
 
@@ -1424,10 +1425,8 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 
 		pt = page->spt;
 		for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
 			/* avoid RMW */
-			if (pt[i] & PT_WRITABLE_MASK) {
-				rmap_remove(kvm, &pt[i]);
+			if (pt[i] & PT_WRITABLE_MASK)
 				pt[i] &= ~PT_WRITABLE_MASK;
-			}
 	}
 }
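The consequence of the rmap_write_protect() and kvm_mmu_slot_remove_write_access() hunks is that write protection no longer shrinks the reverse map. A toy userspace model of that behaviour follows; struct rmap, write_protect() and evict() are hypothetical simplifications for illustration, not kvm's real interfaces.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;

#define PT_PRESENT_MASK  (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << 1)
#define MAX_RMAP 4

/* toy reverse map: the shadow ptes that map one guest frame */
struct rmap {
	u64 *sptes[MAX_RMAP];
	int n;
};

/* mirrors the new rmap_write_protect(): clear the writable bit,
 * but leave every rmap entry in place */
static void write_protect(struct rmap *r)
{
	for (int i = 0; i < r->n; i++)
		if (*r->sptes[i] & PT_WRITABLE_MASK)
			*r->sptes[i] &= ~PT_WRITABLE_MASK;
	/* r->n is untouched: no entry is removed */
}

/* eviction walks the same rmap and drops every mapping, the
 * read-only ones included -- the point of the patch */
static void evict(struct rmap *r)
{
	for (int i = 0; i < r->n; i++)
		*r->sptes[i] = 0;	/* make nonpresent */
	r->n = 0;
}

int main(void)
{
	u64 spte_a = PT_PRESENT_MASK | PT_WRITABLE_MASK;
	u64 spte_b = PT_PRESENT_MASK;	/* read-only mapping */
	struct rmap r = { .sptes = { &spte_a, &spte_b }, .n = 2 };

	write_protect(&r);
	printf("after write-protect: %d rmap entries remain\n", r.n);

	evict(&r);
	printf("after evict: spte_a=%llx spte_b=%llx\n",
	       (unsigned long long)spte_a, (unsigned long long)spte_b);
	return 0;
}

With the old code, write_protect() would also have removed the entries from the rmap, leaving evict() unable to find the now read-only translations.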