Commit a9221dd5 authored by Avi Kivity

KVM: MMU: Atomically check for accessed bit when dropping an spte

Currently, in the window between the check for the accessed bit and actually
dropping the spte, a vcpu can access the page through the spte and set the bit,
which will be ignored by the mmu.

Fix by using an exchange operation to atomically fetch the spte and drop it
(see the sketch after the commit metadata below).
Signed-off-by: Avi Kivity <avi@redhat.com>
parent ce061867
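To make the window concrete, here is a simplified, self-contained userspace
sketch (not the actual KVM code: u64, the pfn extraction, the accessed-bit
mask, and the stubbed kvm_set_pfn_accessed() are stand-ins, and C11
atomic_exchange() stands in for the __xchg_spte() helper added by the diff
below), contrasting the old check-then-clear ordering with the atomic
exchange this commit introduces:

/*
 * Simplified userspace sketch, not the kernel code.
 */
#include <stdint.h>
#include <stdatomic.h>
#include <stdio.h>

typedef uint64_t u64;

#define shadow_accessed_mask (1ull << 5)	/* x86 PTE accessed bit */

static void kvm_set_pfn_accessed(u64 pfn)	/* stub for illustration */
{
	printf("pfn 0x%llx accessed\n", (unsigned long long)pfn);
}

/* Old ordering: a vcpu may set the accessed bit between the read at
 * (1) and the overwrite at (2); that update is then lost. */
static void drop_spte_racy(_Atomic u64 *sptep, u64 new_spte)
{
	u64 spte = atomic_load(sptep);		/* (1) read and test */
	if (spte & shadow_accessed_mask)
		kvm_set_pfn_accessed(spte >> 12);
	atomic_store(sptep, new_spte);		/* (2) drop the spte */
}

/* New ordering: fetch and drop in a single atomic exchange, so the
 * accessed bit tested is exactly the one that was cleared. */
static void drop_spte_atomic(_Atomic u64 *sptep, u64 new_spte)
{
	u64 old_spte = atomic_exchange(sptep, new_spte);
	if (old_spte & shadow_accessed_mask)
		kvm_set_pfn_accessed(old_spte >> 12);
}

int main(void)
{
	_Atomic u64 spte = (0x42ull << 12) | shadow_accessed_mask;

	drop_spte_racy(&spte, 0);
	atomic_store(&spte, (0x42ull << 12) | shadow_accessed_mask);
	drop_spte_atomic(&spte, 0);
	return 0;
}

The exchange is what closes the race: any accessed-bit update a vcpu makes
before the swap is observed in old_spte, and none can land afterwards
because the spte is already gone.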
@@ -288,6 +288,21 @@ static void __set_spte(u64 *sptep, u64 spte)
 #endif
 }
 
+static u64 __xchg_spte(u64 *sptep, u64 new_spte)
+{
+#ifdef CONFIG_X86_64
+	return xchg(sptep, new_spte);
+#else
+	u64 old_spte;
+
+	do {
+		old_spte = *sptep;
+	} while (cmpxchg64(sptep, old_spte, new_spte) != old_spte);
+
+	return old_spte;
+#endif
+}
+
 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
 				  struct kmem_cache *base_cache, int min)
 {
@@ -653,18 +668,17 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
 {
 	pfn_t pfn;
+	u64 old_spte;
 
-	if (!is_rmap_spte(*sptep)) {
-		__set_spte(sptep, new_spte);
+	old_spte = __xchg_spte(sptep, new_spte);
+	if (!is_rmap_spte(old_spte))
 		return;
-	}
-	pfn = spte_to_pfn(*sptep);
-	if (*sptep & shadow_accessed_mask)
+	pfn = spte_to_pfn(old_spte);
+	if (old_spte & shadow_accessed_mask)
 		kvm_set_pfn_accessed(pfn);
-	if (is_writable_pte(*sptep))
+	if (is_writable_pte(old_spte))
 		kvm_set_pfn_dirty(pfn);
 	rmap_remove(kvm, sptep);
-	__set_spte(sptep, new_spte);
 }
 
 static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
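A note on the 32-bit path of __xchg_spte() above: i386 has no native 64-bit
xchg, so the fallback emulates one with a cmpxchg64() retry loop, snapshotting
the old value and retrying if another CPU (or the hardware page walker)
changed the spte in between. The same pattern in standalone C, with a
GCC/Clang __atomic builtin standing in for the kernel's cmpxchg64():

#include <stdint.h>

/* Standalone sketch of the 32-bit fallback pattern; the __atomic
 * builtin stands in for the kernel's cmpxchg64(). */
static uint64_t xchg64_emulated(uint64_t *p, uint64_t new_val)
{
	uint64_t old_val;

	do {
		/* snapshot the current value */
		old_val = __atomic_load_n(p, __ATOMIC_RELAXED);
		/* swap only if *p still equals old_val; otherwise
		 * another CPU won the race and we retry */
	} while (!__atomic_compare_exchange_n(p, &old_val, new_val,
					      0, __ATOMIC_SEQ_CST,
					      __ATOMIC_SEQ_CST));
	return old_val;
}

On x86-64 the xchg instruction is implicitly locked, so the CONFIG_X86_64
branch gets the same effect in a single instruction.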