Commit 14f47605 authored by Yu Zhao's avatar Yu Zhao Committed by Paolo Bonzini

kvm: set page dirty only if page has been writable

In absence of shadow dirty mask, there is no need to set page dirty
if page has never been writable. This is a tiny optimization but
good to have for people who care much about dirty page tracking.
Signed-off-by: Yu Zhao <yuzhao@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 14ebda33
@@ -557,8 +557,15 @@ static bool mmu_spte_update(u64 *sptep, u64 new_spte)
 	    !is_writable_pte(new_spte))
 		ret = true;
 
-	if (!shadow_accessed_mask)
+	if (!shadow_accessed_mask) {
+		/*
+		 * We don't set page dirty when dropping non-writable spte.
+		 * So do it now if the new spte is becoming non-writable.
+		 */
+		if (ret)
+			kvm_set_pfn_dirty(spte_to_pfn(old_spte));
 		return ret;
+	}
 
 	/*
 	 * Flush TLB when accessed/dirty bits are changed in the page tables,
@@ -605,7 +612,8 @@ static int mmu_spte_clear_track_bits(u64 *sptep)
 	if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
 		kvm_set_pfn_accessed(pfn);
-	if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
+	if (old_spte & (shadow_dirty_mask ? shadow_dirty_mask :
+			PT_WRITABLE_MASK))
 		kvm_set_pfn_dirty(pfn);
 
 	return 1;
 }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment