Commit 8672b721 authored by Xiao Guangrong's avatar Xiao Guangrong Committed by Avi Kivity

KVM: MMU: move bits lost judgement into a separate function

Introduce the spte_has_volatile_bits() function to judge whether spte
bits may be lost; this is more readable and will help us clean up the
code later.
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 251464c4
...@@ -299,6 +299,20 @@ static u64 __xchg_spte(u64 *sptep, u64 new_spte) ...@@ -299,6 +299,20 @@ static u64 __xchg_spte(u64 *sptep, u64 new_spte)
#endif #endif
} }
/*
 * Report whether @spte carries bits that the hardware may set behind
 * our back (i.e. bits that would be lost by a plain non-atomic write).
 *
 * The spte is volatile only when all of the following hold:
 *   - the accessed bit is tracked in the spte (shadow_accessed_mask != 0;
 *     with EPT A/D disabled there is nothing the CPU can flip),
 *   - the spte is present, so the CPU can actually walk through it,
 *   - the accessed bit is still clear, so the CPU may yet set it.
 */
static bool spte_has_volatile_bits(u64 spte)
{
	return shadow_accessed_mask &&
	       is_shadow_present_pte(spte) &&
	       !(spte & shadow_accessed_mask);
}
static void update_spte(u64 *sptep, u64 new_spte) static void update_spte(u64 *sptep, u64 new_spte)
{ {
u64 old_spte; u64 old_spte;
...@@ -679,14 +693,14 @@ static void set_spte_track_bits(u64 *sptep, u64 new_spte) ...@@ -679,14 +693,14 @@ static void set_spte_track_bits(u64 *sptep, u64 new_spte)
pfn_t pfn; pfn_t pfn;
u64 old_spte = *sptep; u64 old_spte = *sptep;
if (!shadow_accessed_mask || !is_shadow_present_pte(old_spte) || if (!spte_has_volatile_bits(old_spte))
old_spte & shadow_accessed_mask) {
__set_spte(sptep, new_spte); __set_spte(sptep, new_spte);
} else else
old_spte = __xchg_spte(sptep, new_spte); old_spte = __xchg_spte(sptep, new_spte);
if (!is_rmap_spte(old_spte)) if (!is_rmap_spte(old_spte))
return; return;
pfn = spte_to_pfn(old_spte); pfn = spte_to_pfn(old_spte);
if (!shadow_accessed_mask || old_spte & shadow_accessed_mask) if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
kvm_set_pfn_accessed(pfn); kvm_set_pfn_accessed(pfn);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment