Commit 61719a8f authored by Gleb Natapov, committed by Paolo Bonzini

nEPT: Support shadow paging for guest paging without A/D bits

Some guest paging modes do not support A/D bits. Add support for such
modes in shadow page code. For such modes PT_GUEST_DIRTY_MASK,
PT_GUEST_ACCESSED_MASK, PT_GUEST_DIRTY_SHIFT and PT_GUEST_ACCESSED_SHIFT
should be set to zero.
Reviewed-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent d8089bac
...@@ -92,6 +92,10 @@ static inline void FNAME(protect_clean_gpte)(unsigned *access, unsigned gpte) ...@@ -92,6 +92,10 @@ static inline void FNAME(protect_clean_gpte)(unsigned *access, unsigned gpte)
{ {
unsigned mask; unsigned mask;
/* dirty bit is not supported, so no need to track it */
if (!PT_GUEST_DIRTY_MASK)
return;
BUILD_BUG_ON(PT_WRITABLE_MASK != ACC_WRITE_MASK); BUILD_BUG_ON(PT_WRITABLE_MASK != ACC_WRITE_MASK);
mask = (unsigned)~ACC_WRITE_MASK; mask = (unsigned)~ACC_WRITE_MASK;
...@@ -147,7 +151,8 @@ static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu, ...@@ -147,7 +151,8 @@ static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
if (!FNAME(is_present_gpte)(gpte)) if (!FNAME(is_present_gpte)(gpte))
goto no_present; goto no_present;
if (!(gpte & PT_GUEST_ACCESSED_MASK)) /* if accessed bit is not supported prefetch non accessed gpte */
if (PT_GUEST_ACCESSED_MASK && !(gpte & PT_GUEST_ACCESSED_MASK))
goto no_present; goto no_present;
return false; return false;
...@@ -178,6 +183,10 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu, ...@@ -178,6 +183,10 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
gfn_t table_gfn; gfn_t table_gfn;
int ret; int ret;
/* dirty/accessed bits are not supported, so no need to update them */
if (!PT_GUEST_DIRTY_MASK)
return 0;
for (level = walker->max_level; level >= walker->level; --level) { for (level = walker->max_level; level >= walker->level; --level) {
pte = orig_pte = walker->ptes[level - 1]; pte = orig_pte = walker->ptes[level - 1];
table_gfn = walker->table_gfn[level - 1]; table_gfn = walker->table_gfn[level - 1];
...@@ -316,8 +325,9 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker, ...@@ -316,8 +325,9 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
FNAME(protect_clean_gpte)(&pte_access, pte); FNAME(protect_clean_gpte)(&pte_access, pte);
else else
/* /*
* On a write fault, fold the dirty bit into accessed_dirty by * On a write fault, fold the dirty bit into accessed_dirty.
* shifting it one place right. * For modes without A/D bits support accessed_dirty will be
* always clear.
*/ */
accessed_dirty &= pte >> accessed_dirty &= pte >>
(PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT); (PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment