Commit 4d88954d authored by Marcelo Tosatti, committed by Avi Kivity

KVM: MMU: make for_each_shadow_entry aware of largepages

This way there is no need to add explicit checks in every
for_each_shadow_entry user.
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent e799794e
@@ -1302,6 +1302,11 @@ static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
 {
 	if (iterator->level < PT_PAGE_TABLE_LEVEL)
 		return false;
+
+	if (iterator->level == PT_PAGE_TABLE_LEVEL)
+		if (is_large_pte(*iterator->sptep))
+			return false;
+
 	iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
 	iterator->sptep	= ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
 	return true;
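To illustrate the effect on callers, here is a minimal sketch (not part of this patch) of a for_each_shadow_entry() user. The helper name example_lowest_spte() is hypothetical; the for_each_shadow_entry() macro and the kvm_shadow_walk_iterator fields (level, sptep) are the ones already used by the shadow MMU code. With the large-page check folded into shadow_walk_okay(), the loop terminates by itself at a large spte, so the caller no longer needs its own is_large_pte() break:

/*
 * Hypothetical example, not from this patch: walk the shadow page
 * table for a guest virtual address and return the last spte slot
 * visited.  Because shadow_walk_okay() now refuses to descend past a
 * large spte, the final entry is either the 4K leaf slot or the
 * large-page spte itself, with no explicit check in the loop body.
 */
static u64 *example_lowest_spte(struct kvm_vcpu *vcpu, gva_t gva)
{
	struct kvm_shadow_walk_iterator iterator;
	u64 *sptep = NULL;

	for_each_shadow_entry(vcpu, gva, iterator)
		sptep = iterator.sptep;

	return sptep;
}

Before this change every such walker had to test is_large_pte() on the current spte and break out of the loop itself; that logic now lives in one place inside the iterator.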