Commit 2d11123a authored by Avi Kivity

KVM: MMU: Add for_each_shadow_entry(), a simpler alternative to walk_shadow()

Using a for_each loop style removes the need to write callbacks and nasty
casts.

Implement walk_shadow() using for_each_shadow_entry().
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 971cc3dc
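
As the commit message notes, callers can now open-code a shadow walk instead of packaging their per-entry logic as a kvm_shadow_walk callback. The sketch below is purely illustrative and not part of this patch: dump_shadow_entries() is a hypothetical helper that assumes only the iterator API added here and that the caller already holds the usual MMU locking.

	/*
	 * Illustrative sketch only (not in this patch): walk the shadow page
	 * tables for @addr with the new iterator and print each shadow pte
	 * encountered. Assumes the caller holds the appropriate MMU lock.
	 */
	static void dump_shadow_entries(struct kvm_vcpu *vcpu, u64 addr)
	{
		struct kvm_shadow_walk_iterator iterator;

		for_each_shadow_entry(vcpu, addr, iterator)
			printk(KERN_DEBUG "level %d index %u spte %llx\n",
			       iterator.level, iterator.index,
			       (unsigned long long)*iterator.sptep);
	}

With the old walk_shadow() style, per-entry state had to be carried in a struct embedding the kvm_shadow_walk and recovered with container_of()-style casts inside the callback; with the iterator, the per-entry logic simply lives in the loop body.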
@@ -150,6 +150,20 @@ struct kvm_shadow_walk {
 		     u64 addr, u64 *spte, int level);
 };
 
+struct kvm_shadow_walk_iterator {
+	u64 addr;
+	hpa_t shadow_addr;
+	int level;
+	u64 *sptep;
+	unsigned index;
+};
+
+#define for_each_shadow_entry(_vcpu, _addr, _walker)		\
+	for (shadow_walk_init(&(_walker), _vcpu, _addr);	\
+	     shadow_walk_okay(&(_walker));			\
+	     shadow_walk_next(&(_walker)))
+
 struct kvm_unsync_walk {
 	int (*entry) (struct kvm_mmu_page *sp, struct kvm_unsync_walk *walk);
 };
@@ -1254,33 +1268,48 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	return sp;
 }
 
+static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
+			     struct kvm_vcpu *vcpu, u64 addr)
+{
+	iterator->addr = addr;
+	iterator->shadow_addr = vcpu->arch.mmu.root_hpa;
+	iterator->level = vcpu->arch.mmu.shadow_root_level;
+	if (iterator->level == PT32E_ROOT_LEVEL) {
+		iterator->shadow_addr
+			= vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
+		iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
+		--iterator->level;
+		if (!iterator->shadow_addr)
+			iterator->level = 0;
+	}
+}
+
+static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
+{
+	if (iterator->level < PT_PAGE_TABLE_LEVEL)
+		return false;
+	iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
+	iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
+	return true;
+}
+
+static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
+{
+	iterator->shadow_addr = *iterator->sptep & PT64_BASE_ADDR_MASK;
+	--iterator->level;
+}
+
 static int walk_shadow(struct kvm_shadow_walk *walker,
 		       struct kvm_vcpu *vcpu, u64 addr)
 {
-	hpa_t shadow_addr;
-	int level;
+	struct kvm_shadow_walk_iterator iterator;
 	int r;
-	u64 *sptep;
-	unsigned index;
-
-	shadow_addr = vcpu->arch.mmu.root_hpa;
-	level = vcpu->arch.mmu.shadow_root_level;
-	if (level == PT32E_ROOT_LEVEL) {
-		shadow_addr = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
-		shadow_addr &= PT64_BASE_ADDR_MASK;
-		if (!shadow_addr)
-			return 1;
-		--level;
-	}
 
-	while (level >= PT_PAGE_TABLE_LEVEL) {
-		index = SHADOW_PT_INDEX(addr, level);
-		sptep = ((u64 *)__va(shadow_addr)) + index;
-		r = walker->entry(walker, vcpu, addr, sptep, level);
+	for_each_shadow_entry(vcpu, addr, iterator) {
+		r = walker->entry(walker, vcpu, addr,
+				  iterator.sptep, iterator.level);
 		if (r)
 			return r;
-		shadow_addr = *sptep & PT64_BASE_ADDR_MASK;
-		--level;
 	}
-
 	return 0;
 }