Commit e4cd1da9 authored by Paolo Bonzini

KVM: x86: pass struct kvm_mmu_page to gfn_to_rmap

The struct kvm_mmu_page is always available (with one exception in the
auditing code), and with the same exception the level was already being
taken from sp->role.level.

Later, the spte's role will also be used to look up the right memslots
array.
Reviewed-by: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent f481b069
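
As context for the last sentence of the commit message, here is a minimal
sketch of where the new signature is headed, assuming a hypothetical helper
kvm_memslots_for_spte_role() that selects a memslots array from the page's
role (the helper name is illustrative; this commit only changes the
signature and callers):

	/*
	 * Sketch only -- not part of this commit.  With the shadow page
	 * available, a follow-up can pick the memslots array from its
	 * role instead of unconditionally calling gfn_to_memslot():
	 */
	static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn,
					  struct kvm_mmu_page *sp)
	{
		struct kvm_memslots *slots;
		struct kvm_memory_slot *slot;

		/* hypothetical helper: derive the memslots array from sp->role */
		slots = kvm_memslots_for_spte_role(kvm, sp->role);
		slot = __gfn_to_memslot(slots, gfn);
		return __gfn_to_rmap(gfn, sp->role.level, slot);
	}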
@@ -1043,12 +1043,12 @@ static unsigned long *__gfn_to_rmap(gfn_t gfn, int level,
 /*
  * Take gfn and return the reverse mapping to it.
  */
-static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
+static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, struct kvm_mmu_page *sp)
 {
 	struct kvm_memory_slot *slot;

 	slot = gfn_to_memslot(kvm, gfn);
-	return __gfn_to_rmap(gfn, level, slot);
+	return __gfn_to_rmap(gfn, sp->role.level, slot);
 }

 static bool rmap_can_add(struct kvm_vcpu *vcpu)
@@ -1066,7 +1066,7 @@ static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 	sp = page_header(__pa(spte));
 	kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
-	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
+	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp);
 	return pte_list_add(vcpu, spte, rmapp);
 }
@@ -1078,7 +1078,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 	sp = page_header(__pa(spte));
 	gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
-	rmapp = gfn_to_rmap(kvm, gfn, sp->role.level);
+	rmapp = gfn_to_rmap(kvm, gfn, sp);
 	pte_list_remove(spte, rmapp);
 }
@@ -1612,7 +1612,7 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 	sp = page_header(__pa(spte));
-	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
+	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp);
 	kvm_unmap_rmapp(vcpu->kvm, rmapp, NULL, gfn, sp->role.level, 0);
 	kvm_flush_remote_tlbs(vcpu->kvm);
...
@@ -146,7 +146,7 @@ static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
 		return;
 	}
-	rmapp = gfn_to_rmap(kvm, gfn, rev_sp->role.level);
+	rmapp = gfn_to_rmap(kvm, gfn, rev_sp);
 	if (!*rmapp) {
 		if (!__ratelimit(&ratelimit_state))
 			return;
@@ -191,11 +191,15 @@ static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
 	unsigned long *rmapp;
 	u64 *sptep;
 	struct rmap_iterator iter;
+	struct kvm_memslots *slots;
+	struct kvm_memory_slot *slot;

 	if (sp->role.direct || sp->unsync || sp->role.invalid)
 		return;

-	rmapp = gfn_to_rmap(kvm, sp->gfn, PT_PAGE_TABLE_LEVEL);
+	slots = kvm_memslots(kvm);
+	slot = __gfn_to_memslot(slots, sp->gfn);
+	rmapp = __gfn_to_rmap(sp->gfn, PT_PAGE_TABLE_LEVEL, slot);

 	for_each_rmap_spte(rmapp, &iter, sptep)
 		if (is_writable_pte(*sptep))
...
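
The final hunk is the one exception called out in the commit message:
audit_write_protection() wants the PT_PAGE_TABLE_LEVEL rmap for sp->gfn
regardless of sp->role.level, so it cannot go through the new gfn_to_rmap()
and instead open-codes the memslot lookup. A sketch of the two call
patterns after this patch, using only functions that appear in the diff
above (variable declarations as in the surrounding functions):

	/* Normal path: the rmap level is implied by the shadow page. */
	sp = page_header(__pa(spte));
	rmapp = gfn_to_rmap(kvm, gfn, sp);	/* uses sp->role.level */

	/* Audit path: force the 4K level, bypassing the wrapper. */
	slots = kvm_memslots(kvm);
	slot = __gfn_to_memslot(slots, sp->gfn);
	rmapp = __gfn_to_rmap(sp->gfn, PT_PAGE_TABLE_LEVEL, slot);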