Commit 7eb77e9f authored by Junaid Shahid, committed by Paolo Bonzini

kvm: x86: Add a root_hpa parameter to kvm_mmu->invlpg()

This allows invlpg() to be called using either the active root_hpa
or the prev_root_hpa.
Signed-off-by: Junaid Shahid <junaids@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent ade61e28
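
For illustration only (not part of this commit): with the extra parameter, the same invlpg() implementation can flush a guest virtual address from whichever shadow root the caller names, not just the active one. A minimal sketch of the intended call pattern, assuming a cached previous root with an .hpa field as introduced by later patches in this series (the prev_root name is an assumption here):

	struct kvm_mmu *mmu = &vcpu->arch.mmu;

	/* Flush gva's translation under the active root. */
	mmu->invlpg(vcpu, gva, mmu->root_hpa);

	/* Hypothetical: also flush it under a cached previous root, so
	 * no stale translation survives a fast CR3 switch back to it.
	 */
	if (VALID_PAGE(mmu->prev_root.hpa))
		mmu->invlpg(vcpu, gva, mmu->prev_root.hpa);

Before this change, invlpg() always walked vcpu->arch.mmu.root_hpa, so the second call was not expressible.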
@@ -354,7 +354,7 @@ struct kvm_mmu {
 			   struct x86_exception *exception);
 	int (*sync_page)(struct kvm_vcpu *vcpu,
 			 struct kvm_mmu_page *sp);
-	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
+	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa);
 	void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 			   u64 *spte, const void *pte);
 	hpa_t root_hpa;
...
@@ -189,6 +189,12 @@ static const union kvm_mmu_page_role mmu_base_role_mask = {
 	.ad_disabled = 1,
 };
 
+#define for_each_shadow_entry_using_root(_vcpu, _root, _addr, _walker)	\
+	for (shadow_walk_init_using_root(&(_walker), (_vcpu),		\
+					 (_root), (_addr));		\
+	     shadow_walk_okay(&(_walker));				\
+	     shadow_walk_next(&(_walker)))
+
 #define for_each_shadow_entry(_vcpu, _addr, _walker)		\
 	for (shadow_walk_init(&(_walker), _vcpu, _addr);	\
 	     shadow_walk_okay(&(_walker));			\
@@ -1999,7 +2005,7 @@ static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
+static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root)
 {
 }
@@ -2405,11 +2411,12 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	return sp;
 }
 
-static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
-			     struct kvm_vcpu *vcpu, u64 addr)
+static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterator,
+					struct kvm_vcpu *vcpu, hpa_t root,
+					u64 addr)
 {
 	iterator->addr = addr;
-	iterator->shadow_addr = vcpu->arch.mmu.root_hpa;
+	iterator->shadow_addr = root;
 	iterator->level = vcpu->arch.mmu.shadow_root_level;
 
 	if (iterator->level == PT64_ROOT_4LEVEL &&
@@ -2418,6 +2425,12 @@ static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
 		--iterator->level;
 
 	if (iterator->level == PT32E_ROOT_LEVEL) {
+		/*
+		 * prev_root is currently only used for 64-bit hosts. So only
+		 * the active root_hpa is valid here.
+		 */
+		BUG_ON(root != vcpu->arch.mmu.root_hpa);
+
 		iterator->shadow_addr
 			= vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
 		iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
@@ -2427,6 +2440,13 @@ static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
 	}
 }
 
+static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
+			     struct kvm_vcpu *vcpu, u64 addr)
+{
+	shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu.root_hpa,
+				    addr);
+}
+
 static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
 {
 	if (iterator->level < PT_PAGE_TABLE_LEVEL)
@@ -5186,7 +5206,9 @@ EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
 
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
 {
-	vcpu->arch.mmu.invlpg(vcpu, gva);
+	struct kvm_mmu *mmu = &vcpu->arch.mmu;
+
+	mmu->invlpg(vcpu, gva, mmu->root_hpa);
 	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 	++vcpu->stat.invlpg;
 }
@@ -5197,7 +5219,7 @@ void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
 	struct kvm_mmu *mmu = &vcpu->arch.mmu;
 
 	if (pcid == kvm_get_active_pcid(vcpu)) {
-		mmu->invlpg(vcpu, gva);
+		mmu->invlpg(vcpu, gva, mmu->root_hpa);
 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 	}
...
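
In kvm_mmu_invpcid_gva() above, the flush still happens only when the given pcid matches the active pcid. A hedged sketch of the kind of follow-up this parameter enables, assuming a prev_root cache with .hpa and .cr3 fields and a kvm_get_pcid() helper (all assumptions here, not defined by this commit):

	if (pcid == kvm_get_active_pcid(vcpu)) {
		mmu->invlpg(vcpu, gva, mmu->root_hpa);
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}

	/* Hypothetical: the previous root's pcid may match instead. */
	if (VALID_PAGE(mmu->prev_root.hpa) &&
	    pcid == kvm_get_pcid(vcpu, mmu->prev_root.cr3))
		mmu->invlpg(vcpu, gva, mmu->prev_root.hpa);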
@@ -856,7 +856,7 @@ static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
 	return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
 }
 
-static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
+static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
 {
 	struct kvm_shadow_walk_iterator iterator;
 	struct kvm_mmu_page *sp;
@@ -871,13 +871,13 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 	 */
 	mmu_topup_memory_caches(vcpu);
 
-	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
+	if (!VALID_PAGE(root_hpa)) {
 		WARN_ON(1);
 		return;
 	}
 
 	spin_lock(&vcpu->kvm->mmu_lock);
-	for_each_shadow_entry(vcpu, gva, iterator) {
+	for_each_shadow_entry_using_root(vcpu, root_hpa, gva, iterator) {
 		level = iterator.level;
 		sptep = iterator.sptep;
...