Commit 2c0629f4 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Remove lpage_is_disallowed() check from set_spte()

Remove the late "lpage is disallowed" check from set_spte() now that the
initial check is performed after acquiring mmu_lock.  Fold the guts of
the remaining helper, __mmu_gfn_lpage_is_disallowed(), into
kvm_mmu_hugepage_adjust() to eliminate the unnecessary slot !NULL check.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 293e306e
...@@ -1264,28 +1264,6 @@ static void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
list_del(&sp->lpage_disallowed_link); list_del(&sp->lpage_disallowed_link);
} }
/*
 * Return true if mapping @gfn with a large page at @level is disallowed.
 * A NULL @slot is conservatively treated as "disallowed"; otherwise the
 * slot's per-level lpage_info disallow count decides.
 */
static bool __mmu_gfn_lpage_is_disallowed(gfn_t gfn, int level,
					  struct kvm_memory_slot *slot)
{
	if (!slot)
		return true;

	return !!lpage_info_slot(gfn, slot, level)->disallow_lpage;
}
/*
 * vCPU wrapper: resolve @gfn's memslot via the vCPU's view of memory and
 * check whether a large page at @level is disallowed for that gfn.
 */
static bool mmu_gfn_lpage_is_disallowed(struct kvm_vcpu *vcpu, gfn_t gfn,
					int level)
{
	return __mmu_gfn_lpage_is_disallowed(gfn, level,
					     kvm_vcpu_gfn_to_memslot(vcpu, gfn));
}
static inline bool memslot_valid_for_gpte(struct kvm_memory_slot *slot, static inline bool memslot_valid_for_gpte(struct kvm_memory_slot *slot,
bool no_dirty_log) bool no_dirty_log)
{ {
...@@ -3078,18 +3056,6 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, ...@@ -3078,18 +3056,6 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
spte |= (u64)pfn << PAGE_SHIFT; spte |= (u64)pfn << PAGE_SHIFT;
if (pte_access & ACC_WRITE_MASK) { if (pte_access & ACC_WRITE_MASK) {
/*
* Legacy code to handle an obsolete scenario where a different
* vcpu creates new sp in the window between this vcpu's query
* of lpage_is_disallowed() and acquiring mmu_lock. No longer
* necessary now that lpage_is_disallowed() is called after
* acquiring mmu_lock.
*/
if (level > PT_PAGE_TABLE_LEVEL &&
mmu_gfn_lpage_is_disallowed(vcpu, gfn, level))
goto done;
spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE; spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE;
/* /*
...@@ -3121,7 +3087,6 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, ...@@ -3121,7 +3087,6 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
set_pte: set_pte:
if (mmu_spte_update(sptep, spte)) if (mmu_spte_update(sptep, spte))
ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH; ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
done:
return ret; return ret;
} }
...@@ -3309,6 +3274,7 @@ static int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn, ...@@ -3309,6 +3274,7 @@ static int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
int max_level, kvm_pfn_t *pfnp) int max_level, kvm_pfn_t *pfnp)
{ {
struct kvm_memory_slot *slot; struct kvm_memory_slot *slot;
struct kvm_lpage_info *linfo;
kvm_pfn_t pfn = *pfnp; kvm_pfn_t pfn = *pfnp;
kvm_pfn_t mask; kvm_pfn_t mask;
int level; int level;
...@@ -3326,7 +3292,8 @@ static int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn, ...@@ -3326,7 +3292,8 @@ static int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
max_level = min(max_level, kvm_x86_ops->get_lpage_level()); max_level = min(max_level, kvm_x86_ops->get_lpage_level());
for ( ; max_level > PT_PAGE_TABLE_LEVEL; max_level--) { for ( ; max_level > PT_PAGE_TABLE_LEVEL; max_level--) {
if (!__mmu_gfn_lpage_is_disallowed(gfn, max_level, slot)) linfo = lpage_info_slot(gfn, slot, max_level);
if (!linfo->disallow_lpage)
break; break;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment