Commit 17eff019 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86/mmu: Refactor THP adjust to prep for changing query

Refactor transparent_hugepage_adjust() in preparation for walking the
host page tables to identify hugepage mappings, initially for THP pages,
and eventually for HugeTLB and DAX-backed pages as well.  The latter
cases support 1GB pages, i.e. the adjustment logic needs access to the
max allowed level.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 13c72c06
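The adjustment at the heart of the refactored helper is pure mask arithmetic: the low bits of the gfn and pfn must agree within the hugepage, and the pfn is rounded down to the head page of the hugepage. A minimal standalone sketch of that arithmetic for a 2MiB mapping follows; PAGES_PER_2M and align_pfn() are illustrative names standing in for KVM_PAGES_PER_HPAGE() and the tail of transparent_hugepage_adjust(), not kernel symbols.

/*
 * Standalone sketch (not kernel code) of the pfn-alignment arithmetic in
 * transparent_hugepage_adjust(); PAGES_PER_2M and align_pfn() are
 * illustrative stand-ins, not KVM symbols.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGES_PER_2M	(1ULL << 9)	/* 512 4KiB pages per 2MiB mapping */

static uint64_t align_pfn(uint64_t gfn, uint64_t pfn, uint64_t pages_per_hpage)
{
	uint64_t mask = pages_per_hpage - 1;

	/* Mirrors VM_BUG_ON((gfn & mask) != (pfn & mask)) in the kernel. */
	assert((gfn & mask) == (pfn & mask));

	/* Round the pfn down to the start of the hugepage. */
	return pfn & ~mask;
}

int main(void)
{
	/* gfn and pfn share the same offset (0x1a3) within their 2MiB region. */
	uint64_t gfn = 0x401a3, pfn = 0x7c1a3;

	printf("2M-aligned pfn: %#llx\n",
	       (unsigned long long)align_pfn(gfn, pfn, PAGES_PER_2M));
	return 0;
}

Compiled and run, this prints 2M-aligned pfn: 0x7c000, i.e. the pfn of the head page through which the whole 2MiB range is mapped.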
@@ -3329,33 +3329,34 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
 	__direct_pte_prefetch(vcpu, sp, sptep);
 }
 
-static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
-					gfn_t gfn, kvm_pfn_t *pfnp,
+static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
+					int max_level, kvm_pfn_t *pfnp,
 					int *levelp)
 {
 	kvm_pfn_t pfn = *pfnp;
 	int level = *levelp;
+	kvm_pfn_t mask;
+
+	if (max_level == PT_PAGE_TABLE_LEVEL || level > PT_PAGE_TABLE_LEVEL)
+		return;
+
+	if (is_error_noslot_pfn(pfn) || kvm_is_reserved_pfn(pfn) ||
+	    kvm_is_zone_device_pfn(pfn))
+		return;
+
+	if (!kvm_is_transparent_hugepage(pfn))
+		return;
+
+	level = PT_DIRECTORY_LEVEL;
 
 	/*
-	 * Check if it's a transparent hugepage. If this would be an
-	 * hugetlbfs page, level wouldn't be set to
-	 * PT_PAGE_TABLE_LEVEL and there would be no adjustment done
-	 * here.
+	 * mmu_notifier_retry() was successful and mmu_lock is held, so
+	 * the pmd can't be split from under us.
 	 */
-	if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
-	    !kvm_is_zone_device_pfn(pfn) && level == PT_PAGE_TABLE_LEVEL &&
-	    kvm_is_transparent_hugepage(pfn)) {
-		unsigned long mask;
-
-		/*
-		 * mmu_notifier_retry() was successful and mmu_lock is held, so
-		 * the pmd can't be split from under us.
-		 */
-		*levelp = level = PT_DIRECTORY_LEVEL;
-		mask = KVM_PAGES_PER_HPAGE(level) - 1;
-		VM_BUG_ON((gfn & mask) != (pfn & mask));
-		*pfnp = pfn & ~mask;
-	}
+	*levelp = level;
+	mask = KVM_PAGES_PER_HPAGE(level) - 1;
+	VM_BUG_ON((gfn & mask) != (pfn & mask));
+	*pfnp = pfn & ~mask;
 }
 
 static void disallowed_hugepage_adjust(struct kvm_shadow_walk_iterator it,
@@ -3395,8 +3396,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write,
 	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
 		return RET_PF_RETRY;
 
-	if (likely(max_level > PT_PAGE_TABLE_LEVEL))
-		transparent_hugepage_adjust(vcpu, gfn, &pfn, &level);
+	transparent_hugepage_adjust(vcpu, gfn, max_level, &pfn, &level);
 
 	trace_kvm_mmu_spte_requested(gpa, level, pfn);
 	for_each_shadow_entry(vcpu, gpa, it) {
@@ -688,8 +688,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
 	gfn = gw->gfn | ((addr & PT_LVL_OFFSET_MASK(gw->level)) >> PAGE_SHIFT);
 	base_gfn = gfn;
 
-	if (max_level > PT_PAGE_TABLE_LEVEL)
-		transparent_hugepage_adjust(vcpu, gw->gfn, &pfn, &hlevel);
+	transparent_hugepage_adjust(vcpu, gw->gfn, max_level, &pfn, &hlevel);
 
 	trace_kvm_mmu_spte_requested(addr, gw->level, pfn);
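Beyond moving the max_level check into the helper, the refactor flattens the old single nested conditional into early returns, which is what makes room for the upcoming host-page-table walk. A simplified, self-contained restatement of that control flow is sketched below; pfn_is_usable() and pfn_is_thp() are illustrative stubs for the error/reserved/zone-device screening and kvm_is_transparent_hugepage(), and the constants are hard-coded for a 4KiB/2MiB layout.

/*
 * Standalone sketch (not kernel code) of the early-return structure the
 * refactor gives transparent_hugepage_adjust(); the stubs below are
 * illustrative assumptions, not KVM helpers.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PT_PAGE_TABLE_LEVEL	1		/* 4KiB mappings */
#define PT_DIRECTORY_LEVEL	2		/* 2MiB mappings */
#define PAGES_PER_2M		(1ULL << 9)	/* KVM_PAGES_PER_HPAGE(2) stand-in */

/* Stub for the error/reserved/zone-device pfn screening. */
static bool pfn_is_usable(uint64_t pfn) { return pfn != 0; }

/* Stub for kvm_is_transparent_hugepage(). */
static bool pfn_is_thp(uint64_t pfn) { (void)pfn; return true; }

static void thp_adjust(int max_level, uint64_t *pfnp, int *levelp)
{
	uint64_t mask = PAGES_PER_2M - 1;

	/* Caller forbids hugepages, or the mapping is already huge. */
	if (max_level == PT_PAGE_TABLE_LEVEL || *levelp > PT_PAGE_TABLE_LEVEL)
		return;
	if (!pfn_is_usable(*pfnp) || !pfn_is_thp(*pfnp))
		return;

	*levelp = PT_DIRECTORY_LEVEL;
	*pfnp &= ~mask;				/* align to the 2MiB head page */
}

int main(void)
{
	uint64_t pfn = 0x7c1a3;
	int level = PT_PAGE_TABLE_LEVEL;

	thp_adjust(PT_DIRECTORY_LEVEL, &pfn, &level);
	printf("level=%d pfn=%#llx\n", level, (unsigned long long)pfn);
	return 0;
}

With max_level passed as PT_PAGE_TABLE_LEVEL instead, the helper returns without touching level or pfn, which is exactly the guard the two call sites used to open-code.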