Commit 6c882ef4 authored by David Matlack, committed by Paolo Bonzini

KVM: x86/mmu: Rename __direct_map() to direct_map()

Rename __direct_map() to direct_map() since the leading underscores are
unnecessary. This also makes the page fault handler names more
consistent: kvm_tdp_mmu_page_fault() calls kvm_tdp_mmu_map() and
direct_page_fault() calls direct_map().
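
Roughly, the resulting pairing looks like this (a simplified sketch of
the two call paths; locking, retries, and error handling are elided):

	static int direct_page_fault(struct kvm_vcpu *vcpu,
				     struct kvm_page_fault *fault)
	{
		/* ... fault setup, mmu_lock held ... */
		r = direct_map(vcpu, fault);	/* was __direct_map() */
		/* ... */
	}

	static int kvm_tdp_mmu_page_fault(struct kvm_vcpu *vcpu,
					  struct kvm_page_fault *fault)
	{
		/* ... fault setup, mmu_lock held ... */
		r = kvm_tdp_mmu_map(vcpu, fault);
		/* ... */
	}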

Opportunistically make some trivial cleanups to comments that had to be
modified anyway since they mentioned __direct_map(). Specifically, use
"()" when referring to functions, and include kvm_tdp_mmu_map() among
the various callers of disallowed_hugepage_adjust().
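
For background on that comment: disallowed_hugepage_adjust() forces the
mapper down one level and copies the next 9 bits of the gfn into the
pfn. A minimal standalone sketch of the mask arithmetic, where
pages_per_hpage() is an illustrative stand-in for the kernel's
KVM_PAGES_PER_HPAGE() and the frame numbers are made up:

	#include <stdio.h>
	#include <stdint.h>

	/* Pages covered by a huge page at 'level' (1 = 4KiB, 2 = 2MiB, 3 = 1GiB). */
	static uint64_t pages_per_hpage(int level)
	{
		return 1ULL << ((level - 1) * 9);
	}

	int main(void)
	{
		int cur_level = 2;                     /* stepping down from a 2MiB mapping */
		uint64_t gfn = 0x12345, pfn = 0x98000; /* example frame numbers */

		/* Mask selecting the next 9 address bits below cur_level:
		 * 512 - 1 = 0x1ff for cur_level == 2.
		 */
		uint64_t page_mask = pages_per_hpage(cur_level) -
				     pages_per_hpage(cur_level - 1);

		/* "patching back for them into pfn the next 9 bits" */
		pfn |= gfn & page_mask;

		printf("page_mask = %#llx, adjusted pfn = %#llx\n",
		       (unsigned long long)page_mask, (unsigned long long)pfn);
		return 0;
	}

With the values above this prints page_mask = 0x1ff and an adjusted pfn
of 0x98145, i.e. the low 9 bits of the gfn copied into the pfn.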

No functional change intended.
Signed-off-by: David Matlack <dmatlack@google.com>
Reviewed-by: Isaku Yamahata <isaku.yamahata@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220921173546.2674386-11-dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 9f33697a
@@ -3131,11 +3131,11 @@ void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_
 	    !is_large_pte(spte) &&
 	    spte_to_child_sp(spte)->nx_huge_page_disallowed) {
 		/*
-		 * A small SPTE exists for this pfn, but FNAME(fetch)
-		 * and __direct_map would like to create a large PTE
-		 * instead: just force them to go down another level,
-		 * patching back for them into pfn the next 9 bits of
-		 * the address.
+		 * A small SPTE exists for this pfn, but FNAME(fetch),
+		 * direct_map(), or kvm_tdp_mmu_map() would like to create a
+		 * large PTE instead: just force them to go down another level,
+		 * patching back for them into pfn the next 9 bits of the
+		 * address.
 		 */
 		u64 page_mask = KVM_PAGES_PER_HPAGE(cur_level) -
 				KVM_PAGES_PER_HPAGE(cur_level - 1);
@@ -3144,7 +3144,7 @@ void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_
 	}
 }
 
-static int __direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
+static int direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 {
 	struct kvm_shadow_walk_iterator it;
 	struct kvm_mmu_page *sp;
@@ -4330,7 +4330,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 	if (r)
 		goto out_unlock;
 
-	r = __direct_map(vcpu, fault);
+	r = direct_map(vcpu, fault);
 
 out_unlock:
 	write_unlock(&vcpu->kvm->mmu_lock);
...
@@ -199,7 +199,7 @@ struct kvm_page_fault {
 	/*
 	 * Maximum page size that can be created for this fault; input to
-	 * FNAME(fetch), __direct_map and kvm_tdp_mmu_map.
+	 * FNAME(fetch), direct_map() and kvm_tdp_mmu_map().
 	 */
 	u8 max_level;
...