Commit 6e1d2a3f authored by Hou Wenlong, committed by Sean Christopherson

KVM: x86/mmu: Replace UNMAPPED_GVA with INVALID_GPA for gva_to_gpa()

gva_to_gpa() returns a guest physical address, not a virtual address,
so it is odd that the UNMAPPED_GVA macro is used as its failure value.
Replace UNMAPPED_GVA with INVALID_GPA and drop the now-unused
UNMAPPED_GVA macro.

No functional change intended.
Signed-off-by: Hou Wenlong <houwenlong.hwl@antgroup.com>
Reviewed-by: Sean Christopherson <seanjc@google.com>
Link: https://lore.kernel.org/r/6104978956449467d3c68f1ad7f2c2f6d771d0ee.1656667239.git.houwenlong.hwl@antgroup.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
parent 156b9d76
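
Since both macros expand to the same sentinel, (~(gpa_t)0), the rename cannot alter behavior. A minimal standalone sketch (not part of the patch; gpa_t is approximated here as a plain 64-bit integer) illustrating why every rewritten comparison is equivalent:

#include <assert.h>
#include <stdint.h>

typedef uint64_t gpa_t;             /* stand-in for the kernel's gpa_t */

#define UNMAPPED_GVA (~(gpa_t)0)    /* old name, removed by this patch */
#define INVALID_GPA  (~(gpa_t)0)    /* new name, kept */

int main(void)
{
        /* Both macros expand to the same all-ones bit pattern, so a
         * check against UNMAPPED_GVA behaves identically when
         * rewritten as a check against INVALID_GPA. */
        gpa_t gpa = UNMAPPED_GVA;

        assert(gpa == INVALID_GPA);
        assert(gpa == (gpa_t)-1);   /* 0xffffffffffffffff */
        return 0;
}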
@@ -129,7 +129,6 @@
 #define INVALID_PAGE (~(hpa_t)0)
 #define VALID_PAGE(x) ((x) != INVALID_PAGE)
 
-#define UNMAPPED_GVA (~(gpa_t)0)
 #define INVALID_GPA (~(gpa_t)0)
 
 /* KVM Hugepage definitions for x86 */
...
@@ -378,7 +378,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
                 * information to fix the exit_qualification or exit_info_1
                 * fields.
                 */
-               if (unlikely(real_gpa == UNMAPPED_GVA))
+               if (unlikely(real_gpa == INVALID_GPA))
                        return 0;
 
                host_addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gpa_to_gfn(real_gpa),
@@ -431,7 +431,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 #endif
 
        real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(gfn), access, &walker->fault);
-       if (real_gpa == UNMAPPED_GVA)
+       if (real_gpa == INVALID_GPA)
                return 0;
 
        walker->gfn = real_gpa >> PAGE_SHIFT;
@@ -962,7 +962,7 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                               struct x86_exception *exception)
 {
        struct guest_walker walker;
-       gpa_t gpa = UNMAPPED_GVA;
+       gpa_t gpa = INVALID_GPA;
        int r;
 
 #ifndef CONFIG_X86_64
...
@@ -79,7 +79,7 @@ static int sgx_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t gva, bool write,
        else
                *gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, &ex);
 
-       if (*gpa == UNMAPPED_GVA) {
+       if (*gpa == INVALID_GPA) {
                kvm_inject_emulated_page_fault(vcpu, &ex);
                return -EFAULT;
        }
...
@@ -849,7 +849,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
         */
        real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(pdpt_gfn),
                                     PFERR_USER_MASK | PFERR_WRITE_MASK, NULL);
-       if (real_gpa == UNMAPPED_GVA)
+       if (real_gpa == INVALID_GPA)
                return 0;
 
        /* Note the offset, PDPTRs are 32 byte aligned when using PAE paging. */
@@ -7072,7 +7072,7 @@ static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
                unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
                int ret;
 
-               if (gpa == UNMAPPED_GVA)
+               if (gpa == INVALID_GPA)
                        return X86EMUL_PROPAGATE_FAULT;
                ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, data,
                                               offset, toread);
@@ -7103,7 +7103,7 @@ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
        /* Inline kvm_read_guest_virt_helper for speed.  */
        gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access|PFERR_FETCH_MASK,
                                    exception);
-       if (unlikely(gpa == UNMAPPED_GVA))
+       if (unlikely(gpa == INVALID_GPA))
                return X86EMUL_PROPAGATE_FAULT;
 
        offset = addr & (PAGE_SIZE-1);
@@ -7173,7 +7173,7 @@ static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes
                unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
                int ret;
 
-               if (gpa == UNMAPPED_GVA)
+               if (gpa == INVALID_GPA)
                        return X86EMUL_PROPAGATE_FAULT;
                ret = kvm_vcpu_write_guest(vcpu, gpa, data, towrite);
                if (ret < 0) {
@@ -7284,7 +7284,7 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
        *gpa = mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
 
-       if (*gpa == UNMAPPED_GVA)
+       if (*gpa == INVALID_GPA)
                return -1;
 
        return vcpu_is_mmio_gpa(vcpu, gva, *gpa, write);
@@ -7521,7 +7521,7 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
        gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
 
-       if (gpa == UNMAPPED_GVA ||
+       if (gpa == INVALID_GPA ||
            (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
                goto emul_write;
@@ -8338,7 +8338,7 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
                 * If the mapping is invalid in guest, let cpu retry
                 * it to generate fault.
                 */
-               if (gpa == UNMAPPED_GVA)
+               if (gpa == INVALID_GPA)
                        return true;
        }
@@ -11378,7 +11378,7 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
        gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
        srcu_read_unlock(&vcpu->kvm->srcu, idx);
        tr->physical_address = gpa;
-       tr->valid = gpa != UNMAPPED_GVA;
+       tr->valid = gpa != INVALID_GPA;
        tr->writeable = 1;
        tr->usermode = 0;
@@ -12983,7 +12983,7 @@ void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_c
                      (PFERR_WRITE_MASK | PFERR_FETCH_MASK | PFERR_USER_MASK);
 
        if (!(error_code & PFERR_PRESENT_MASK) ||
-           mmu->gva_to_gpa(vcpu, mmu, gva, access, &fault) != UNMAPPED_GVA) {
+           mmu->gva_to_gpa(vcpu, mmu, gva, access, &fault) != INVALID_GPA) {
                /*
                 * If vcpu->arch.walk_mmu->gva_to_gpa succeeded, the page
                 * tables probably do not match the TLB. Just proceed
...
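
Every call site above follows the same pattern: translate, compare the result against the all-ones sentinel, and propagate a fault on failure. A toy userspace analogue of that pattern (toy_gva_to_gpa() is a hypothetical stand-in, not KVM code):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gva_t;
typedef uint64_t gpa_t;

#define INVALID_GPA (~(gpa_t)0)

/* Hypothetical translator: returns INVALID_GPA when no mapping exists. */
static gpa_t toy_gva_to_gpa(gva_t gva)
{
        return (gva < 0x1000) ? INVALID_GPA : gva + 0x100000;
}

int main(void)
{
        gva_t gva = 0x0;                /* unmapped in this toy model */
        gpa_t gpa = toy_gva_to_gpa(gva);

        /* The check repeated throughout the diff above. */
        if (gpa == INVALID_GPA) {
                fprintf(stderr, "translation failed, propagate fault\n");
                return 1;
        }
        printf("gpa = 0x%llx\n", (unsigned long long)gpa);
        return 0;
}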