Commit 9e7325ac authored by Sean Christopherson

KVM: s390: Refactor kvm_is_error_gpa() into kvm_is_gpa_in_memslot()

Rename kvm_is_error_gpa() to kvm_is_gpa_in_memslot() and invert the
polarity accordingly in order to (a) free up kvm_is_error_gpa() to match
with kvm_is_error_{hva,page}(), and (b) to make it more obvious that the
helper is doing a memslot lookup, i.e. not simply checking for INVALID_GPA.

No functional change intended.

Link: https://lore.kernel.org/r/20240215152916.1158-9-paul@xen.org
Signed-off-by: Sean Christopherson <seanjc@google.com>
parent 406c1096
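
For readers skimming the diff below, every call site is a straight polarity flip. A minimal illustrative sketch of the before/after shape (not taken verbatim from the commit; "gaddr" stands in for whichever guest address each call site actually checks):

	/* Before: helper returned true when the gpa had no backing memslot. */
	if (kvm_is_error_gpa(kvm, gaddr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	/* After: helper returns true when the gpa IS backed, so callers negate it. */
	if (!kvm_is_gpa_in_memslot(kvm, gaddr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);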
@@ -102,7 +102,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
 		    parm.token_addr & 7 || parm.zarch != 0x8000000000000000ULL)
 			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-		if (kvm_is_error_gpa(vcpu->kvm, parm.token_addr))
+		if (!kvm_is_gpa_in_memslot(vcpu->kvm, parm.token_addr))
 			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 		vcpu->arch.pfault_token = parm.token_addr;
@@ -664,7 +664,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
 	case ASCE_TYPE_REGION1: {
 		union region1_table_entry rfte;
-		if (kvm_is_error_gpa(vcpu->kvm, ptr))
+		if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr))
 			return PGM_ADDRESSING;
 		if (deref_table(vcpu->kvm, ptr, &rfte.val))
 			return -EFAULT;
@@ -682,7 +682,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
 	case ASCE_TYPE_REGION2: {
 		union region2_table_entry rste;
-		if (kvm_is_error_gpa(vcpu->kvm, ptr))
+		if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr))
 			return PGM_ADDRESSING;
 		if (deref_table(vcpu->kvm, ptr, &rste.val))
 			return -EFAULT;
@@ -700,7 +700,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
 	case ASCE_TYPE_REGION3: {
 		union region3_table_entry rtte;
-		if (kvm_is_error_gpa(vcpu->kvm, ptr))
+		if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr))
 			return PGM_ADDRESSING;
 		if (deref_table(vcpu->kvm, ptr, &rtte.val))
 			return -EFAULT;
@@ -728,7 +728,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
 	case ASCE_TYPE_SEGMENT: {
 		union segment_table_entry ste;
-		if (kvm_is_error_gpa(vcpu->kvm, ptr))
+		if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr))
 			return PGM_ADDRESSING;
 		if (deref_table(vcpu->kvm, ptr, &ste.val))
 			return -EFAULT;
@@ -748,7 +748,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
 		ptr = ste.fc0.pto * (PAGE_SIZE / 2) + vaddr.px * 8;
 	}
 	}
-	if (kvm_is_error_gpa(vcpu->kvm, ptr))
+	if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr))
 		return PGM_ADDRESSING;
 	if (deref_table(vcpu->kvm, ptr, &pte.val))
 		return -EFAULT;
@@ -770,7 +770,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
 		*prot = PROT_TYPE_IEP;
 		return PGM_PROTECTION;
 	}
-	if (kvm_is_error_gpa(vcpu->kvm, raddr.addr))
+	if (!kvm_is_gpa_in_memslot(vcpu->kvm, raddr.addr))
 		return PGM_ADDRESSING;
 	*gpa = raddr.addr;
 	return 0;
@@ -957,7 +957,7 @@ static int guest_range_to_gpas(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
 				return rc;
 		} else {
 			gpa = kvm_s390_real_to_abs(vcpu, ga);
-			if (kvm_is_error_gpa(vcpu->kvm, gpa)) {
+			if (!kvm_is_gpa_in_memslot(vcpu->kvm, gpa)) {
 				rc = PGM_ADDRESSING;
 				prot = PROT_NONE;
 			}
@@ -2878,7 +2878,7 @@ static int kvm_s390_vm_mem_op_abs(struct kvm *kvm, struct kvm_s390_mem_op *mop)
 	srcu_idx = srcu_read_lock(&kvm->srcu);
-	if (kvm_is_error_gpa(kvm, mop->gaddr)) {
+	if (!kvm_is_gpa_in_memslot(kvm, mop->gaddr)) {
 		r = PGM_ADDRESSING;
 		goto out_unlock;
 	}
@@ -2940,7 +2940,7 @@ static int kvm_s390_vm_mem_op_cmpxchg(struct kvm *kvm, struct kvm_s390_mem_op *m
 	srcu_idx = srcu_read_lock(&kvm->srcu);
-	if (kvm_is_error_gpa(kvm, mop->gaddr)) {
+	if (!kvm_is_gpa_in_memslot(kvm, mop->gaddr)) {
 		r = PGM_ADDRESSING;
 		goto out_unlock;
 	}
@@ -149,7 +149,7 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu)
 	 * first page, since address is 8k aligned and memory pieces are always
 	 * at least 1MB aligned and have at least a size of 1MB.
 	 */
-	if (kvm_is_error_gpa(vcpu->kvm, address))
+	if (!kvm_is_gpa_in_memslot(vcpu->kvm, address))
 		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 	kvm_s390_set_prefix(vcpu, address);
@@ -464,7 +464,7 @@ static int handle_test_block(struct kvm_vcpu *vcpu)
 		return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
 	addr = kvm_s390_real_to_abs(vcpu, addr);
-	if (kvm_is_error_gpa(vcpu->kvm, addr))
+	if (!kvm_is_gpa_in_memslot(vcpu->kvm, addr))
 		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
 	/*
 	 * We don't expect errors on modern systems, and do not care
@@ -172,7 +172,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
 	 * first page, since address is 8k aligned and memory pieces are always
 	 * at least 1MB aligned and have at least a size of 1MB.
 	 */
-	if (kvm_is_error_gpa(vcpu->kvm, irq.u.prefix.address)) {
+	if (!kvm_is_gpa_in_memslot(vcpu->kvm, irq.u.prefix.address)) {
 		*reg &= 0xffffffff00000000UL;
 		*reg |= SIGP_STATUS_INVALID_PARAMETER;
 		return SIGP_CC_STATUS_STORED;
@@ -1779,11 +1779,11 @@ static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
 	return (hpa_t)pfn << PAGE_SHIFT;
 }
-static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
+static inline bool kvm_is_gpa_in_memslot(struct kvm *kvm, gpa_t gpa)
 {
 	unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));
-	return kvm_is_error_hva(hva);
+	return !kvm_is_error_hva(hva);
 }
 static inline void kvm_gpc_mark_dirty_in_slot(struct gfn_to_pfn_cache *gpc)