Commit 5ac14bac authored by Claudio Imbrenda, committed by Christian Borntraeger

KVM: s390: extend kvm_s390_shadow_fault to return entry pointer

Extend kvm_s390_shadow_fault to return the pointer to the valid leaf
DAT table entry, or to the invalid entry.

Also return some flags in the lower bits of the address:
PEI_DAT_PROT: indicates that DAT protection applies because of the
              protection bit in the segment (or, if EDAT, region) tables.
PEI_NOT_PTE: indicates that the address of the DAT table entry returned
             does not refer to a PTE, but to a segment or region table.
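
For illustration only (not part of the patch): a caller interested in the new
information could decode the returned value along the lines of the sketch
below. The local variables are hypothetical; vcpu, sg and saddr stand for the
usual arguments, and only kvm_s390_shadow_fault(), PEI_DAT_PROT and
PEI_NOT_PTE are introduced by this patch.

  unsigned long datptr = 0, entry;
  bool dat_prot, not_pte;
  int rc;

  rc = kvm_s390_shadow_fault(vcpu, sg, saddr, &datptr);
  if (rc < 0)
          return rc;                        /* -EFAULT, -ENOMEM, ... */
  /* the flags live in the low bits; DAT entries are 8-byte aligned */
  dat_prot = datptr & PEI_DAT_PROT;         /* DAT protection from a segment/region table */
  not_pte = datptr & PEI_NOT_PTE;           /* returned entry is a segment/region entry */
  entry = datptr & ~7UL;                    /* leaf entry (rc == 0) or invalid entry (rc > 0) */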
Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Cc: stable@vger.kernel.org
Reviewed-by: Janosch Frank <frankja@de.ibm.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
Link: https://lore.kernel.org/r/20210302174443.514363-3-imbrenda@linux.ibm.com
[borntraeger@de.ibm.com: fold in a fix from Claudio]
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
parent f85f1baa
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -976,7 +976,9 @@ int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
  * kvm_s390_shadow_tables - walk the guest page table and create shadow tables
  * @sg: pointer to the shadow guest address space structure
  * @saddr: faulting address in the shadow gmap
- * @pgt: pointer to the page table address result
+ * @pgt: pointer to the beginning of the page table for the given address if
+ *	 successful (return value 0), or to the first invalid DAT entry in
+ *	 case of exceptions (return value > 0)
  * @fake: pgt references contiguous guest memory block, not a pgtable
  */
 static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
@@ -1034,6 +1036,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
 			rfte.val = ptr;
 			goto shadow_r2t;
 		}
+		*pgt = ptr + vaddr.rfx * 8;
 		rc = gmap_read_table(parent, ptr + vaddr.rfx * 8, &rfte.val);
 		if (rc)
 			return rc;
@@ -1060,6 +1063,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
 			rste.val = ptr;
 			goto shadow_r3t;
 		}
+		*pgt = ptr + vaddr.rsx * 8;
 		rc = gmap_read_table(parent, ptr + vaddr.rsx * 8, &rste.val);
 		if (rc)
 			return rc;
@@ -1087,6 +1091,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
 			rtte.val = ptr;
 			goto shadow_sgt;
 		}
+		*pgt = ptr + vaddr.rtx * 8;
 		rc = gmap_read_table(parent, ptr + vaddr.rtx * 8, &rtte.val);
 		if (rc)
 			return rc;
@@ -1123,6 +1128,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
 			ste.val = ptr;
 			goto shadow_pgt;
 		}
+		*pgt = ptr + vaddr.sx * 8;
 		rc = gmap_read_table(parent, ptr + vaddr.sx * 8, &ste.val);
 		if (rc)
 			return rc;
@@ -1157,6 +1163,8 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
  * @vcpu: virtual cpu
  * @sg: pointer to the shadow guest address space structure
  * @saddr: faulting address in the shadow gmap
+ * @datptr: will contain the address of the faulting DAT table entry, or of
+ *	    the valid leaf, plus some flags
  *
  * Returns: - 0 if the shadow fault was successfully resolved
  *	    - > 0 (pgm exception code) on exceptions while faulting
@@ -1165,11 +1173,11 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
  *	    - -ENOMEM if out of memory
  */
 int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
-			  unsigned long saddr)
+			  unsigned long saddr, unsigned long *datptr)
 {
 	union vaddress vaddr;
 	union page_table_entry pte;
-	unsigned long pgt;
+	unsigned long pgt = 0;
 	int dat_protection, fake;
 	int rc;
@@ -1191,8 +1199,20 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
 		pte.val = pgt + vaddr.px * PAGE_SIZE;
 		goto shadow_page;
 	}
-	if (!rc)
-		rc = gmap_read_table(sg->parent, pgt + vaddr.px * 8, &pte.val);
+
+	switch (rc) {
+	case PGM_SEGMENT_TRANSLATION:
+	case PGM_REGION_THIRD_TRANS:
+	case PGM_REGION_SECOND_TRANS:
+	case PGM_REGION_FIRST_TRANS:
+		pgt |= PEI_NOT_PTE;
+		break;
+	case 0:
+		pgt += vaddr.px * 8;
+		rc = gmap_read_table(sg->parent, pgt, &pte.val);
+	}
+	if (datptr)
+		*datptr = pgt | dat_protection * PEI_DAT_PROT;
 	if (!rc && pte.i)
 		rc = PGM_PAGE_TRANSLATION;
 	if (!rc && pte.z)
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -376,7 +376,11 @@ void ipte_unlock(struct kvm_vcpu *vcpu);
 int ipte_lock_held(struct kvm_vcpu *vcpu);
 int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra);
 
+/* MVPG PEI indication bits */
+#define PEI_DAT_PROT 2
+#define PEI_NOT_PTE 4
+
 int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *shadow,
-			  unsigned long saddr);
+			  unsigned long saddr, unsigned long *datptr);
 
 #endif /* __KVM_S390_GACCESS_H */
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -620,10 +620,10 @@ static int map_prefix(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 	/* with mso/msl, the prefix lies at offset *mso* */
 	prefix += scb_s->mso;
 
-	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix);
+	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix, NULL);
 	if (!rc && (scb_s->ecb & ECB_TE))
 		rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
-					   prefix + PAGE_SIZE);
+					   prefix + PAGE_SIZE, NULL);
 	/*
 	 * We don't have to mprotect, we will be called for all unshadows.
 	 * SIE will detect if protection applies and trigger a validity.
...@@ -914,7 +914,7 @@ static int handle_fault(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) ...@@ -914,7 +914,7 @@ static int handle_fault(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
current->thread.gmap_addr, 1); current->thread.gmap_addr, 1);
rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
current->thread.gmap_addr); current->thread.gmap_addr, NULL);
if (rc > 0) { if (rc > 0) {
rc = inject_fault(vcpu, rc, rc = inject_fault(vcpu, rc,
current->thread.gmap_addr, current->thread.gmap_addr,
@@ -936,7 +936,7 @@ static void handle_last_fault(struct kvm_vcpu *vcpu,
 {
 	if (vsie_page->fault_addr)
 		kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
-				      vsie_page->fault_addr);
+				      vsie_page->fault_addr, NULL);
 	vsie_page->fault_addr = 0;
 }