Commit 7a674157 authored by David Hildenbrand, committed by Christian Borntraeger

s390/mm: protection exceptions are correctly shadowed

As gmap shadows contain the correct protection permissions, protection
exceptions can be forwarded directly to guest 3. If we encounter a
protection exception while faulting, the next guest 3 run will
automatically handle it for us.

Keep the dat_protection logic in place, as it will be helpful later.
Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
parent e52f8b61
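
For context, here is a minimal standalone C sketch of the behavioral change. It is illustrative only: struct pte_bits and the shadow_fault_before()/shadow_fault_after() helpers are hypothetical stand-ins for the kvm_s390_shadow_fault() path shown in the diff below.

/*
 * Illustrative sketch only, not kernel code.  Models the change: the
 * old path rejected protected writes while resolving the shadow fault;
 * the new path always maps the page with its protection bit intact and
 * lets the protection exception surface on guest 3's next run.
 */
#include <stdbool.h>

#define PGM_PROTECTION 0x04		/* s390 protection-exception pgm code */

struct pte_bits {
	bool p;				/* DAT-protection bit of the PTE */
};

/* Before: a write to a protected page was rejected during faulting. */
static int shadow_fault_before(struct pte_bits pte, bool dat_protection,
			       bool write)
{
	dat_protection |= pte.p;
	if (write && dat_protection)
		return PGM_PROTECTION;	/* injected by the caller */
	return 0;			/* map the page into the shadow gmap */
}

/*
 * After: always map; the mapping keeps the protection bit, so a write
 * by guest 3 to a protected page raises a protection exception that is
 * forwarded to guest 3 directly.
 */
static int shadow_fault_after(struct pte_bits pte)
{
	(void)pte.p;			/* protection is honored by the mapping */
	return 0;			/* map r/o or r/w according to pte.p */
}

int main(void)
{
	struct pte_bits pte = { .p = true };

	/* Old path: the write fails immediately with PGM_PROTECTION (4). */
	int old_rc = shadow_fault_before(pte, false, true);
	/* New path: the fault resolves; the exception surfaces at run time. */
	int new_rc = shadow_fault_after(pte);

	return (old_rc == PGM_PROTECTION && new_rc == 0) ? 0 : 1;
}

This is why the write parameter can be dropped from kvm_s390_shadow_fault() entirely, as the diff below shows.
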
@@ -1075,7 +1075,6 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
  * kvm_s390_shadow_fault - handle fault on a shadow page table
  * @sg: pointer to the shadow guest address space structure
  * @saddr: faulting address in the shadow gmap
- * @write: =1 map r/w, =0 map r/o
  *
  * Returns: - 0 if the shadow fault was successfully resolved
  *          - > 0 (pgm exception code) on exceptions while faulting
@@ -1083,7 +1082,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
  *          - -EFAULT when accessing invalid guest addresses
  *          - -ENOMEM if out of memory
  */
-int kvm_s390_shadow_fault(struct gmap *sg, unsigned long saddr, int write)
+int kvm_s390_shadow_fault(struct gmap *sg, unsigned long saddr)
 {
 	union vaddress vaddr;
 	union page_table_entry pte;
@@ -1104,9 +1103,6 @@ int kvm_s390_shadow_fault(struct gmap *sg, unsigned long saddr, int write)
 		rc = PGM_PAGE_TRANSLATION;
 	if (!rc && (pte.z || pte.co))
 		rc = PGM_TRANSLATION_SPEC;
-	dat_protection |= pte.p;
-	if (!rc && write && dat_protection)
-		rc = PGM_PROTECTION;
 	if (!rc)
 		rc = gmap_shadow_page(sg, saddr, __pte(pte.val));
 	up_read(&sg->mm->mmap_sem);

@@ -361,6 +361,6 @@ void ipte_unlock(struct kvm_vcpu *vcpu);
 int ipte_lock_held(struct kvm_vcpu *vcpu);
 int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra);
-int kvm_s390_shadow_fault(struct gmap *shadow, unsigned long saddr, int write);
+int kvm_s390_shadow_fault(struct gmap *shadow, unsigned long saddr);
 #endif /* __KVM_S390_GACCESS_H */