Commit a0465f9a authored by Thomas Huth, committed by Christian Borntraeger

KVM: s390: Enable DAT support for TPROT handler

The TPROT instruction can be used to check the accessibility of storage
for any kind of logical address. So far, our handler only supported
real addresses. This patch now also enables support for addresses that
have to be translated via DAT first. And while we're at it, change the
code to use the common KVM function gfn_to_hva_prot() to check the
validity and writability of the memory page.
Signed-off-by: Thomas Huth <thuth@linux.vnet.ibm.com>
Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
parent 9fbc0276
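
For context: gfn_to_hva_prot() is the generic KVM helper that resolves a guest frame number to a host virtual address and reports whether the backing page is mapped writable. A minimal sketch of the check the handler now relies on (the wrapper name and the -EFAULT return value are illustrative, not part of this patch):

#include <linux/kvm_host.h>

/* Sketch: check that a guest physical address is backed by a memslot and
 * find out whether stores to it are permitted on the host side. */
static int check_guest_page(struct kvm *kvm, gpa_t gpa, bool *writable)
{
        unsigned long hva;

        hva = gfn_to_hva_prot(kvm, gpa_to_gfn(gpa), writable);
        if (kvm_is_error_hva(hva))
                return -EFAULT;         /* no memslot backs this guest page */
        return 0;                       /* *writable says whether stores are allowed */
}
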
@@ -292,7 +292,7 @@ static void ipte_unlock_siif(struct kvm_vcpu *vcpu)
         wake_up(&vcpu->kvm->arch.ipte_wq);
 }
 
-static void ipte_lock(struct kvm_vcpu *vcpu)
+void ipte_lock(struct kvm_vcpu *vcpu)
 {
         if (vcpu->arch.sie_block->eca & 1)
                 ipte_lock_siif(vcpu);
@@ -300,7 +300,7 @@ static void ipte_lock(struct kvm_vcpu *vcpu)
                 ipte_lock_simple(vcpu);
 }
 
-static void ipte_unlock(struct kvm_vcpu *vcpu)
+void ipte_unlock(struct kvm_vcpu *vcpu)
 {
         if (vcpu->arch.sie_block->eca & 1)
                 ipte_unlock_siif(vcpu);
...
@@ -327,6 +327,8 @@ int read_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
         return access_guest_real(vcpu, gra, data, len, 0);
 }
 
+void ipte_lock(struct kvm_vcpu *vcpu);
+void ipte_unlock(struct kvm_vcpu *vcpu);
 int ipte_lock_held(struct kvm_vcpu *vcpu);
 int kvm_s390_check_low_addr_protection(struct kvm_vcpu *vcpu, unsigned long ga);
...
@@ -930,8 +930,9 @@ int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
 
 static int handle_tprot(struct kvm_vcpu *vcpu)
 {
         u64 address1, address2;
-        struct vm_area_struct *vma;
-        unsigned long user_address;
+        unsigned long hva, gpa;
+        int ret = 0, cc = 0;
+        bool writable;
 
         vcpu->stat.instruction_tprot++;
@@ -942,32 +943,41 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
 
         /* we only handle the Linux memory detection case:
          * access key == 0
-         * guest DAT == off
          * everything else goes to userspace. */
         if (address2 & 0xf0)
                 return -EOPNOTSUPP;
-        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
-                return -EOPNOTSUPP;
-
-        down_read(&current->mm->mmap_sem);
-        user_address = __gmap_translate(address1, vcpu->arch.gmap);
-        if (IS_ERR_VALUE(user_address))
-                goto out_inject;
-        vma = find_vma(current->mm, user_address);
-        if (!vma)
-                goto out_inject;
-        vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
-        if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ))
-                vcpu->arch.sie_block->gpsw.mask |= (1ul << 44);
-        if (!(vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_READ))
-                vcpu->arch.sie_block->gpsw.mask |= (2ul << 44);
-
-        up_read(&current->mm->mmap_sem);
-        return 0;
-
-out_inject:
-        up_read(&current->mm->mmap_sem);
-        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
+                ipte_lock(vcpu);
+        ret = guest_translate_address(vcpu, address1, &gpa, 1);
+        if (ret == PGM_PROTECTION) {
+                /* Write protected? Try again with read-only... */
+                cc = 1;
+                ret = guest_translate_address(vcpu, address1, &gpa, 0);
+        }
+        if (ret) {
+                if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
+                        ret = kvm_s390_inject_program_int(vcpu, ret);
+                } else if (ret > 0) {
+                        /* Translation not available */
+                        kvm_s390_set_psw_cc(vcpu, 3);
+                        ret = 0;
+                }
+                goto out_unlock;
+        }
+
+        hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
+        if (kvm_is_error_hva(hva)) {
+                ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+        } else {
+                if (!writable)
+                        cc = 1;         /* Write not permitted ==> read-only */
+                kvm_s390_set_psw_cc(vcpu, cc);
+                /* Note: CC2 only occurs for storage keys (not supported yet) */
+        }
+out_unlock:
+        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
+                ipte_unlock(vcpu);
+        return ret;
 }
 
 int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
...
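
For reference, the condition codes the reworked handler can set follow the architected TPROT semantics: CC0 when fetch and store are both permitted, CC1 when only fetch is permitted, CC3 when the address cannot be translated. CC2 (fetch not permitted, i.e. key-controlled protection) is never generated here because storage keys are not supported yet. A hypothetical helper summarizing that mapping (not part of the patch):

/* Sketch only: how translation and writability results map to TPROT CCs. */
static int tprot_cc(bool translated, bool writable)
{
        if (!translated)
                return 3;       /* translation not available */
        if (!writable)
                return 1;       /* fetch permitted, store not permitted */
        return 0;               /* fetch and store permitted */
}
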