Commit b99f4512 authored by Nico Boehr, committed by Janosch Frank

KVM: s390: sida: sort out physical vs virtual pointers usage

All callers of the sida_origin() macro actually expected a virtual
address, so rename it to sida_addr() and hand out a virtual address.

In some places the macro wasn't used at all, which could create problems
if the sida size ever becomes nonzero (not currently the case), so start
using it everywhere while we're at it.
Signed-off-by: Nico Boehr <nrb@linux.ibm.com>
Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Link: https://lore.kernel.org/r/20221020143159.294605-5-nrb@linux.ibm.com
Message-Id: <20221020143159.294605-5-nrb@linux.ibm.com>
Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
parent fe0ef003
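
The gist of the change: the hardware-visible sidad field continues to hold a physical address, while the renamed sida_addr() macro hands every caller a virtual pointer via phys_to_virt(), which removes the casts and open-coded offset arithmetic at the call sites. Below is a minimal, self-contained sketch of that convention, not the kernel source: the struct is reduced to the single field used here, and sida_alloc()/sida_copy_in()/sida_free() are hypothetical helpers that only mirror what kvm_s390_pv_create_cpu(), the memcpy() call sites and kvm_s390_pv_destroy_cpu() do in the diff that follows.

#include <linux/errno.h>	/* -ENOMEM */
#include <linux/gfp.h>		/* __get_free_page(), GFP_KERNEL_ACCOUNT, __GFP_ZERO, free_page() */
#include <linux/io.h>		/* virt_to_phys(), phys_to_virt() */
#include <linux/mm.h>		/* PAGE_MASK */
#include <linux/string.h>	/* memcpy() */
#include <linux/types.h>	/* size_t */

/* Reduced stand-in for the real struct kvm_s390_sie_block. */
struct sie_block_sketch {
	unsigned long sidad;	/* physical address plus size bits (SIDAD_SIZE_MASK) */
};

/* Same shape as the new macro: mask off the size bits, return a virtual pointer. */
#define sida_addr(sie_block)	phys_to_virt((sie_block)->sidad & PAGE_MASK)

/* Hypothetical helper: allocate the sida page and publish its physical address. */
static int sida_alloc(struct sie_block_sketch *sie_block)
{
	void *sida = (void *)__get_free_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);

	if (!sida)
		return -ENOMEM;
	sie_block->sidad = virt_to_phys(sida);	/* the hardware expects a physical address */
	return 0;
}

/* Hypothetical helper: callers only ever touch the virtual alias. */
static void sida_copy_in(struct sie_block_sketch *sie_block, const void *src, size_t len)
{
	memcpy(sida_addr(sie_block), src, len);
}

/* Hypothetical teardown: convert back to a virtual address before freeing. */
static void sida_free(struct sie_block_sketch *sie_block)
{
	free_page((unsigned long)sida_addr(sie_block));
	sie_block->sidad = 0;
}
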
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -142,8 +142,7 @@ struct mcck_volatile_info {
 			   CR14_EXTERNAL_DAMAGE_SUBMASK)
 
 #define SIDAD_SIZE_MASK		0xff
-#define sida_origin(sie_block) \
-	((sie_block)->sidad & PAGE_MASK)
+#define sida_addr(sie_block) phys_to_virt((sie_block)->sidad & PAGE_MASK)
 #define sida_size(sie_block) \
 	((((sie_block)->sidad & SIDAD_SIZE_MASK) + 1) * PAGE_SIZE)
 
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -409,8 +409,7 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
 out:
 	if (!cc) {
 		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
-			memcpy((void *)(sida_origin(vcpu->arch.sie_block)),
-			       sctns, PAGE_SIZE);
+			memcpy(sida_addr(vcpu->arch.sie_block), sctns, PAGE_SIZE);
 		} else {
 			r = write_guest(vcpu, addr, reg2, sctns, PAGE_SIZE);
 			if (r) {
@@ -464,7 +463,7 @@ static int handle_operexc(struct kvm_vcpu *vcpu)
 
 static int handle_pv_spx(struct kvm_vcpu *vcpu)
 {
-	u32 pref = *(u32 *)vcpu->arch.sie_block->sidad;
+	u32 pref = *(u32 *)sida_addr(vcpu->arch.sie_block);
 
 	kvm_s390_set_prefix(vcpu, pref);
 	trace_kvm_s390_handle_prefix(vcpu, 1, pref);
@@ -497,7 +496,7 @@ static int handle_pv_sclp(struct kvm_vcpu *vcpu)
 
 static int handle_pv_uvc(struct kvm_vcpu *vcpu)
 {
-	struct uv_cb_share *guest_uvcb = (void *)vcpu->arch.sie_block->sidad;
+	struct uv_cb_share *guest_uvcb = sida_addr(vcpu->arch.sie_block);
 	struct uv_cb_cts uvcb = {
 		.header.cmd = UVC_CMD_UNPIN_PAGE_SHARED,
 		.header.len = sizeof(uvcb),
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -5167,6 +5167,7 @@ static long kvm_s390_vcpu_sida_op(struct kvm_vcpu *vcpu,
 				  struct kvm_s390_mem_op *mop)
 {
 	void __user *uaddr = (void __user *)mop->buf;
+	void *sida_addr;
 	int r = 0;
 
 	if (mop->flags || !mop->size)
@@ -5178,16 +5179,16 @@ static long kvm_s390_vcpu_sida_op(struct kvm_vcpu *vcpu,
 	if (!kvm_s390_pv_cpu_is_protected(vcpu))
 		return -EINVAL;
 
+	sida_addr = (char *)sida_addr(vcpu->arch.sie_block) + mop->sida_offset;
+
 	switch (mop->op) {
 	case KVM_S390_MEMOP_SIDA_READ:
-		if (copy_to_user(uaddr, (void *)(sida_origin(vcpu->arch.sie_block) +
-				 mop->sida_offset), mop->size))
+		if (copy_to_user(uaddr, sida_addr, mop->size))
 			r = -EFAULT;
 		break;
 	case KVM_S390_MEMOP_SIDA_WRITE:
-		if (copy_from_user((void *)(sida_origin(vcpu->arch.sie_block) +
-				   mop->sida_offset), uaddr, mop->size))
+		if (copy_from_user(sida_addr, uaddr, mop->size))
 			r = -EFAULT;
 		break;
 	}
 
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -924,8 +924,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
 		return -EREMOTE;
 	}
 	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
-		memcpy((void *)sida_origin(vcpu->arch.sie_block), (void *)mem,
-		       PAGE_SIZE);
+		memcpy(sida_addr(vcpu->arch.sie_block), (void *)mem, PAGE_SIZE);
 		rc = 0;
 	} else {
 		rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
--- a/arch/s390/kvm/pv.c
+++ b/arch/s390/kvm/pv.c
@@ -44,7 +44,7 @@ int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
 		free_pages(vcpu->arch.pv.stor_base,
 			   get_order(uv_info.guest_cpu_stor_len));
 
-	free_page(sida_origin(vcpu->arch.sie_block));
+	free_page((unsigned long)sida_addr(vcpu->arch.sie_block));
 	vcpu->arch.sie_block->pv_handle_cpu = 0;
 	vcpu->arch.sie_block->pv_handle_config = 0;
 	memset(&vcpu->arch.pv, 0, sizeof(vcpu->arch.pv));
@@ -66,6 +66,7 @@ int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
 		.header.cmd = UVC_CMD_CREATE_SEC_CPU,
 		.header.len = sizeof(uvcb),
 	};
+	void *sida_addr;
 	int cc;
 
 	if (kvm_s390_pv_cpu_get_handle(vcpu))
@@ -83,12 +84,13 @@ int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
 	uvcb.stor_origin = (u64)vcpu->arch.pv.stor_base;
 
 	/* Alloc Secure Instruction Data Area Designation */
-	vcpu->arch.sie_block->sidad = __get_free_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
-	if (!vcpu->arch.sie_block->sidad) {
+	sida_addr = (void *)__get_free_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
+	if (!sida_addr) {
 		free_pages(vcpu->arch.pv.stor_base,
 			   get_order(uv_info.guest_cpu_stor_len));
 		return -ENOMEM;
 	}
+	vcpu->arch.sie_block->sidad = virt_to_phys(sida_addr);
 
 	cc = uv_call(0, (u64)&uvcb);
 	*rc = uvcb.header.rc;