Commit 8aba0958 authored by Janosch Frank, committed by Christian Borntraeger

KVM: s390: Add CPU dump functionality

The previous patch introduced the per-VM dump functions; now let's
focus on dumping the VCPU state via the newly introduced
KVM_S390_PV_CPU_COMMAND ioctl, which mirrors the VM UV ioctl and can be
extended with new commands later.
Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Link: https://lore.kernel.org/r/20220517163629.3443-8-frankja@linux.ibm.com
Message-Id: <20220517163629.3443-8-frankja@linux.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@linux.ibm.com>
parent 0460eb35
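
The ioctl pair is intentionally symmetric: both the VM-scoped and the new vCPU-scoped call take the same struct kvm_pv_cmd, so existing userspace plumbing can be reused. A minimal sketch of that relationship (vm_fd, vcpu_fd and the dmp payload are placeholders; dmp is a struct kvm_s390_pv_dmp as in the uapi hunk below, error handling omitted):

        /* Both ioctls take the same struct kvm_pv_cmd; only the target fd differs. */
        struct kvm_pv_cmd cmd = {
                .cmd  = KVM_PV_DUMP,
                .data = (__u64)(unsigned long)&dmp,
        };

        ioctl(vm_fd,   KVM_S390_PV_COMMAND,     &cmd);  /* VM-scoped UV commands */
        ioctl(vcpu_fd, KVM_S390_PV_CPU_COMMAND, &cmd);  /* vCPU-scoped, added here */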
arch/s390/kvm/kvm-s390.c
@@ -5096,6 +5096,48 @@ long kvm_arch_vcpu_async_ioctl(struct file *filp,
        return -ENOIOCTLCMD;
}

static int kvm_s390_handle_pv_vcpu_dump(struct kvm_vcpu *vcpu,
                                        struct kvm_pv_cmd *cmd)
{
        struct kvm_s390_pv_dmp dmp;
        void *data;
        int ret;

        /* Dump initialization is a prerequisite */
        if (!vcpu->kvm->arch.pv.dumping)
                return -EINVAL;

        if (copy_from_user(&dmp, (__u8 __user *)cmd->data, sizeof(dmp)))
                return -EFAULT;

        /* We only handle this subcmd right now */
        if (dmp.subcmd != KVM_PV_DUMP_CPU)
                return -EINVAL;

        /* CPU dump length is the same as create cpu storage donation. */
        if (dmp.buff_len != uv_info.guest_cpu_stor_len)
                return -EINVAL;

        data = kvzalloc(uv_info.guest_cpu_stor_len, GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        ret = kvm_s390_pv_dump_cpu(vcpu, data, &cmd->rc, &cmd->rrc);

        VCPU_EVENT(vcpu, 3, "PROTVIRT DUMP CPU %d rc %x rrc %x",
                   vcpu->vcpu_id, cmd->rc, cmd->rrc);

        if (ret)
                ret = -EINVAL;

        /* On success copy over the dump data */
        if (!ret && copy_to_user((__u8 __user *)dmp.buff_addr, data, uv_info.guest_cpu_stor_len))
                ret = -EFAULT;

        kvfree(data);
        return ret;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
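
The buff_len check above means userspace cannot pick an arbitrary buffer size: it has to pass exactly the CPU storage donation length reported by the Ultravisor. A sketch of how userspace would discover that length, assuming the KVM_PV_INFO / KVM_PV_INFO_DUMP query interface added earlier in this series (field names per its uapi; error handling abbreviated):

        #include <linux/kvm.h>
        #include <stdint.h>
        #include <sys/ioctl.h>

        /* Returns the required KVM_PV_DUMP_CPU buffer length, or 0 on error. */
        static uint64_t query_cpu_dump_len(int vm_fd)
        {
                struct kvm_s390_pv_info info = {
                        .header.id      = KVM_PV_INFO_DUMP,
                        .header.len_max = sizeof(info.header) + sizeof(info.dump),
                };
                struct kvm_pv_cmd cmd = {
                        .cmd  = KVM_PV_INFO,
                        .data = (uint64_t)(unsigned long)&info,
                };

                if (ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd) < 0)
                        return 0;

                /* Mirrors uv_info.guest_cpu_stor_len on the kernel side. */
                return info.dump.dump_cpu_buffer_len;
        }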
@@ -5260,6 +5302,33 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                                           irq_state.len);
                break;
        }
        case KVM_S390_PV_CPU_COMMAND: {
                struct kvm_pv_cmd cmd;

                r = -EINVAL;
                if (!is_prot_virt_host())
                        break;

                r = -EFAULT;
                if (copy_from_user(&cmd, argp, sizeof(cmd)))
                        break;

                r = -EINVAL;
                if (cmd.flags)
                        break;

                /* We only handle this cmd right now */
                if (cmd.cmd != KVM_PV_DUMP)
                        break;

                r = kvm_s390_handle_pv_vcpu_dump(vcpu, &cmd);

                /* Always copy over UV rc / rrc data */
                if (copy_to_user((__u8 __user *)argp, &cmd.rc,
                                 sizeof(cmd.rc) + sizeof(cmd.rrc)))
                        r = -EFAULT;
                break;
        }
        default:
                r = -ENOTTY;
        }
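
The unconditional rc/rrc copy-back above copies sizeof(cmd.rc) + sizeof(cmd.rrc) bytes starting at &cmd.rc, which relies on the two fields sitting next to each other in the uapi structure. For reference, struct kvm_pv_cmd as defined in include/uapi/linux/kvm.h:

        struct kvm_pv_cmd {
                __u32 cmd;      /* Command to be executed */
                __u16 rc;       /* Ultravisor return code */
                __u16 rrc;      /* Ultravisor return reason code */
                __u64 data;     /* Data or address */
                __u32 flags;    /* flags for future extensions. Must be 0 for now */
                __u32 reserved[3];
        };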

arch/s390/kvm/kvm-s390.h
@@ -250,6 +250,7 @@ int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
int kvm_s390_pv_unpack(struct kvm *kvm, unsigned long addr, unsigned long size,
                       unsigned long tweak, u16 *rc, u16 *rrc);
int kvm_s390_pv_set_cpu_state(struct kvm_vcpu *vcpu, u8 state);
int kvm_s390_pv_dump_cpu(struct kvm_vcpu *vcpu, void *buff, u16 *rc, u16 *rrc);
int kvm_s390_pv_dump_stor_state(struct kvm *kvm, void __user *buff_user,
                                u64 *gaddr, u64 buff_user_len, u16 *rc, u16 *rrc);
int kvm_s390_pv_dump_complete(struct kvm *kvm, void __user *buff_user,

arch/s390/kvm/pv.c
@@ -300,6 +300,22 @@ int kvm_s390_pv_set_cpu_state(struct kvm_vcpu *vcpu, u8 state)
        return 0;
}

int kvm_s390_pv_dump_cpu(struct kvm_vcpu *vcpu, void *buff, u16 *rc, u16 *rrc)
{
        struct uv_cb_dump_cpu uvcb = {
                .header.cmd = UVC_CMD_DUMP_CPU,
                .header.len = sizeof(uvcb),
                .cpu_handle = vcpu->arch.pv.handle,
                .dump_area_origin = (u64)buff,
        };
        int cc;

        cc = uv_call_sched(0, (u64)&uvcb);
        *rc = uvcb.header.rc;
        *rrc = uvcb.header.rrc;
        return cc;
}

/* Size of the cache for the storage state dump data. 1MB for now */
#define DUMP_BUFF_LEN HPAGE_SIZE
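
kvm_s390_pv_dump_cpu follows the standard UV call pattern: the control block's header carries the command code and block length on input and receives the return and reason codes on output, and uv_call_sched re-issues the Ultravisor call as long as the condition code asks for a retry, with cond_resched() between iterations so a potentially long-running dump does not hog the CPU. For reference, the shared header as defined in arch/s390/include/asm/uv.h:

        struct uv_cb_header {
                u16 len;
                u16 cmd;        /* Command Code */
                u16 rc;         /* Response Code */
                u16 rrc;        /* Return Reason Code */
        } __packed __aligned(8);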

include/uapi/linux/kvm.h
@@ -1664,6 +1664,7 @@ enum pv_cmd_dmp_id {
        KVM_PV_DUMP_INIT,
        KVM_PV_DUMP_CONFIG_STOR_STATE,
        KVM_PV_DUMP_COMPLETE,
        KVM_PV_DUMP_CPU,
};

struct kvm_s390_pv_dmp {
@@ -2168,4 +2169,7 @@ struct kvm_stats_desc {
/* Available with KVM_CAP_XSAVE2 */
#define KVM_GET_XSAVE2            _IOR(KVMIO,  0xcf, struct kvm_xsave)

/* Available with KVM_CAP_S390_PROTECTED_DUMP */
#define KVM_S390_PV_CPU_COMMAND   _IOWR(KVMIO, 0xd0, struct kvm_pv_cmd)

#endif /* __LINUX_KVM_H */
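
Putting the pieces together, a hedged end-to-end sketch of dumping one protected vCPU from userspace. KVM_PV_DUMP_INIT is assumed to have already been issued on the VM fd (the arch.pv.dumping check in the handler enforces this), cpu_dump_len is the length obtained as in the query sketch above, and the helper name and error handling are purely illustrative:

        #include <linux/kvm.h>
        #include <stdint.h>
        #include <stdlib.h>
        #include <sys/ioctl.h>

        /* Returns a malloc'd buffer holding the encrypted vCPU dump, or NULL. */
        static void *dump_vcpu(int vcpu_fd, uint64_t cpu_dump_len)
        {
                void *buf = malloc(cpu_dump_len);
                struct kvm_s390_pv_dmp dmp = {
                        .subcmd    = KVM_PV_DUMP_CPU,
                        .buff_addr = (uint64_t)(unsigned long)buf,
                        .buff_len  = cpu_dump_len,  /* must match exactly, or -EINVAL */
                };
                struct kvm_pv_cmd cmd = {
                        .cmd  = KVM_PV_DUMP,
                        .data = (uint64_t)(unsigned long)&dmp,
                };

                if (buf && ioctl(vcpu_fd, KVM_S390_PV_CPU_COMMAND, &cmd) < 0) {
                        free(buf);
                        buf = NULL;
                }

                return buf;
        }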