Commit 7cd91804 authored by Radim Krčmář

Merge tag 'kvm-s390-next-4.16-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux

KVM: s390: Fixes and features for 4.16

- add the virtio-ccw transport for kvmconfig
- more debug tracing for cpu model
- cleanups and fixes
parents 6b697711 a9f6c9a9
@@ -515,9 +515,6 @@ struct kvm_s390_irq_payload {
 struct kvm_s390_local_interrupt {
         spinlock_t lock;
-        struct kvm_s390_float_interrupt *float_int;
-        struct swait_queue_head *wq;
-        atomic_t *cpuflags;
         DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS);
         struct kvm_s390_irq_payload irq;
         unsigned long pending_irqs;
...
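This hunk (apparently arch/s390/include/asm/kvm_host.h) drops three back-pointers from struct kvm_s390_local_interrupt; every user in the hunks below is converted to reach the same data through the vcpu itself. The removed initializers in kvm_arch_vcpu_create() further down give the mapping directly:

        li->cpuflags   ->  &vcpu->arch.sie_block->cpuflags
        li->float_int  ->  &vcpu->kvm->arch.float_int
        li->wq         ->  &vcpu->wq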
@@ -107,12 +107,11 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
 static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
 {
-        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
         int rc, expect;
 
         if (!kvm_s390_use_sca_entries())
                 return;
-        atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
+        atomic_andnot(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
         read_lock(&vcpu->kvm->arch.sca_lock);
         if (vcpu->kvm->arch.use_esca) {
                 struct esca_block *sca = vcpu->kvm->arch.sca;
@@ -279,13 +278,13 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
 static void __set_cpu_idle(struct kvm_vcpu *vcpu)
 {
         atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
-        set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
+        set_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask);
 }
 
 static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
 {
         atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
-        clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
+        clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask);
 }
 
 static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
@@ -1228,7 +1227,7 @@ static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
         li->irq.ext = irq->u.ext;
         set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
-        atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
+        __set_cpuflag(vcpu, CPUSTAT_EXT_INT);
         return 0;
 }
@@ -1253,7 +1252,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
         if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
                 return -EBUSY;
         *extcall = irq->u.extcall;
-        atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
+        __set_cpuflag(vcpu, CPUSTAT_EXT_INT);
         return 0;
 }
@@ -1329,7 +1328,7 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
         set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
         set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
-        atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
+        __set_cpuflag(vcpu, CPUSTAT_EXT_INT);
         return 0;
 }
@@ -1373,7 +1372,7 @@ static int __inject_ckc(struct kvm_vcpu *vcpu)
                                    0, 0);
         set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
-        atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
+        __set_cpuflag(vcpu, CPUSTAT_EXT_INT);
         return 0;
 }
@@ -1386,7 +1385,7 @@ static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
                                    0, 0);
         set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
-        atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
+        __set_cpuflag(vcpu, CPUSTAT_EXT_INT);
         return 0;
 }
@@ -1546,7 +1545,6 @@ static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
 static void __floating_irq_kick(struct kvm *kvm, u64 type)
 {
         struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
-        struct kvm_s390_local_interrupt *li;
         struct kvm_vcpu *dst_vcpu;
         int sigcpu, online_vcpus, nr_tries = 0;
@@ -1568,20 +1566,17 @@ static void __floating_irq_kick(struct kvm *kvm, u64 type)
         dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
 
         /* make the VCPU drop out of the SIE, or wake it up if sleeping */
-        li = &dst_vcpu->arch.local_int;
-        spin_lock(&li->lock);
         switch (type) {
         case KVM_S390_MCHK:
-                atomic_or(CPUSTAT_STOP_INT, li->cpuflags);
+                __set_cpuflag(dst_vcpu, CPUSTAT_STOP_INT);
                 break;
         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
-                atomic_or(CPUSTAT_IO_INT, li->cpuflags);
+                __set_cpuflag(dst_vcpu, CPUSTAT_IO_INT);
                 break;
         default:
-                atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
+                __set_cpuflag(dst_vcpu, CPUSTAT_EXT_INT);
                 break;
         }
-        spin_unlock(&li->lock);
         kvm_s390_vcpu_wakeup(dst_vcpu);
 }
...
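The __set_cpuflag() helper that replaces the open-coded atomic_or() calls is not defined in any hunk shown here; judging from its call sites it is presumably a thin wrapper in kvm-s390.h along these lines (a sketch, not taken from this diff):

        static inline void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
        {
                atomic_or(flag, &vcpu->arch.sie_block->cpuflags);
        }

Since setting cpuflags is a single atomic RMW on the SIE block, it needs no extra serialization, which is why the spin_lock(&li->lock)/spin_unlock() pair in __floating_irq_kick() could be dropped in the hunk above.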
@@ -573,7 +573,7 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
         case KVM_CAP_S390_GS:
                 r = -EINVAL;
                 mutex_lock(&kvm->lock);
-                if (atomic_read(&kvm->online_vcpus)) {
+                if (kvm->created_vcpus) {
                         r = -EBUSY;
                 } else if (test_facility(133)) {
                         set_kvm_facility(kvm->arch.model.fac_mask, 133);
@@ -1084,7 +1084,6 @@ static int kvm_s390_set_processor_feat(struct kvm *kvm,
                                        struct kvm_device_attr *attr)
 {
         struct kvm_s390_vm_cpu_feat data;
-        int ret = -EBUSY;
 
         if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
                 return -EFAULT;
@@ -1094,13 +1093,18 @@ static int kvm_s390_set_processor_feat(struct kvm *kvm,
                 return -EINVAL;
 
         mutex_lock(&kvm->lock);
-        if (!atomic_read(&kvm->online_vcpus)) {
-                bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
-                            KVM_S390_VM_CPU_FEAT_NR_BITS);
-                ret = 0;
-        }
+        if (kvm->created_vcpus) {
+                mutex_unlock(&kvm->lock);
+                return -EBUSY;
+        }
+        bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
+                    KVM_S390_VM_CPU_FEAT_NR_BITS);
         mutex_unlock(&kvm->lock);
-        return ret;
+        VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
+                 data.feat[0],
+                 data.feat[1],
+                 data.feat[2]);
+        return 0;
 }
 
 static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
@@ -1202,6 +1206,10 @@ static int kvm_s390_get_processor_feat(struct kvm *kvm,
                     KVM_S390_VM_CPU_FEAT_NR_BITS);
         if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
                 return -EFAULT;
+        VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
+                 data.feat[0],
+                 data.feat[1],
+                 data.feat[2]);
         return 0;
 }
@@ -1215,6 +1223,10 @@ static int kvm_s390_get_machine_feat(struct kvm *kvm,
                     KVM_S390_VM_CPU_FEAT_NR_BITS);
         if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
                 return -EFAULT;
+        VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
+                 data.feat[0],
+                 data.feat[1],
+                 data.feat[2]);
         return 0;
 }
@@ -2497,9 +2509,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
         vcpu->arch.sie_block->icpua = id;
         spin_lock_init(&vcpu->arch.local_int.lock);
-        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
-        vcpu->arch.local_int.wq = &vcpu->wq;
-        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
         seqcount_init(&vcpu->arch.cputm_seqcount);
 
         rc = kvm_vcpu_init(vcpu, kvm, id);
...
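Two of the hunks above replace atomic_read(&kvm->online_vcpus) with a check of kvm->created_vcpus. The distinction matters: created_vcpus is raised under kvm->lock as soon as vcpu creation starts, while online_vcpus only counts fully created vcpus, so the old test could let a VM-wide change race with a vcpu that was mid-creation. The resulting pattern, roughly (a sketch of the guarded section, not itself part of the diff):

        mutex_lock(&kvm->lock);
        if (kvm->created_vcpus) {       /* any vcpu created or in creation */
                mutex_unlock(&kvm->lock);
                return -EBUSY;
        }
        /* safe to change VM-wide CPU-model state here */
        mutex_unlock(&kvm->lock);

The VM_EVENT() additions are the "more debug tracing for cpu model" item from the commit message: they log the guest and host feature bitmaps to the VM's s390dbf debug feature at level 3.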
@@ -54,7 +54,7 @@ static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
 static inline int is_vcpu_idle(struct kvm_vcpu *vcpu)
 {
-        return test_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
+        return test_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask);
 }
 
 static inline int kvm_is_ucontrol(struct kvm *kvm)
...
@@ -20,14 +20,11 @@
 static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
                         u64 *reg)
 {
-        struct kvm_s390_local_interrupt *li;
         int cpuflags;
         int rc;
         int ext_call_pending;
 
-        li = &dst_vcpu->arch.local_int;
-        cpuflags = atomic_read(li->cpuflags);
+        cpuflags = atomic_read(&dst_vcpu->arch.sie_block->cpuflags);
         ext_call_pending = kvm_s390_ext_call_pending(dst_vcpu);
         if (!(cpuflags & CPUSTAT_STOPPED) && !ext_call_pending)
                 rc = SIGP_CC_ORDER_CODE_ACCEPTED;
@@ -211,7 +208,7 @@ static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu,
         int flags;
         int rc;
 
-        flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
+        flags = atomic_read(&dst_vcpu->arch.sie_block->cpuflags);
         if (!(flags & CPUSTAT_STOPPED)) {
                 *reg &= 0xffffffff00000000UL;
                 *reg |= SIGP_STATUS_INCORRECT_STATE;
@@ -231,7 +228,6 @@ static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu,
 static int __sigp_sense_running(struct kvm_vcpu *vcpu,
                                 struct kvm_vcpu *dst_vcpu, u64 *reg)
 {
-        struct kvm_s390_local_interrupt *li;
         int rc;
 
         if (!test_kvm_facility(vcpu->kvm, 9)) {
@@ -240,8 +236,8 @@ static int __sigp_sense_running(struct kvm_vcpu *vcpu,
                 return SIGP_CC_STATUS_STORED;
         }
 
-        li = &dst_vcpu->arch.local_int;
-        if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
+        if (atomic_read(&dst_vcpu->arch.sie_block->cpuflags) &
+            CPUSTAT_RUNNING) {
                 /* running */
                 rc = SIGP_CC_ORDER_CODE_ACCEPTED;
         } else {
...
@@ -815,27 +815,17 @@ static inline unsigned long *gmap_table_walk(struct gmap *gmap,
  * @ptl: pointer to the spinlock pointer
  *
  * Returns a pointer to the locked pte for a guest address, or NULL
- *
- * Note: Can also be called for shadow gmaps.
  */
 static pte_t *gmap_pte_op_walk(struct gmap *gmap, unsigned long gaddr,
                                spinlock_t **ptl)
 {
         unsigned long *table;
 
-        if (gmap_is_shadow(gmap))
-                spin_lock(&gmap->guest_table_lock);
+        BUG_ON(gmap_is_shadow(gmap));
         /* Walk the gmap page table, lock and get pte pointer */
         table = gmap_table_walk(gmap, gaddr, 1); /* get segment pointer */
-        if (!table || *table & _SEGMENT_ENTRY_INVALID) {
-                if (gmap_is_shadow(gmap))
-                        spin_unlock(&gmap->guest_table_lock);
+        if (!table || *table & _SEGMENT_ENTRY_INVALID)
                 return NULL;
-        }
-        if (gmap_is_shadow(gmap)) {
-                *ptl = &gmap->guest_table_lock;
-                return pte_offset_map((pmd_t *) table, gaddr);
-        }
         return pte_alloc_map_lock(gmap->mm, (pmd_t *) table, gaddr, ptl);
 }
@@ -889,8 +879,6 @@ static void gmap_pte_op_end(spinlock_t *ptl)
  * -EFAULT if gaddr is invalid (or mapping for shadows is missing).
  *
  * Called with sg->mm->mmap_sem in read.
- *
- * Note: Can also be called for shadow gmaps.
  */
 static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
                               unsigned long len, int prot, unsigned long bits)
@@ -900,6 +888,7 @@ static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
         pte_t *ptep;
         int rc;
 
+        BUG_ON(gmap_is_shadow(gmap));
         while (len) {
                 rc = -EAGAIN;
                 ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
@@ -960,7 +949,8 @@ EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
  * @val: pointer to the unsigned long value to return
  *
  * Returns 0 if the value was read, -ENOMEM if out of memory and -EFAULT
- * if reading using the virtual address failed.
+ * if reading using the virtual address failed. -EINVAL if called on a gmap
+ * shadow.
  *
  * Called with gmap->mm->mmap_sem in read.
  */
@@ -971,6 +961,9 @@ int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
         pte_t *ptep, pte;
         int rc;
 
+        if (gmap_is_shadow(gmap))
+                return -EINVAL;
+
         while (1) {
                 rc = -EAGAIN;
                 ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
...
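The new BUG_ON()/early-return guards make explicit that gmap_pte_op_walk(), gmap_protect_range() and gmap_read_table() are never called on a shadow gmap anymore, which is what allows the shadow-specific locking above to be deleted. gmap_is_shadow() itself is defined outside this diff (asm/gmap.h); for reference, it amounts to checking whether the gmap has a parent, roughly (a sketch, not from this diff):

        static inline int gmap_is_shadow(struct gmap *gmap)
        {
                /* a shadow gmap is created from, and points back to, a parent gmap */
                return !!gmap->parent;
        }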
@@ -18,6 +18,7 @@ CONFIG_VIRTUALIZATION=y
 CONFIG_HYPERVISOR_GUEST=y
 CONFIG_PARAVIRT=y
 CONFIG_KVM_GUEST=y
+CONFIG_S390_GUEST=y
 CONFIG_VIRTIO=y
 CONFIG_VIRTIO_PCI=y
 CONFIG_VIRTIO_BLK=y
...
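This last hunk is the "virtio-ccw transport for kvmconfig" item from the commit message. The kvm_guest.config fragment, which the kvmconfig make target merges into an existing .config, gains CONFIG_S390_GUEST=y, the option that builds the guest side of the virtio-ccw transport, so an s390 kernel built from this fragment can drive virtio devices when running under KVM. The surrounding hypervisor-guest and virtio-PCI options are unchanged, since the fragment is shared across architectures.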