Commit c756ad03 authored by Paolo Bonzini

Merge tag 'kvm-s390-20140721' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into kvm-next

Bugfixes
--------
- add IPTE to trace event decoder
- document and advertise KVM_CAP_S390_IRQCHIP

Cleanups
--------
- Reuse kvm_vcpu_block for s390
- Get rid of tasklet for wakeup processing
parents 6f43ed01 e59d120f
@@ -2934,15 +2934,18 @@ The fields in each entry are defined as follows:
6. Capabilities that can be enabled
-----------------------------------

-There are certain capabilities that change the behavior of the virtual CPU when
-enabled. To enable them, please see section 4.37. Below you can find a list of
-capabilities and what their effect on the vCPU is when enabling them.
+There are certain capabilities that change the behavior of the virtual CPU or
+the virtual machine when enabled. To enable them, please see section 4.37.
+Below you can find a list of capabilities and what their effect on the vCPU or
+the virtual machine is when enabling them.

The following information is provided along with the description:

Architectures: which instruction set architectures provide this ioctl.
    x86 includes both i386 and x86_64.

+Target: whether this is a per-vcpu or per-vm capability.
+
Parameters: what parameters are accepted by the capability.

Returns: the return value. General error numbers (EBADF, ENOMEM, EINVAL)
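
Since every capability below is enabled through the KVM_ENABLE_CAP ioctl described in section 4.37, a minimal userspace sketch may help; the helper name and descriptor variable are illustrative assumptions, not part of this patch:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Sketch: enable a per-vcpu capability via KVM_ENABLE_CAP (section 4.37).
 * vcpu_fd is assumed to be an already-created vcpu file descriptor. */
static int enable_vcpu_cap(int vcpu_fd, __u32 capability)
{
	struct kvm_enable_cap cap = {
		.cap = capability,	/* e.g. KVM_CAP_PPC_OSI (Target: vcpu) */
		/* .args[] stays zeroed for capabilities with no parameters */
	};

	return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);	/* 0 on success; -1 on error */
}
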
@@ -2952,6 +2955,7 @@ The following information is provided along with the description:
6.1 KVM_CAP_PPC_OSI

Architectures: ppc
+Target: vcpu
Parameters: none
Returns: 0 on success; -1 on error
@@ -2966,6 +2970,7 @@ When this capability is enabled, KVM_EXIT_OSI can occur.
6.2 KVM_CAP_PPC_PAPR

Architectures: ppc
+Target: vcpu
Parameters: none
Returns: 0 on success; -1 on error
@@ -2985,6 +2990,7 @@ When this capability is enabled, KVM_EXIT_PAPR_HCALL can occur.
6.3 KVM_CAP_SW_TLB

Architectures: ppc
+Target: vcpu
Parameters: args[0] is the address of a struct kvm_config_tlb
Returns: 0 on success; -1 on error
@@ -3027,6 +3033,7 @@ For mmu types KVM_MMU_FSL_BOOKE_NOHV and KVM_MMU_FSL_BOOKE_HV:
6.4 KVM_CAP_S390_CSS_SUPPORT

Architectures: s390
+Target: vcpu
Parameters: none
Returns: 0 on success; -1 on error
@@ -3038,9 +3045,13 @@ handled in-kernel, while the other I/O instructions are passed to userspace.
When this capability is enabled, KVM_EXIT_S390_TSCH will occur on TEST
SUBCHANNEL intercepts.

+Note that even though this capability is enabled per-vcpu, the complete
+virtual machine is affected.
+
6.5 KVM_CAP_PPC_EPR

Architectures: ppc
+Target: vcpu
Parameters: args[0] defines whether the proxy facility is active
Returns: 0 on success; -1 on error
@@ -3066,7 +3077,17 @@ This capability connects the vcpu to an in-kernel MPIC device.
6.7 KVM_CAP_IRQ_XICS

Architectures: ppc
+Target: vcpu
Parameters: args[0] is the XICS device fd
            args[1] is the XICS CPU number (server ID) for this vcpu

This capability connects the vcpu to an in-kernel XICS device.
+
+6.8 KVM_CAP_S390_IRQCHIP
+
+Architectures: s390
+Target: vm
+Parameters: none
+
+This capability enables the in-kernel irqchip for s390. Please refer to
+"4.24 KVM_CREATE_IRQCHIP" for details.
@@ -305,7 +305,6 @@ struct kvm_s390_local_interrupt {
	struct list_head list;
	atomic_t active;
	struct kvm_s390_float_interrupt *float_int;
-	int timer_due; /* event indicator for waitqueue below */
	wait_queue_head_t *wq;
	atomic_t *cpuflags;
	unsigned int action_bits;
@@ -367,7 +366,6 @@ struct kvm_vcpu_arch {
	s390_fp_regs guest_fpregs;
	struct kvm_s390_local_interrupt local_int;
	struct hrtimer ckc_timer;
-	struct tasklet_struct tasklet;
	struct kvm_s390_pgm_info pgm;
	union {
		struct cpuid cpu_id;
@@ -108,6 +108,7 @@
	exit_code_ipa0(0xB2, 0x17, "STETR"),	\
	exit_code_ipa0(0xB2, 0x18, "PC"),	\
	exit_code_ipa0(0xB2, 0x20, "SERVC"),	\
+	exit_code_ipa0(0xB2, 0x21, "IPTE"),	\
	exit_code_ipa0(0xB2, 0x28, "PT"),	\
	exit_code_ipa0(0xB2, 0x29, "ISKE"),	\
	exit_code_ipa0(0xB2, 0x2a, "RRBE"),	\
@@ -544,13 +544,13 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
	int rc = 0;

	if (atomic_read(&li->active)) {
-		spin_lock_bh(&li->lock);
+		spin_lock(&li->lock);
		list_for_each_entry(inti, &li->list, list)
			if (__interrupt_is_deliverable(vcpu, inti)) {
				rc = 1;
				break;
			}
-		spin_unlock_bh(&li->lock);
+		spin_unlock(&li->lock);
	}

	if ((!rc) && atomic_read(&fi->active)) {
@@ -585,88 +585,56 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
	u64 now, sltime;
-	DECLARE_WAITQUEUE(wait, current);

	vcpu->stat.exit_wait_state++;
-	if (kvm_cpu_has_interrupt(vcpu))
-		return 0;

-	__set_cpu_idle(vcpu);
-	spin_lock_bh(&vcpu->arch.local_int.lock);
-	vcpu->arch.local_int.timer_due = 0;
-	spin_unlock_bh(&vcpu->arch.local_int.lock);
+	/* fast path */
+	if (kvm_cpu_has_pending_timer(vcpu) || kvm_arch_vcpu_runnable(vcpu))
+		return 0;

	if (psw_interrupts_disabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
-		__unset_cpu_idle(vcpu);
		return -EOPNOTSUPP; /* disabled wait */
	}

+	__set_cpu_idle(vcpu);
	if (!ckc_interrupts_enabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
		goto no_timer;
	}

	now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
-	if (vcpu->arch.sie_block->ckc < now) {
-		__unset_cpu_idle(vcpu);
-		return 0;
-	}
-
	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
-
	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
	VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
no_timer:
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
-	spin_lock(&vcpu->arch.local_int.float_int->lock);
-	spin_lock_bh(&vcpu->arch.local_int.lock);
-	add_wait_queue(&vcpu->wq, &wait);
-	while (list_empty(&vcpu->arch.local_int.list) &&
-		list_empty(&vcpu->arch.local_int.float_int->list) &&
-		(!vcpu->arch.local_int.timer_due) &&
-		!signal_pending(current) &&
-		!kvm_s390_si_ext_call_pending(vcpu)) {
-		set_current_state(TASK_INTERRUPTIBLE);
-		spin_unlock_bh(&vcpu->arch.local_int.lock);
-		spin_unlock(&vcpu->arch.local_int.float_int->lock);
-		schedule();
-		spin_lock(&vcpu->arch.local_int.float_int->lock);
-		spin_lock_bh(&vcpu->arch.local_int.lock);
-	}
+	kvm_vcpu_block(vcpu);
	__unset_cpu_idle(vcpu);
-	__set_current_state(TASK_RUNNING);
-	remove_wait_queue(&vcpu->wq, &wait);
-	spin_unlock_bh(&vcpu->arch.local_int.lock);
-	spin_unlock(&vcpu->arch.local_int.float_int->lock);
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
	return 0;
}

-void kvm_s390_tasklet(unsigned long parm)
+void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
{
-	struct kvm_vcpu *vcpu = (struct kvm_vcpu *) parm;
-
-	spin_lock(&vcpu->arch.local_int.lock);
-	vcpu->arch.local_int.timer_due = 1;
-	if (waitqueue_active(&vcpu->wq))
+	if (waitqueue_active(&vcpu->wq)) {
+		/*
+		 * The vcpu gave up the cpu voluntarily, mark it as a good
+		 * yield-candidate.
+		 */
+		vcpu->preempted = true;
		wake_up_interruptible(&vcpu->wq);
-	spin_unlock(&vcpu->arch.local_int.lock);
+	}
}

-/*
- * low level hrtimer wake routine. Because this runs in hardirq context
- * we schedule a tasklet to do the real work.
- */
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
-	vcpu->preempted = true;
-	tasklet_schedule(&vcpu->arch.tasklet);
+	kvm_s390_vcpu_wakeup(vcpu);

	return HRTIMER_NORESTART;
}
@@ -676,13 +644,13 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_interrupt_info *n, *inti = NULL;

-	spin_lock_bh(&li->lock);
+	spin_lock(&li->lock);
	list_for_each_entry_safe(inti, n, &li->list, list) {
		list_del(&inti->list);
		kfree(inti);
	}
	atomic_set(&li->active, 0);
-	spin_unlock_bh(&li->lock);
+	spin_unlock(&li->lock);

	/* clear pending external calls set by sigp interpretation facility */
	atomic_clear_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
@@ -701,7 +669,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
	if (atomic_read(&li->active)) {
		do {
			deliver = 0;
-			spin_lock_bh(&li->lock);
+			spin_lock(&li->lock);
			list_for_each_entry_safe(inti, n, &li->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
@@ -712,7 +680,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
			}
			if (list_empty(&li->list))
				atomic_set(&li->active, 0);
-			spin_unlock_bh(&li->lock);
+			spin_unlock(&li->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
@@ -758,7 +726,7 @@ void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu)
	if (atomic_read(&li->active)) {
		do {
			deliver = 0;
-			spin_lock_bh(&li->lock);
+			spin_lock(&li->lock);
			list_for_each_entry_safe(inti, n, &li->list, list) {
				if ((inti->type == KVM_S390_MCHK) &&
				    __interrupt_is_deliverable(vcpu, inti)) {
@@ -770,7 +738,7 @@ void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu)
			}
			if (list_empty(&li->list))
				atomic_set(&li->active, 0);
-			spin_unlock_bh(&li->lock);
+			spin_unlock(&li->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
@@ -817,11 +785,11 @@ int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
	VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, inti->type, code, 0, 1);

-	spin_lock_bh(&li->lock);
+	spin_lock(&li->lock);
	list_add(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	BUG_ON(waitqueue_active(li->wq));
-	spin_unlock_bh(&li->lock);
+	spin_unlock(&li->lock);

	return 0;
}
@@ -842,11 +810,11 @@ int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
	inti->type = KVM_S390_PROGRAM_INT;
	memcpy(&inti->pgm, pgm_info, sizeof(inti->pgm));

-	spin_lock_bh(&li->lock);
+	spin_lock(&li->lock);
	list_add(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	BUG_ON(waitqueue_active(li->wq));
-	spin_unlock_bh(&li->lock);
+	spin_unlock(&li->lock);

	return 0;
}
@@ -934,12 +902,10 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
	}
	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
	li = &dst_vcpu->arch.local_int;
-	spin_lock_bh(&li->lock);
+	spin_lock(&li->lock);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
-	if (waitqueue_active(li->wq))
-		wake_up_interruptible(li->wq);
-	kvm_get_vcpu(kvm, sigcpu)->preempted = true;
-	spin_unlock_bh(&li->lock);
+	spin_unlock(&li->lock);
+	kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu));
unlock_fi:
	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);
@@ -1081,7 +1047,7 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
	mutex_lock(&vcpu->kvm->lock);
	li = &vcpu->arch.local_int;
-	spin_lock_bh(&li->lock);
+	spin_lock(&li->lock);
	if (inti->type == KVM_S390_PROGRAM_INT)
		list_add(&inti->list, &li->list);
	else
@@ -1090,11 +1056,9 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
	if (inti->type == KVM_S390_SIGP_STOP)
		li->action_bits |= ACTION_STOP_ON_STOP;
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
-	if (waitqueue_active(&vcpu->wq))
-		wake_up_interruptible(&vcpu->wq);
-	vcpu->preempted = true;
-	spin_unlock_bh(&li->lock);
+	spin_unlock(&li->lock);
	mutex_unlock(&vcpu->kvm->lock);
+	kvm_s390_vcpu_wakeup(vcpu);
	return 0;
}
@@ -166,6 +166,7 @@ int kvm_dev_ioctl_check_extension(long ext)
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
+	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
		r = 1;
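
The case label added above advertises KVM_CAP_S390_IRQCHIP via KVM_CHECK_EXTENSION; because its Target is vm (section 6.8 above), userspace enables it on the VM file descriptor rather than on a vcpu. A minimal sketch, assuming vm_fd is an open VM file descriptor:

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdio.h>

/* Sketch: enable the s390 in-kernel irqchip on the VM fd.
 * vm_fd is an assumed, already-created VM file descriptor. */
static int enable_s390_irqchip(int vm_fd)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_S390_IRQCHIP,	/* Target: vm, no parameters */
	};

	if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0) {
		perror("KVM_ENABLE_CAP(KVM_CAP_S390_IRQCHIP)");
		return -1;
	}
	return 0;
}
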
@@ -649,8 +650,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
		return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
-	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
-		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
@@ -1068,6 +1067,9 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
		goto retry;
	}

+	/* nothing to do, just clear the request */
+	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
+
	return 0;
}
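
For context on the request handling added above: the generic kvm_vcpu_block() raises KVM_REQ_UNHALT when it finds the vcpu runnable, and s390 delivers its interrupts itself, so it only clears the bit. A rough, from-memory sketch of the generic halt loop this series lets s390 reuse; the authoritative version lives in virt/kvm/kvm_main.c, not in this patch:

/* Paraphrased sketch of kvm_vcpu_block(); see virt/kvm/kvm_main.c. */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

		if (kvm_arch_vcpu_runnable(vcpu)) {
			/* This is the request that s390 clears above. */
			kvm_make_request(KVM_REQ_UNHALT, vcpu);
			break;
		}
		if (kvm_cpu_has_pending_timer(vcpu) || signal_pending(current))
			break;

		schedule();
	}

	finish_wait(&vcpu->wq, &wait);
}
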
@@ -1475,7 +1477,7 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);

	/* Only one cpu at a time may enter/leave the STOPPED state. */
-	spin_lock_bh(&vcpu->kvm->arch.start_stop_lock);
+	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	for (i = 0; i < online_vcpus; i++) {
@@ -1501,7 +1503,7 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
	 * Let's play safe and flush the VCPU at startup.
	 */
	vcpu->arch.sie_block->ihcpu = 0xffff;
-	spin_unlock_bh(&vcpu->kvm->arch.start_stop_lock);
+	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}
@@ -1515,17 +1517,17 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);

	/* Only one cpu at a time may enter/leave the STOPPED state. */
-	spin_lock_bh(&vcpu->kvm->arch.start_stop_lock);
+	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* Need to lock access to action_bits to avoid a SIGP race condition */
-	spin_lock_bh(&vcpu->arch.local_int.lock);
+	spin_lock(&vcpu->arch.local_int.lock);
	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
	vcpu->arch.local_int.action_bits &=
		~(ACTION_STOP_ON_STOP | ACTION_STORE_ON_STOP);
-	spin_unlock_bh(&vcpu->arch.local_int.lock);
+	spin_unlock(&vcpu->arch.local_int.lock);

	__disable_ibs_on_vcpu(vcpu);
@@ -1544,7 +1546,7 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
		__enable_ibs_on_vcpu(started_vcpu);
	}

-	spin_unlock_bh(&vcpu->kvm->arch.start_stop_lock);
+	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}
@@ -136,8 +136,8 @@ static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm)
}

int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
+void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer);
-void kvm_s390_tasklet(unsigned long parm);
void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu);
void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu);
@@ -125,8 +125,9 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
	return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
}

-static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
+static int __inject_sigp_stop(struct kvm_vcpu *dst_vcpu, int action)
{
+	struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
@@ -135,7 +136,7 @@ static int __inject_sigp_stop(struct kvm_vcpu *dst_vcpu, int action)
		return -ENOMEM;
	inti->type = KVM_S390_SIGP_STOP;

-	spin_lock_bh(&li->lock);
+	spin_lock(&li->lock);
	if (li->action_bits & ACTION_STOP_ON_STOP) {
		/* another SIGP STOP is pending */
		rc = SIGP_CC_BUSY;
@@ -151,17 +152,15 @@ static int __inject_sigp_stop(struct kvm_vcpu *dst_vcpu, int action)
	atomic_set(&li->active, 1);
	li->action_bits |= action;
	atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
-	if (waitqueue_active(li->wq))
-		wake_up_interruptible(li->wq);
+	kvm_s390_vcpu_wakeup(dst_vcpu);
out:
-	spin_unlock_bh(&li->lock);
+	spin_unlock(&li->lock);

	return rc;
}

static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
{
-	struct kvm_s390_local_interrupt *li;
	struct kvm_vcpu *dst_vcpu = NULL;
	int rc;
@@ -171,9 +170,8 @@ static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
-	li = &dst_vcpu->arch.local_int;

-	rc = __inject_sigp_stop(li, action);
+	rc = __inject_sigp_stop(dst_vcpu, action);

	VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
@@ -243,7 +241,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
	if (!inti)
		return SIGP_CC_BUSY;

-	spin_lock_bh(&li->lock);
+	spin_lock(&li->lock);
	/* cpu must be in stopped state */
	if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
		*reg &= 0xffffffff00000000UL;
@@ -258,13 +256,12 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
-	if (waitqueue_active(li->wq))
-		wake_up_interruptible(li->wq);
+	kvm_s390_vcpu_wakeup(dst_vcpu);
	rc = SIGP_CC_ORDER_CODE_ACCEPTED;

	VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
out_li:
-	spin_unlock_bh(&li->lock);
+	spin_unlock(&li->lock);
	return rc;
}
@@ -280,9 +277,9 @@ static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id,
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;

-	spin_lock_bh(&dst_vcpu->arch.local_int.lock);
+	spin_lock(&dst_vcpu->arch.local_int.lock);
	flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
-	spin_unlock_bh(&dst_vcpu->arch.local_int.lock);
+	spin_unlock(&dst_vcpu->arch.local_int.lock);
	if (!(flags & CPUSTAT_STOPPED)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
@@ -343,10 +340,10 @@ static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr)
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
	li = &dst_vcpu->arch.local_int;
-	spin_lock_bh(&li->lock);
+	spin_lock(&li->lock);
	if (li->action_bits & ACTION_STOP_ON_STOP)
		rc = SIGP_CC_BUSY;
-	spin_unlock_bh(&li->lock);
+	spin_unlock(&li->lock);

	return rc;
}
@@ -466,12 +463,7 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
	dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	BUG_ON(dest_vcpu == NULL);

-	spin_lock_bh(&dest_vcpu->arch.local_int.lock);
-	if (waitqueue_active(&dest_vcpu->wq))
-		wake_up_interruptible(&dest_vcpu->wq);
-	dest_vcpu->preempted = true;
-	spin_unlock_bh(&dest_vcpu->arch.local_int.lock);
+	kvm_s390_vcpu_wakeup(dest_vcpu);

	kvm_s390_set_psw_cc(vcpu, SIGP_CC_ORDER_CODE_ACCEPTED);
	return 0;
}