Commit 8fff5e37 authored by Paolo Bonzini

Merge tag 'kvm-s390-next-20150122' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into kvm-next

KVM: s390: fixes and features for kvm/next (3.20)

1. Generic
- sparse warning (make function static)
- optimize locking
- bugfixes for interrupt injection
- fix MVPG addressing modes

2. hrtimer/wakeup fun
A recent change can cause KVM hangs if adjtime is used in the host.
The hrtimer might wake up too early or too late. Waking up too early
is fatal, as vcpu_block will see that the wakeup condition is not met
and go back to sleep; that CPU might then never wake up again.
This series addresses the problem. Slowing down the host clock via
adjtime will still result in too-late wakeups; that will require more
work. In addition, we change the hrtimer from REALTIME to MONOTONIC
to avoid similar problems with timedatectl set-time.

3. sigp rework
We will move all "slow" sigps to QEMU (protected with a capability that
can be enabled) to avoid several races between concurrent SIGP orders.

4. Optimize the shadow page table
Provide an interface to announce the maximum guest size. The kernel
will use that to make the page table 2, 3, 4 or (theoretically) 5 levels.

5. Provide an interface to set the guest TOD
We now use two vm attributes instead of two one_regs, as one_regs are
vcpu ioctls and we don't want to call them from other threads.

6. Protected key functions
The real HMC allows enabling/disabling the protected key CPACF
functions. Let's provide an implementation plus an interface for QEMU
to activate the protected key instructions.
Parents: 1c6007d5 0eb135ff
@@ -2315,7 +2315,7 @@ struct kvm_s390_interrupt {
 type can be one of the following:

-KVM_S390_SIGP_STOP (vcpu) - sigp restart
+KVM_S390_SIGP_STOP (vcpu) - sigp stop; optional flags in parm
 KVM_S390_PROGRAM_INT (vcpu) - program check; code in parm
 KVM_S390_SIGP_SET_PREFIX (vcpu) - sigp set prefix; prefix address in parm
 KVM_S390_RESTART (vcpu) - restart
@@ -3228,3 +3228,23 @@ userspace from doing that.
 If the hcall number specified is not one that has an in-kernel
 implementation, the KVM_ENABLE_CAP ioctl will fail with an EINVAL
 error.
+
+7.2 KVM_CAP_S390_USER_SIGP
+
+Architectures: s390
+Parameters: none
+
+This capability controls which SIGP orders will be handled completely in user
+space. With this capability enabled, all fast orders will be handled completely
+in the kernel:
+- SENSE
+- SENSE RUNNING
+- EXTERNAL CALL
+- EMERGENCY SIGNAL
+- CONDITIONAL EMERGENCY SIGNAL
+
+All other orders will be handled completely in user space.
+
+Only privileged operation exceptions will be checked for in the kernel (or even
+in the hardware prior to interception). If this capability is not enabled, the
+old way of handling SIGP orders is used (partially in kernel and user space).
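
As a hedged illustration (not part of this series), enabling the capability
from a VMM would look roughly like the sketch below; vm_fd is assumed to be a
VM file descriptor obtained via KVM_CREATE_VM, and the helper name is made up:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Sketch: ask the kernel to forward all "slow" SIGP orders to user space.
 * Assumes KVM_CHECK_EXTENSION already reported KVM_CAP_S390_USER_SIGP. */
static int enable_user_sigp(int vm_fd)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_S390_USER_SIGP,
	};

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);	/* 0 on success */
}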
@@ -24,3 +24,17 @@ Returns: 0
 Clear the CMMA status for all guest pages, so any pages the guest marked
 as unused are again used and may not be reclaimed by the host.
+
+1.3. ATTRIBUTE KVM_S390_VM_MEM_LIMIT_SIZE
+
+Parameters: in attr->addr the address for the new limit of guest memory
+Returns: -EFAULT if the given address is not accessible
+         -EINVAL if the virtual machine is of type UCONTROL
+         -E2BIG if the given guest memory is too big for that machine
+         -EBUSY if a vcpu is already defined
+         -ENOMEM if not enough memory is available for a new shadow guest mapping
+         0 otherwise
+
+Allows userspace to query the actual limit and set a new limit for
+the maximum guest memory size. The limit will be rounded up to
+2048 MB, 4096 GB, 8192 TB respectively, as this limit is governed by
+the number of page table levels.
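
For illustration, a userspace sketch of setting the new limit through the
standard KVM device-attribute interface; the helper name is an assumption,
while the ioctl and struct kvm_device_attr are the regular KVM API:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Sketch: announce the maximum guest size before vcpus are created, so the
 * kernel can choose a 2-, 3-, 4- or (theoretically) 5-level page table. */
static int set_guest_mem_limit(int vm_fd, __u64 new_limit)
{
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_MEM_CTRL,
		.attr  = KVM_S390_VM_MEM_LIMIT_SIZE,
		.addr  = (__u64)(unsigned long)&new_limit, /* limit read from here */
	};

	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}

Querying the current limit works the same way with KVM_GET_DEVICE_ATTR.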
@@ -249,9 +249,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 	return ERR_PTR(err);
 }

-int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 {
-	return 0;
 }

 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
...
@@ -832,9 +832,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	return -ENOIOCTLCMD;
 }

-int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 {
-	return 0;
 }

 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
...
@@ -623,9 +623,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 	return vcpu;
 }

-int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 {
-	return 0;
 }

 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
...
@@ -35,11 +35,13 @@
 #define KVM_NR_IRQCHIPS 1
 #define KVM_IRQCHIP_NUM_PINS 4096

-#define SIGP_CTRL_C		0x00800000
+#define SIGP_CTRL_C		0x80
+#define SIGP_CTRL_SCN_MASK	0x3f

 struct sca_entry {
-	atomic_t ctrl;
-	__u32	reserved;
+	__u8	reserved0;
+	__u8	sigp_ctrl;
+	__u16	reserved[3];
 	__u64	sda;
 	__u64	reserved2[2];
 } __attribute__((packed));
@@ -132,7 +134,9 @@ struct kvm_s390_sie_block {
 	__u8	reserved60;		/* 0x0060 */
 	__u8	ecb;			/* 0x0061 */
 	__u8	ecb2;			/* 0x0062 */
-	__u8	reserved63[1];		/* 0x0063 */
+#define ECB3_AES 0x04
+#define ECB3_DEA 0x08
+	__u8	ecb3;			/* 0x0063 */
 	__u32	scaol;			/* 0x0064 */
 	__u8	reserved68[4];		/* 0x0068 */
 	__u32	todpr;			/* 0x006c */
@@ -378,14 +382,11 @@ struct kvm_s390_interrupt_info {
 		struct kvm_s390_emerg_info emerg;
 		struct kvm_s390_extcall_info extcall;
 		struct kvm_s390_prefix_info prefix;
+		struct kvm_s390_stop_info stop;
 		struct kvm_s390_mchk_info mchk;
 	};
 };

-/* for local_interrupt.action_flags */
-#define ACTION_STORE_ON_STOP (1<<0)
-#define ACTION_STOP_ON_STOP (1<<1)
-
 struct kvm_s390_irq_payload {
 	struct kvm_s390_io_info io;
 	struct kvm_s390_ext_info ext;
@@ -393,6 +394,7 @@ struct kvm_s390_irq_payload {
 	struct kvm_s390_emerg_info emerg;
 	struct kvm_s390_extcall_info extcall;
 	struct kvm_s390_prefix_info prefix;
+	struct kvm_s390_stop_info stop;
 	struct kvm_s390_mchk_info mchk;
 };
@@ -401,7 +403,6 @@ struct kvm_s390_local_interrupt {
 	struct kvm_s390_float_interrupt *float_int;
 	wait_queue_head_t *wq;
 	atomic_t *cpuflags;
-	unsigned int action_bits;
 	DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS);
 	struct kvm_s390_irq_payload irq;
 	unsigned long pending_irqs;
@@ -470,7 +471,6 @@ struct kvm_vcpu_arch {
 	};
 	struct gmap *gmap;
 	struct kvm_guestdbg_info_arch guestdbg;
-#define KVM_S390_PFAULT_TOKEN_INVALID	(-1UL)
 	unsigned long pfault_token;
 	unsigned long pfault_select;
 	unsigned long pfault_compare;
@@ -507,10 +507,14 @@ struct s390_io_adapter {
 struct kvm_s390_crypto {
 	struct kvm_s390_crypto_cb *crycb;
 	__u32 crycbd;
+	__u8 aes_kw;
+	__u8 dea_kw;
 };

 struct kvm_s390_crypto_cb {
-	__u8	reserved00[128];		/* 0x0000 */
+	__u8	reserved00[72];			/* 0x0000 */
+	__u8	dea_wrapping_key_mask[24];	/* 0x0048 */
+	__u8	aes_wrapping_key_mask[32];	/* 0x0060 */
 };

 struct kvm_arch{
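The new aes_kw/dea_kw flags and the split wrapping-key masks in the CRYCB back
the protected key interface from point 6 of the cover letter. Below is a
hedged userspace sketch of toggling them through the KVM_S390_VM_CRYPTO
attribute group defined further down; the helper name is invented:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Sketch: enable or disable the AES protected-key (CPACF) functions for a
 * guest. On enable, the kernel is expected to fill the AES wrapping key mask
 * in the CRYCB; on disable, to clear it. DEA works the same way with the
 * corresponding *_DEA_KW attributes. */
static int set_aes_key_wrapping(int vm_fd, int enable)
{
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_CRYPTO,
		.attr  = enable ? KVM_S390_VM_CRYPTO_ENABLE_AES_KW
				: KVM_S390_VM_CRYPTO_DISABLE_AES_KW,
	};

	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}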
@@ -523,12 +527,14 @@ struct kvm_arch{
 	int use_irqchip;
 	int use_cmma;
 	int user_cpu_state_ctrl;
+	int user_sigp;
 	struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
 	wait_queue_head_t ipte_wq;
 	int ipte_lock_count;
 	struct mutex ipte_mutex;
 	spinlock_t start_stop_lock;
 	struct kvm_s390_crypto crypto;
+	u64 epoch;
 };

 #define KVM_HVA_ERR_BAD		(-1UL)
...
@@ -31,7 +31,8 @@ struct sclp_cpu_entry {
 	u8 reserved0[2];
 	u8 : 3;
 	u8 siif : 1;
-	u8 : 4;
+	u8 sigpif : 1;
+	u8 : 3;
 	u8 reserved2[10];
 	u8 type;
 	u8 reserved1;
@@ -66,6 +67,7 @@ int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode);
 unsigned long sclp_get_hsa_size(void);
 void sclp_early_detect(void);
 int sclp_has_siif(void);
+int sclp_has_sigpif(void);
 unsigned int sclp_get_ibc(void);

 #endif /* _ASM_S390_SCLP_H */
@@ -57,10 +57,23 @@ struct kvm_s390_io_adapter_req {

 /* kvm attr_group on vm fd */
 #define KVM_S390_VM_MEM_CTRL		0
+#define KVM_S390_VM_TOD			1
+#define KVM_S390_VM_CRYPTO		2

 /* kvm attributes for mem_ctrl */
 #define KVM_S390_VM_MEM_ENABLE_CMMA	0
 #define KVM_S390_VM_MEM_CLR_CMMA	1
+#define KVM_S390_VM_MEM_LIMIT_SIZE	2
+
+/* kvm attributes for KVM_S390_VM_TOD */
+#define KVM_S390_VM_TOD_LOW		0
+#define KVM_S390_VM_TOD_HIGH		1
+
+/* kvm attributes for crypto */
+#define KVM_S390_VM_CRYPTO_ENABLE_AES_KW	0
+#define KVM_S390_VM_CRYPTO_ENABLE_DEA_KW	1
+#define KVM_S390_VM_CRYPTO_DISABLE_AES_KW	2
+#define KVM_S390_VM_CRYPTO_DISABLE_DEA_KW	3

 /* for KVM_GET_REGS and KVM_SET_REGS */
 struct kvm_regs {
@@ -107,6 +120,9 @@ struct kvm_guest_debug_arch {
 	struct kvm_hw_breakpoint __user *hw_bp;
 };

+/* for KVM_SYNC_PFAULT and KVM_REG_S390_PFTOKEN */
+#define KVM_S390_PFAULT_TOKEN_INVALID	0xffffffffffffffffULL
+
 #define KVM_SYNC_PREFIX (1UL << 0)
 #define KVM_SYNC_GPRS   (1UL << 1)
 #define KVM_SYNC_ACRS   (1UL << 2)
...
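These TOD attributes implement point 5 of the cover letter: the guest TOD can
now be set on the VM fd from any thread. A hedged sketch follows; the helper
is invented, and it assumes the high attribute carries a u8 epoch extension
and the low attribute the 64-bit TOD value:

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int set_guest_tod(int vm_fd, __u8 tod_high, __u64 tod_low)
{
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_TOD,
		.attr  = KVM_S390_VM_TOD_HIGH,
		.addr  = (__u64)(unsigned long)&tod_high,
	};
	int rc;

	rc = ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);	/* epoch extension */
	if (rc)
		return rc;

	attr.attr = KVM_S390_VM_TOD_LOW;
	attr.addr = (__u64)(unsigned long)&tod_low;
	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);	/* TOD clock value */
}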
@@ -68,18 +68,27 @@ static int handle_noop(struct kvm_vcpu *vcpu)

 static int handle_stop(struct kvm_vcpu *vcpu)
 {
+	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 	int rc = 0;
-	unsigned int action_bits;
+	uint8_t flags, stop_pending;

 	vcpu->stat.exit_stop_request++;
-	trace_kvm_s390_stop_request(vcpu->arch.local_int.action_bits);

-	action_bits = vcpu->arch.local_int.action_bits;
+	/* delay the stop if any non-stop irq is pending */
+	if (kvm_s390_vcpu_has_irq(vcpu, 1))
+		return 0;

-	if (!(action_bits & ACTION_STOP_ON_STOP))
+	/* avoid races with the injection/SIGP STOP code */
+	spin_lock(&li->lock);
+	flags = li->irq.stop.flags;
+	stop_pending = kvm_s390_is_stop_irq_pending(vcpu);
+	spin_unlock(&li->lock);
+
+	trace_kvm_s390_stop_request(stop_pending, flags);
+	if (!stop_pending)
 		return 0;

-	if (action_bits & ACTION_STORE_ON_STOP) {
+	if (flags & KVM_S390_STOP_FLAG_STORE_STATUS) {
 		rc = kvm_s390_vcpu_store_status(vcpu,
 						KVM_S390_STORE_STATUS_NOADDR);
 		if (rc)
@@ -279,11 +288,13 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu)
 		irq.type = KVM_S390_INT_CPU_TIMER;
 		break;
 	case EXT_IRQ_EXTERNAL_CALL:
-		if (kvm_s390_si_ext_call_pending(vcpu))
-			return 0;
 		irq.type = KVM_S390_INT_EXTERNAL_CALL;
 		irq.u.extcall.code = vcpu->arch.sie_block->extcpuaddr;
-		break;
+		rc = kvm_s390_inject_vcpu(vcpu, &irq);
+		/* ignore if another external call is already pending */
+		if (rc == -EBUSY)
+			return 0;
+		return rc;
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -307,17 +318,19 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
 	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

 	/* Make sure that the source is paged-in */
-	srcaddr = kvm_s390_real_to_abs(vcpu, vcpu->run->s.regs.gprs[reg2]);
-	if (kvm_is_error_gpa(vcpu->kvm, srcaddr))
-		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+	rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg2],
+				     &srcaddr, 0);
+	if (rc)
+		return kvm_s390_inject_prog_cond(vcpu, rc);
 	rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0);
 	if (rc != 0)
 		return rc;

 	/* Make sure that the destination is paged-in */
-	dstaddr = kvm_s390_real_to_abs(vcpu, vcpu->run->s.regs.gprs[reg1]);
-	if (kvm_is_error_gpa(vcpu->kvm, dstaddr))
-		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+	rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg1],
+				     &dstaddr, 1);
+	if (rc)
+		return kvm_s390_inject_prog_cond(vcpu, rc);
 	rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1);
 	if (rc != 0)
 		return rc;
...
@@ -228,11 +228,13 @@ int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
 			struct kvm_s390_irq *s390irq);

 /* implemented in interrupt.c */
-int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
+int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop);
 int psw_extint_disabled(struct kvm_vcpu *vcpu);
 void kvm_s390_destroy_adapters(struct kvm *kvm);
-int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu);
+int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu);
 extern struct kvm_device_ops kvm_flic_ops;
+int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu);
+void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu);

 /* implemented in guestdbg.c */
 void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu);
...
@@ -26,15 +26,17 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
 	struct kvm_s390_local_interrupt *li;
 	int cpuflags;
 	int rc;
+	int ext_call_pending;

 	li = &dst_vcpu->arch.local_int;
 	cpuflags = atomic_read(li->cpuflags);
-	if (!(cpuflags & (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED)))
+	ext_call_pending = kvm_s390_ext_call_pending(dst_vcpu);
+	if (!(cpuflags & CPUSTAT_STOPPED) && !ext_call_pending)
 		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
 	else {
 		*reg &= 0xffffffff00000000UL;
-		if (cpuflags & CPUSTAT_ECALL_PEND)
+		if (ext_call_pending)
 			*reg |= SIGP_STATUS_EXT_CALL_PENDING;
 		if (cpuflags & CPUSTAT_STOPPED)
 			*reg |= SIGP_STATUS_STOPPED;
@@ -96,7 +98,7 @@ static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu,
 }

 static int __sigp_external_call(struct kvm_vcpu *vcpu,
-				struct kvm_vcpu *dst_vcpu)
+				struct kvm_vcpu *dst_vcpu, u64 *reg)
 {
 	struct kvm_s390_irq irq = {
 		.type = KVM_S390_INT_EXTERNAL_CALL,
@@ -105,45 +107,31 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu,
 	int rc;

 	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
-	if (!rc)
+	if (rc == -EBUSY) {
+		*reg &= 0xffffffff00000000UL;
+		*reg |= SIGP_STATUS_EXT_CALL_PENDING;
+		return SIGP_CC_STATUS_STORED;
+	} else if (rc == 0) {
 		VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x",
 			   dst_vcpu->vcpu_id);
-
-	return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
-}
-
-static int __inject_sigp_stop(struct kvm_vcpu *dst_vcpu, int action)
-{
-	struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
-	int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
-
-	spin_lock(&li->lock);
-	if (li->action_bits & ACTION_STOP_ON_STOP) {
-		/* another SIGP STOP is pending */
-		rc = SIGP_CC_BUSY;
-		goto out;
-	}
-	if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
-		if ((action & ACTION_STORE_ON_STOP) != 0)
-			rc = -ESHUTDOWN;
-		goto out;
-	}
-	set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
-	li->action_bits |= action;
-	atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
-	kvm_s390_vcpu_wakeup(dst_vcpu);
-out:
-	spin_unlock(&li->lock);
+	}

-	return rc;
+	return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
 }

 static int __sigp_stop(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
 {
+	struct kvm_s390_irq irq = {
+		.type = KVM_S390_SIGP_STOP,
+	};
 	int rc;

-	rc = __inject_sigp_stop(dst_vcpu, ACTION_STOP_ON_STOP);
-	VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", dst_vcpu->vcpu_id);
+	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
+	if (rc == -EBUSY)
+		rc = SIGP_CC_BUSY;
+	else if (rc == 0)
+		VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x",
+			   dst_vcpu->vcpu_id);

 	return rc;
 }
@@ -151,20 +139,18 @@ static int __sigp_stop(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
 static int __sigp_stop_and_store_status(struct kvm_vcpu *vcpu,
 					struct kvm_vcpu *dst_vcpu, u64 *reg)
 {
+	struct kvm_s390_irq irq = {
+		.type = KVM_S390_SIGP_STOP,
+		.u.stop.flags = KVM_S390_STOP_FLAG_STORE_STATUS,
+	};
 	int rc;

-	rc = __inject_sigp_stop(dst_vcpu, ACTION_STOP_ON_STOP |
-					      ACTION_STORE_ON_STOP);
-	VCPU_EVENT(vcpu, 4, "sent sigp stop and store status to cpu %x",
-		   dst_vcpu->vcpu_id);
-
-	if (rc == -ESHUTDOWN) {
-		/* If the CPU has already been stopped, we still have
-		 * to save the status when doing stop-and-store. This
-		 * has to be done after unlocking all spinlocks. */
-		rc = kvm_s390_store_status_unloaded(dst_vcpu,
-						KVM_S390_STORE_STATUS_NOADDR);
-	}
+	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
+	if (rc == -EBUSY)
+		rc = SIGP_CC_BUSY;
+	else if (rc == 0)
+		VCPU_EVENT(vcpu, 4, "sent sigp stop and store status to cpu %x",
+			   dst_vcpu->vcpu_id);

 	return rc;
 }
@@ -197,41 +183,33 @@ static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
 static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
 			     u32 address, u64 *reg)
 {
-	struct kvm_s390_local_interrupt *li;
+	struct kvm_s390_irq irq = {
+		.type = KVM_S390_SIGP_SET_PREFIX,
+		.u.prefix.address = address & 0x7fffe000u,
+	};
 	int rc;

-	li = &dst_vcpu->arch.local_int;
-
 	/*
 	 * Make sure the new value is valid memory. We only need to check the
 	 * first page, since address is 8k aligned and memory pieces are always
 	 * at least 1MB aligned and have at least a size of 1MB.
 	 */
-	address &= 0x7fffe000u;
-	if (kvm_is_error_gpa(vcpu->kvm, address)) {
+	if (kvm_is_error_gpa(vcpu->kvm, irq.u.prefix.address)) {
 		*reg &= 0xffffffff00000000UL;
 		*reg |= SIGP_STATUS_INVALID_PARAMETER;
 		return SIGP_CC_STATUS_STORED;
 	}

-	spin_lock(&li->lock);
-	/* cpu must be in stopped state */
-	if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
+	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
+	if (rc == -EBUSY) {
 		*reg &= 0xffffffff00000000UL;
 		*reg |= SIGP_STATUS_INCORRECT_STATE;
-		rc = SIGP_CC_STATUS_STORED;
-		goto out_li;
+		return SIGP_CC_STATUS_STORED;
+	} else if (rc == 0) {
+		VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x",
+			   dst_vcpu->vcpu_id, irq.u.prefix.address);
 	}

-	li->irq.prefix.address = address;
-	set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
-	kvm_s390_vcpu_wakeup(dst_vcpu);
-	rc = SIGP_CC_ORDER_CODE_ACCEPTED;
-
-	VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", dst_vcpu->vcpu_id,
-		   address);
-out_li:
-	spin_unlock(&li->lock);
 	return rc;
 }
@@ -242,9 +220,7 @@ static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu,
 	int flags;
 	int rc;

-	spin_lock(&dst_vcpu->arch.local_int.lock);
 	flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
-	spin_unlock(&dst_vcpu->arch.local_int.lock);
 	if (!(flags & CPUSTAT_STOPPED)) {
 		*reg &= 0xffffffff00000000UL;
 		*reg |= SIGP_STATUS_INCORRECT_STATE;
@@ -291,8 +267,9 @@ static int __prepare_sigp_re_start(struct kvm_vcpu *vcpu,
 	/* handle (RE)START in user space */
 	int rc = -EOPNOTSUPP;

+	/* make sure we don't race with STOP irq injection */
 	spin_lock(&li->lock);
-	if (li->action_bits & ACTION_STOP_ON_STOP)
+	if (kvm_s390_is_stop_irq_pending(dst_vcpu))
 		rc = SIGP_CC_BUSY;
 	spin_unlock(&li->lock);
@@ -333,7 +310,7 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
 		break;
 	case SIGP_EXTERNAL_CALL:
 		vcpu->stat.instruction_sigp_external_call++;
-		rc = __sigp_external_call(vcpu, dst_vcpu);
+		rc = __sigp_external_call(vcpu, dst_vcpu, status_reg);
 		break;
 	case SIGP_EMERGENCY_SIGNAL:
 		vcpu->stat.instruction_sigp_emergency++;
@@ -394,6 +371,53 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
 	return rc;
 }

+static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code)
+{
+	if (!vcpu->kvm->arch.user_sigp)
+		return 0;
+
+	switch (order_code) {
+	case SIGP_SENSE:
+	case SIGP_EXTERNAL_CALL:
+	case SIGP_EMERGENCY_SIGNAL:
+	case SIGP_COND_EMERGENCY_SIGNAL:
+	case SIGP_SENSE_RUNNING:
+		return 0;
+	/* update counters as we're directly dropping to user space */
+	case SIGP_STOP:
+		vcpu->stat.instruction_sigp_stop++;
+		break;
+	case SIGP_STOP_AND_STORE_STATUS:
+		vcpu->stat.instruction_sigp_stop_store_status++;
+		break;
+	case SIGP_STORE_STATUS_AT_ADDRESS:
+		vcpu->stat.instruction_sigp_store_status++;
+		break;
+	case SIGP_SET_PREFIX:
+		vcpu->stat.instruction_sigp_prefix++;
+		break;
+	case SIGP_START:
+		vcpu->stat.instruction_sigp_start++;
+		break;
+	case SIGP_RESTART:
+		vcpu->stat.instruction_sigp_restart++;
+		break;
+	case SIGP_INITIAL_CPU_RESET:
+		vcpu->stat.instruction_sigp_init_cpu_reset++;
+		break;
+	case SIGP_CPU_RESET:
+		vcpu->stat.instruction_sigp_cpu_reset++;
+		break;
+	default:
+		vcpu->stat.instruction_sigp_unknown++;
+	}
+
+	VCPU_EVENT(vcpu, 4, "sigp order %u: completely handled in user space",
+		   order_code);
+
+	return 1;
+}
+
 int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
 {
 	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
@@ -408,6 +432,8 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

 	order_code = kvm_s390_get_base_disp_rs(vcpu);
+	if (handle_sigp_order_in_user_space(vcpu, order_code))
+		return -EOPNOTSUPP;

 	if (r1 % 2)
 		parameter = vcpu->run->s.regs.gprs[r1];
...
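Returning -EOPNOTSUPP here makes the intercept handler bail out to user
space, so the forwarded order shows up in the VMM as a normal SIE
instruction-interception exit. A rough, assumption-laden sketch of the
user-space side (the field decoding and the handler are invented for
illustration, only KVM_EXIT_S390_SIEIC and struct kvm_run are real API):

#include <linux/kvm.h>

/* Hypothetical VMM helper that emulates the forwarded SIGP order. */
void handle_sigp_in_user_space(int vcpu_fd, __u16 ipa, __u32 ipb);

/* Sketch: with KVM_CAP_S390_USER_SIGP enabled, a "slow" SIGP order reaches
 * the run loop as a SIE instruction-interception exit. SIGP has opcode 0xae,
 * carried in the ipa halfword. */
static void check_for_user_sigp(int vcpu_fd, struct kvm_run *run)
{
	if (run->exit_reason != KVM_EXIT_S390_SIEIC)
		return;
	if ((run->s390_sieic.ipa & 0xff00) == 0xae00)
		handle_sigp_in_user_space(vcpu_fd, run->s390_sieic.ipa,
					  run->s390_sieic.ipb);
}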
@@ -209,19 +209,21 @@ TRACE_EVENT(kvm_s390_request_resets,
  * Trace point for a vcpu's stop requests.
  */
 TRACE_EVENT(kvm_s390_stop_request,
-	    TP_PROTO(unsigned int action_bits),
-	    TP_ARGS(action_bits),
+	    TP_PROTO(unsigned char stop_irq, unsigned char flags),
+	    TP_ARGS(stop_irq, flags),

 	    TP_STRUCT__entry(
-		    __field(unsigned int, action_bits)
+		    __field(unsigned char, stop_irq)
+		    __field(unsigned char, flags)
		    ),

	    TP_fast_assign(
-		    __entry->action_bits = action_bits;
+		    __entry->stop_irq = stop_irq;
+		    __entry->flags = flags;
		    ),

-	    TP_printk("stop request, action_bits = %08x",
-		      __entry->action_bits)
+	    TP_printk("stop request, stop irq = %u, flags = %08x",
+		      __entry->stop_irq, __entry->flags)
	);
...
@@ -7021,15 +7021,13 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 	return r;
 }

-int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 {
-	int r;
 	struct msr_data msr;
 	struct kvm *kvm = vcpu->kvm;

-	r = vcpu_load(vcpu);
-	if (r)
-		return r;
+	if (vcpu_load(vcpu))
+		return;
 	msr.data = 0x0;
 	msr.index = MSR_IA32_TSC;
 	msr.host_initiated = true;
@@ -7038,8 +7036,6 @@ int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 	schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
 					KVMCLOCK_SYNC_PERIOD);
-
-	return r;
 }

 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
...
@@ -49,6 +49,7 @@ static unsigned long sclp_hsa_size;
 static unsigned int sclp_max_cpu;
 static struct sclp_ipl_info sclp_ipl_info;
 static unsigned char sclp_siif;
+static unsigned char sclp_sigpif;
 static u32 sclp_ibc;

 u64 sclp_facilities;
@@ -131,6 +132,7 @@ static void __init sclp_facilities_detect(struct read_info_sccb *sccb)
 		if (boot_cpu_address != cpue->address)
 			continue;
 		sclp_siif = cpue->siif;
+		sclp_sigpif = cpue->sigpif;
 		break;
 	}
@@ -172,6 +174,12 @@ int sclp_has_siif(void)
 }
 EXPORT_SYMBOL(sclp_has_siif);

+int sclp_has_sigpif(void)
+{
+	return sclp_sigpif;
+}
+EXPORT_SYMBOL(sclp_has_sigpif);
+
 unsigned int sclp_get_ibc(void)
 {
 	return sclp_ibc;
...
@@ -661,7 +661,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);

 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
-int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
+void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

 int kvm_arch_hardware_enable(void);
...
@@ -491,6 +491,11 @@ struct kvm_s390_emerg_info {
 	__u16 code;
 };

+#define KVM_S390_STOP_FLAG_STORE_STATUS	0x01
+
+struct kvm_s390_stop_info {
+	__u32 flags;
+};
+
 struct kvm_s390_mchk_info {
 	__u64 cr14;
 	__u64 mcic;
@@ -509,6 +514,7 @@ struct kvm_s390_irq {
 		struct kvm_s390_emerg_info emerg;
 		struct kvm_s390_extcall_info extcall;
 		struct kvm_s390_prefix_info prefix;
+		struct kvm_s390_stop_info stop;
 		struct kvm_s390_mchk_info mchk;
 		char reserved[64];
 	} u;
@@ -753,6 +759,7 @@ struct kvm_ppc_smmu_info {
 #define KVM_CAP_PPC_FIXUP_HCALL 103
 #define KVM_CAP_PPC_ENABLE_HCALL 104
 #define KVM_CAP_CHECK_EXTENSION_VM 105
+#define KVM_CAP_S390_USER_SIGP 106

 #ifdef KVM_CAP_IRQ_ROUTING
...