Commit 2b253857 authored by Marcelo Tosatti, committed by Marcelo Tosatti

Merge tag 'kvm-s390-next-20150306' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into queue

KVM: s390: Features and Fixes for 4.1 (kvm/next)

1. Several fixes and enhancements
---------------------------------
- These three patches are cc'ed to stable:
b75f4c9a KVM: s390: Zero out current VMDB of STSI before including level3 data.
261520dc KVM: s390: fix handling of write errors in the tpi handler
15462e37 KVM: s390: reinjection of irqs can fail in the tpi handler

2. SIMD support, kernel part (introduced with the z13)
------------------------------------------------------
- two KVM-generic changes in kvm.h:
1. A new capability that can be enabled: KVM_CAP_S390_VECTOR_REGISTERS
2. Increased padding size for the sync regs in struct kvm_run, clarifying
   that the sync regs can be larger than 1 KB. This is fine, as the sync
   regs are the last element in the structure.
parents 1662e862 13211ea7
@@ -3248,3 +3248,13 @@ All other orders will be handled completely in user space.
 Only privileged operation exceptions will be checked for in the kernel (or even
 in the hardware prior to interception). If this capability is not enabled, the
 old way of handling SIGP orders is used (partially in kernel and user space).
+
+7.3 KVM_CAP_S390_VECTOR_REGISTERS
+
+Architectures: s390
+Parameters: none
+Returns: 0 on success, negative value on error
+
+Allows use of the vector registers introduced with z13 processor, and
+provides for the synchronization between host and user space. Will
+return -EINVAL if the machine does not support vectors.
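[Note, not part of the diff: a minimal user-space sketch of probing and enabling this capability. The fds and the helper name are illustrative; they assume the usual open("/dev/kvm") and KVM_CREATE_VM setup.]

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* kvm_fd: fd of /dev/kvm; vm_fd: fd from KVM_CREATE_VM */
    static int enable_vx(int kvm_fd, int vm_fd)
    {
            struct kvm_enable_cap cap = {
                    .cap = KVM_CAP_S390_VECTOR_REGISTERS,
            };

            if (ioctl(kvm_fd, KVM_CHECK_EXTENSION,
                      KVM_CAP_S390_VECTOR_REGISTERS) <= 0)
                    return -1;      /* host has no vector support */
            /* enable on the VM fd; fails with -EINVAL on non-vector hardware */
            return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
    }

Presumably this wants to happen before vcpus are created, since the vcpu setup code below consults kvm->arch.use_vectors.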
@@ -172,7 +172,9 @@ struct kvm_s390_sie_block {
 	__u32	fac;			/* 0x01a0 */
 	__u8	reserved1a4[20];	/* 0x01a4 */
 	__u64	cbrlo;			/* 0x01b8 */
-	__u8	reserved1c0[30];	/* 0x01c0 */
+	__u8	reserved1c0[8];		/* 0x01c0 */
+	__u32	ecd;			/* 0x01c8 */
+	__u8	reserved1cc[18];	/* 0x01cc */
 	__u64	pp;			/* 0x01de */
 	__u8	reserved1e6[2];		/* 0x01e6 */
 	__u64	itdba;			/* 0x01e8 */
@@ -183,11 +185,17 @@ struct kvm_s390_itdb {
 	__u8	data[256];
 } __packed;
 
+struct kvm_s390_vregs {
+	__vector128 vrs[32];
+	__u8	reserved200[512];	/* for future vector expansion */
+} __packed;
+
 struct sie_page {
 	struct kvm_s390_sie_block sie_block;
 	__u8 reserved200[1024];		/* 0x0200 */
 	struct kvm_s390_itdb itdb;	/* 0x0600 */
-	__u8 reserved700[2304];		/* 0x0700 */
+	__u8 reserved700[1280];		/* 0x0700 */
+	struct kvm_s390_vregs vregs;	/* 0x0c00 */
 } __packed;
 
 struct kvm_vcpu_stat {
@@ -238,6 +246,7 @@ struct kvm_vcpu_stat {
 	u32 instruction_sigp_stop;
 	u32 instruction_sigp_stop_store_status;
 	u32 instruction_sigp_store_status;
+	u32 instruction_sigp_store_adtl_status;
 	u32 instruction_sigp_arch;
 	u32 instruction_sigp_prefix;
 	u32 instruction_sigp_restart;
@@ -270,6 +279,7 @@ struct kvm_vcpu_stat {
 #define PGM_SPECIAL_OPERATION		0x13
 #define PGM_OPERAND			0x15
 #define PGM_TRACE_TABEL			0x16
+#define PGM_VECTOR_PROCESSING		0x1b
 #define PGM_SPACE_SWITCH		0x1c
 #define PGM_HFP_SQUARE_ROOT		0x1d
 #define PGM_PC_TRANSLATION_SPEC		0x1f
@@ -465,6 +475,7 @@ struct kvm_vcpu_arch {
 	s390_fp_regs host_fpregs;
 	unsigned int host_acrs[NUM_ACRS];
 	s390_fp_regs guest_fpregs;
+	struct kvm_s390_vregs *host_vregs;
 	struct kvm_s390_local_interrupt local_int;
 	struct hrtimer ckc_timer;
 	struct kvm_s390_pgm_info pgm;
@@ -551,6 +562,7 @@ struct kvm_arch{
 	int css_support;
 	int use_irqchip;
 	int use_cmma;
+	int use_vectors;
 	int user_cpu_state_ctrl;
 	int user_sigp;
 	struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
...
@@ -150,6 +150,7 @@ struct kvm_guest_debug_arch {
 #define KVM_SYNC_CRS    (1UL << 3)
 #define KVM_SYNC_ARCH0  (1UL << 4)
 #define KVM_SYNC_PFAULT (1UL << 5)
+#define KVM_SYNC_VRS    (1UL << 6)
 /* definition of registers in kvm_run */
 struct kvm_sync_regs {
 	__u64 prefix;	/* prefix register */
@@ -164,6 +165,9 @@ struct kvm_sync_regs {
 	__u64 pft;	/* pfault token [PFAULT] */
 	__u64 pfs;	/* pfault select [PFAULT] */
 	__u64 pfc;	/* pfault compare [PFAULT] */
+	__u64 vrs[32][2];	/* vector registers */
+	__u8  reserved[512];	/* for future vector expansion */
+	__u32 fpc;		/* only valid with vector registers */
 };
 
 #define KVM_REG_S390_TODPR	(KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1)
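[Note, not part of the diff: once the capability is enabled and the kernel reports KVM_SYNC_VRS in kvm_valid_regs, user space can read the guest vector registers straight out of the synced register area after KVM_RUN returns. A hedged sketch, assuming `run` is the vcpu's mmap'ed kvm_run area set up in the usual way:]

    #include <stdio.h>
    #include <linux/kvm.h>

    /* dump guest vector register 0 after KVM_RUN */
    static void dump_v0(struct kvm_run *run)
    {
            if (!(run->kvm_valid_regs & KVM_SYNC_VRS))
                    return; /* this kernel does not sync vector regs */
            /* each register is 128 bits, kept as two __u64 halves */
            printf("v0 = %016llx%016llx fpc = %08x\n",
                   (unsigned long long)run->s.regs.vrs[0][0],
                   (unsigned long long)run->s.regs.vrs[0][1],
                   run->s.regs.fpc);
    }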
...
@@ -230,7 +230,7 @@
  * and returns a key, which can be used to find a mnemonic name
  * of the instruction in the icpt_insn_codes table.
  */
-#define icpt_insn_decoder(insn) \
+#define icpt_insn_decoder(insn) ( \
 	INSN_DECODE_IPA0(0x01, insn, 48, 0xff) \
 	INSN_DECODE_IPA0(0xaa, insn, 48, 0x0f) \
 	INSN_DECODE_IPA0(0xb2, insn, 48, 0xff) \
@@ -239,6 +239,6 @@
 	INSN_DECODE_IPA0(0xe5, insn, 48, 0xff) \
 	INSN_DECODE_IPA0(0xeb, insn, 16, 0xff) \
 	INSN_DECODE_IPA0(0xc8, insn, 48, 0x0f) \
-	INSN_DECODE(insn)
+	INSN_DECODE(insn))
 
 #endif /* _UAPI_ASM_S390_SIE_H */
@@ -171,6 +171,7 @@ int main(void)
 #else /* CONFIG_32BIT */
 	DEFINE(__LC_DATA_EXC_CODE, offsetof(struct _lowcore, data_exc_code));
 	DEFINE(__LC_MCCK_FAIL_STOR_ADDR, offsetof(struct _lowcore, failing_storage_address));
+	DEFINE(__LC_VX_SAVE_AREA_ADDR, offsetof(struct _lowcore, vector_save_area_addr));
 	DEFINE(__LC_EXT_PARAMS2, offsetof(struct _lowcore, ext_params2));
 	DEFINE(SAVE_AREA_BASE, offsetof(struct _lowcore, floating_pt_save_area));
 	DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste));
...
@@ -333,7 +333,7 @@ static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
  * @write: indicates if access is a write access
  *
  * Translate a guest virtual address into a guest absolute address by means
- * of dynamic address translation as specified by the architecuture.
+ * of dynamic address translation as specified by the architecture.
  * If the resulting absolute address is not available in the configuration
  * an addressing exception is indicated and @gpa will not be changed.
  *
...
@@ -191,8 +191,8 @@ static int __import_wp_info(struct kvm_vcpu *vcpu,
 	if (!wp_info->old_data)
 		return -ENOMEM;
 	/* try to backup the original value */
-	ret = read_guest(vcpu, wp_info->phys_addr, wp_info->old_data,
-			 wp_info->len);
+	ret = read_guest_abs(vcpu, wp_info->phys_addr, wp_info->old_data,
+			     wp_info->len);
 	if (ret) {
 		kfree(wp_info->old_data);
 		wp_info->old_data = NULL;
@@ -362,8 +362,8 @@ static struct kvm_hw_wp_info_arch *any_wp_changed(struct kvm_vcpu *vcpu)
 			continue;
 
 		/* refetch the wp data and compare it to the old value */
-		if (!read_guest(vcpu, wp_info->phys_addr, temp,
-				wp_info->len)) {
+		if (!read_guest_abs(vcpu, wp_info->phys_addr, temp,
+				    wp_info->len)) {
 			if (memcmp(temp, wp_info->old_data, wp_info->len)) {
 				kfree(temp);
 				return wp_info;
...
@@ -165,6 +165,7 @@ static void __extract_prog_irq(struct kvm_vcpu *vcpu,
 		pgm_info->mon_class_nr = vcpu->arch.sie_block->mcn;
 		pgm_info->mon_code = vcpu->arch.sie_block->tecmc;
 		break;
+	case PGM_VECTOR_PROCESSING:
 	case PGM_DATA:
 		pgm_info->data_exc_code = vcpu->arch.sie_block->dxc;
 		break;
...
@@ -1,7 +1,7 @@
 /*
  * handling kvm guest interrupts
  *
- * Copyright IBM Corp. 2008,2014
+ * Copyright IBM Corp. 2008, 2015
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License (version 2 only)
@@ -18,6 +18,7 @@
 #include <linux/slab.h>
 #include <linux/bitmap.h>
 #include <asm/asm-offsets.h>
+#include <asm/dis.h>
 #include <asm/uaccess.h>
 #include <asm/sclp.h>
 #include "kvm-s390.h"
@@ -265,8 +266,6 @@ static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
 
 static u16 get_ilc(struct kvm_vcpu *vcpu)
 {
-	const unsigned short table[] = { 2, 4, 4, 6 };
-
 	switch (vcpu->arch.sie_block->icptcode) {
 	case ICPT_INST:
 	case ICPT_INSTPROGI:
@@ -274,7 +273,7 @@ static u16 get_ilc(struct kvm_vcpu *vcpu)
 	case ICPT_PARTEXEC:
 	case ICPT_IOINST:
 		/* last instruction only stored for these icptcodes */
-		return table[vcpu->arch.sie_block->ipa >> 14];
+		return insn_length(vcpu->arch.sie_block->ipa >> 8);
 	case ICPT_PROGI:
 		return vcpu->arch.sie_block->pgmilc;
 	default:
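[Note, not part of the diff: the lookup table is replaced by insn_length() from <asm/dis.h>, hence the new include above. ipa >> 8 yields the first opcode byte, whose two leftmost bits encode the s390 instruction length (00 -> 2 bytes, 01/10 -> 4 bytes, 11 -> 6 bytes). The helper is essentially the following, modulo exact kernel formatting:]

    /* paraphrased from arch/s390/include/asm/dis.h */
    static inline int insn_length(unsigned char code)
    {
            return ((((int) code + 64) >> 7) + 1) << 1;
    }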
@@ -352,6 +351,7 @@ static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 	struct kvm_s390_mchk_info mchk;
+	unsigned long adtl_status_addr;
 	int rc;
 
 	spin_lock(&li->lock);
@@ -372,6 +372,9 @@ static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
 		   mchk.cr14, mchk.mcic);
 
 	rc = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED);
+	rc |= read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR,
+			    &adtl_status_addr, sizeof(unsigned long));
+	rc |= kvm_s390_vcpu_store_adtl_status(vcpu, adtl_status_addr);
 	rc |= put_guest_lc(vcpu, mchk.mcic,
 			   (u64 __user *) __LC_MCCK_CODE);
 	rc |= put_guest_lc(vcpu, mchk.failing_storage_address,
@@ -484,7 +487,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
 {
 	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
 	struct kvm_s390_pgm_info pgm_info;
-	int rc = 0;
+	int rc = 0, nullifying = false;
 	u16 ilc = get_ilc(vcpu);
 
 	spin_lock(&li->lock);
@@ -509,6 +512,8 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
 	case PGM_LX_TRANSLATION:
 	case PGM_PRIMARY_AUTHORITY:
 	case PGM_SECONDARY_AUTHORITY:
+		nullifying = true;
+		/* fall through */
 	case PGM_SPACE_SWITCH:
 		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
 				  (u64 *)__LC_TRANS_EXC_CODE);
@@ -521,6 +526,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
 	case PGM_EXTENDED_AUTHORITY:
 		rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
 				  (u8 *)__LC_EXC_ACCESS_ID);
+		nullifying = true;
 		break;
 	case PGM_ASCE_TYPE:
 	case PGM_PAGE_TRANSLATION:
@@ -534,6 +540,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
 				  (u8 *)__LC_EXC_ACCESS_ID);
 		rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
 				   (u8 *)__LC_OP_ACCESS_ID);
+		nullifying = true;
 		break;
 	case PGM_MONITOR:
 		rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
@@ -541,6 +548,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
 		rc |= put_guest_lc(vcpu, pgm_info.mon_code,
 				   (u64 *)__LC_MON_CODE);
 		break;
+	case PGM_VECTOR_PROCESSING:
 	case PGM_DATA:
 		rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
 				  (u32 *)__LC_DATA_EXC_CODE);
@@ -551,6 +559,15 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
 		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
 				   (u8 *)__LC_EXC_ACCESS_ID);
 		break;
+	case PGM_STACK_FULL:
+	case PGM_STACK_EMPTY:
+	case PGM_STACK_SPECIFICATION:
+	case PGM_STACK_TYPE:
+	case PGM_STACK_OPERATION:
+	case PGM_TRACE_TABEL:
+	case PGM_CRYPTO_OPERATION:
+		nullifying = true;
+		break;
 	}
 
 	if (pgm_info.code & PGM_PER) {
@@ -564,6 +581,9 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
 				   (u8 *) __LC_PER_ACCESS_ID);
 	}
 
+	if (nullifying && vcpu->arch.sie_block->icptcode == ICPT_INST)
+		kvm_s390_rewind_psw(vcpu, ilc);
+
 	rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC);
 	rc |= put_guest_lc(vcpu, pgm_info.code,
 			   (u16 *)__LC_PGM_INT_CODE);
@@ -1332,10 +1352,10 @@ int kvm_s390_inject_vm(struct kvm *kvm,
 	return rc;
 }
 
-void kvm_s390_reinject_io_int(struct kvm *kvm,
-			      struct kvm_s390_interrupt_info *inti)
+int kvm_s390_reinject_io_int(struct kvm *kvm,
+			     struct kvm_s390_interrupt_info *inti)
 {
-	__inject_vm(kvm, inti);
+	return __inject_vm(kvm, inti);
 }
 
 int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
...
@@ -87,6 +87,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
 	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
 	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
+	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
 	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
 	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
 	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
@@ -103,6 +104,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 unsigned long kvm_s390_fac_list_mask[] = {
 	0xff82fffbf4fc2000UL,
 	0x005c000000000000UL,
+	0x4000000000000000UL,
 };
 
 unsigned long kvm_s390_fac_list_mask_size(void)
@@ -185,6 +187,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_S390_COW:
 		r = MACHINE_HAS_ESOP;
 		break;
+	case KVM_CAP_S390_VECTOR_REGISTERS:
+		r = MACHINE_HAS_VX;
+		break;
 	default:
 		r = 0;
 	}
@@ -265,6 +270,10 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
 		kvm->arch.user_sigp = 1;
 		r = 0;
 		break;
+	case KVM_CAP_S390_VECTOR_REGISTERS:
+		kvm->arch.use_vectors = MACHINE_HAS_VX;
+		r = MACHINE_HAS_VX ? 0 : -EINVAL;
+		break;
 	default:
 		r = -EINVAL;
 		break;
@@ -942,6 +951,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
 	kvm->arch.css_support = 0;
 	kvm->arch.use_irqchip = 0;
+	kvm->arch.use_vectors = 0;
 	kvm->arch.epoch = 0;
 
 	spin_lock_init(&kvm->arch.start_stop_lock);
@@ -1035,6 +1045,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 				    KVM_SYNC_CRS |
 				    KVM_SYNC_ARCH0 |
 				    KVM_SYNC_PFAULT;
+	if (test_kvm_facility(vcpu->kvm, 129))
+		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
 
 	if (kvm_is_ucontrol(vcpu->kvm))
 		return __kvm_ucontrol_vcpu_init(vcpu);
@@ -1045,10 +1057,18 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
-	save_fp_regs(vcpu->arch.host_fpregs.fprs);
+	if (vcpu->kvm->arch.use_vectors)
+		save_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
+	else
+		save_fp_regs(vcpu->arch.host_fpregs.fprs);
 	save_access_regs(vcpu->arch.host_acrs);
-	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
-	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
+	if (vcpu->kvm->arch.use_vectors) {
+		restore_fp_ctl(&vcpu->run->s.regs.fpc);
+		restore_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
+	} else {
+		restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
+		restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
+	}
 	restore_access_regs(vcpu->run->s.regs.acrs);
 	gmap_enable(vcpu->arch.gmap);
 	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
@@ -1058,11 +1078,19 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 	gmap_disable(vcpu->arch.gmap);
-	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
-	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
+	if (vcpu->kvm->arch.use_vectors) {
+		save_fp_ctl(&vcpu->run->s.regs.fpc);
+		save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
+	} else {
+		save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
+		save_fp_regs(vcpu->arch.guest_fpregs.fprs);
+	}
 	save_access_regs(vcpu->run->s.regs.acrs);
 	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
-	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
+	if (vcpu->kvm->arch.use_vectors)
+		restore_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
+	else
+		restore_fp_regs(vcpu->arch.host_fpregs.fprs);
 	restore_access_regs(vcpu->arch.host_acrs);
 }
@@ -1130,6 +1158,15 @@ int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
+{
+	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
+
+	vcpu->arch.cpu_id = model->cpu_id;
+	vcpu->arch.sie_block->ibc = model->ibc;
+	vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
+}
+
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
 	int rc = 0;
@@ -1138,6 +1175,8 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 						    CPUSTAT_SM |
 						    CPUSTAT_STOPPED |
 						    CPUSTAT_GED);
+	kvm_s390_vcpu_setup_model(vcpu);
+
 	vcpu->arch.sie_block->ecb = 6;
 	if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
 		vcpu->arch.sie_block->ecb |= 0x10;
@@ -1148,8 +1187,11 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 		vcpu->arch.sie_block->eca |= 1;
 	if (sclp_has_sigpif())
 		vcpu->arch.sie_block->eca |= 0x10000000U;
-	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
-				      ICTL_TPROT;
+	if (vcpu->kvm->arch.use_vectors) {
+		vcpu->arch.sie_block->eca |= 0x00020000;
+		vcpu->arch.sie_block->ecd |= 0x20000000;
+	}
+	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
 
 	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
 		rc = kvm_s390_vcpu_setup_cmma(vcpu);
@@ -1159,11 +1201,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
 
-	mutex_lock(&vcpu->kvm->lock);
-	vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id;
-	vcpu->arch.sie_block->ibc = vcpu->kvm->arch.model.ibc;
-	mutex_unlock(&vcpu->kvm->lock);
-
 	kvm_s390_vcpu_crypto_setup(vcpu);
 
 	return rc;
@@ -1191,6 +1228,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 
 	vcpu->arch.sie_block = &sie_page->sie_block;
 	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
+	vcpu->arch.host_vregs = &sie_page->vregs;
 
 	vcpu->arch.sie_block->icpua = id;
 	if (!kvm_is_ucontrol(kvm)) {
@@ -1206,7 +1244,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
 		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
 	}
-	vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->list;
 
 	spin_lock_init(&vcpu->arch.local_int.lock);
 	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
@@ -1726,6 +1763,31 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
+{
+	psw_t *psw = &vcpu->arch.sie_block->gpsw;
+	u8 opcode;
+	int rc;
+
+	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
+	trace_kvm_s390_sie_fault(vcpu);
+
+	/*
+	 * We want to inject an addressing exception, which is defined as a
+	 * suppressing or terminating exception. However, since we came here
+	 * by a DAT access exception, the PSW still points to the faulting
+	 * instruction since DAT exceptions are nullifying. So we've got
+	 * to look up the current opcode to get the length of the instruction
+	 * to be able to forward the PSW.
+	 */
+	rc = read_guest(vcpu, psw->addr, &opcode, 1);
+	if (rc)
+		return kvm_s390_inject_prog_cond(vcpu, rc);
+	psw->addr = __rewind_psw(*psw, -insn_length(opcode));
+
+	return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+}
+
 static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
 {
 	int rc = -1;
@@ -1757,11 +1819,8 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
 		}
 	}
 
-	if (rc == -1) {
-		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
-		trace_kvm_s390_sie_fault(vcpu);
-		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-	}
+	if (rc == -1)
+		rc = vcpu_post_run_fault_in_sie(vcpu);
 
 	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
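[Note, not part of the diff: __rewind_psw() is called above with a negative length, i.e. it steps the PSW forward past the faulting instruction. A rough sketch of the intended wrap-around semantics, under the assumption that the instruction address wraps within the current addressing mode; this is an illustration, not the kernel's exact helper:]

    /* illustration only: PSW address arithmetic per addressing mode */
    static unsigned long move_psw_addr(unsigned long addr, int ilc, int am_bits)
    {
            /* am_bits: 24, 31 or 64, depending on the guest PSW */
            unsigned long mask = (am_bits == 64) ? ~0UL : (1UL << am_bits) - 1;

            return (addr - ilc) & mask;     /* a negative ilc moves forward */
    }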
@@ -1977,6 +2036,35 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
 	return kvm_s390_store_status_unloaded(vcpu, addr);
 }
 
+/*
+ * store additional status at address
+ */
+int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
+					unsigned long gpa)
+{
+	/* Only bits 0-53 are used for address formation */
+	if (!(gpa & ~0x3ff))
+		return 0;
+
+	return write_guest_abs(vcpu, gpa & ~0x3ff,
+			       (void *)&vcpu->run->s.regs.vrs, 512);
+}
+
+int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
+{
+	if (!test_kvm_facility(vcpu->kvm, 129))
+		return 0;
+
+	/*
+	 * The guest VXRS are in the host VXRs due to the lazy
+	 * copying in vcpu load/put. Let's update our copies before we save
+	 * it into the save area.
+	 */
+	save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
+
+	return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
+}
+
 static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
 {
 	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
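[Note, not part of the diff: per the comment above, only bits 0-53 of the address take part in address formation, so the 512-byte vector save area is 1024-byte aligned and an address whose bits 0-53 are all zero suppresses the store. A quick illustration of the masking:]

    /* illustration only: SIGP STORE ADDITIONAL STATUS address formation */
    static unsigned long adtl_status_target(unsigned long gpa)
    {
            /* low 10 bits are ignored, e.g. 0x12345678 -> 0x12345400;
             * a result of 0 means the store is suppressed */
            return gpa & ~0x3ffUL;
    }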
...
@@ -151,8 +151,8 @@ int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
 int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
 struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
 						    u64 cr6, u64 schid);
-void kvm_s390_reinject_io_int(struct kvm *kvm,
-			      struct kvm_s390_interrupt_info *inti);
+int kvm_s390_reinject_io_int(struct kvm *kvm,
+			     struct kvm_s390_interrupt_info *inti);
 int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked);
 
 /* implemented in intercept.c */
@@ -177,7 +177,10 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);
 /* implemented in kvm-s390.c */
 long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
+int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
+					unsigned long addr);
 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
+int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
 void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
 void s390_vcpu_block(struct kvm_vcpu *vcpu);
...
@@ -229,18 +229,19 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
 	struct kvm_s390_interrupt_info *inti;
 	unsigned long len;
 	u32 tpi_data[3];
-	int cc, rc;
+	int rc;
 	u64 addr;
 
-	rc = 0;
 	addr = kvm_s390_get_base_disp_s(vcpu);
 	if (addr & 3)
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-	cc = 0;
+
 	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
-	if (!inti)
-		goto no_interrupt;
-	cc = 1;
+	if (!inti) {
+		kvm_s390_set_psw_cc(vcpu, 0);
+		return 0;
+	}
+
 	tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
 	tpi_data[1] = inti->io.io_int_parm;
 	tpi_data[2] = inti->io.io_int_word;
@@ -251,30 +252,38 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
 		 */
 		len = sizeof(tpi_data) - 4;
 		rc = write_guest(vcpu, addr, &tpi_data, len);
-		if (rc)
-			return kvm_s390_inject_prog_cond(vcpu, rc);
+		if (rc) {
+			rc = kvm_s390_inject_prog_cond(vcpu, rc);
+			goto reinject_interrupt;
+		}
 	} else {
 		/*
 		 * Store the three-word I/O interruption code into
 		 * the appropriate lowcore area.
 		 */
 		len = sizeof(tpi_data);
-		if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len))
+		if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
+			/* failed writes to the low core are not recoverable */
 			rc = -EFAULT;
+			goto reinject_interrupt;
+		}
 	}
+
+	/* irq was successfully handed to the guest */
+	kfree(inti);
+	kvm_s390_set_psw_cc(vcpu, 1);
+	return 0;
+reinject_interrupt:
 	/*
 	 * If we encounter a problem storing the interruption code, the
 	 * instruction is suppressed from the guest's view: reinject the
 	 * interrupt.
 	 */
-	if (!rc)
+	if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
 		kfree(inti);
-	else
-		kvm_s390_reinject_io_int(vcpu->kvm, inti);
-no_interrupt:
-	/* Set condition code and we're done. */
-	if (!rc)
-		kvm_s390_set_psw_cc(vcpu, cc);
+		rc = -EFAULT;
+	}
+	/* don't set the cc, a pgm irq was injected or we drop to user space */
 	return rc ? -EFAULT : 0;
 }
@@ -467,6 +476,7 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
 	for (n = mem->count - 1; n > 0 ; n--)
 		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));
 
+	memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
 	mem->vm[0].cpus_total = cpus;
 	mem->vm[0].cpus_configured = cpus;
 	mem->vm[0].cpus_standby = 0;
...
@@ -393,6 +393,9 @@ static int handle_sigp_order_in_user_space(struct kvm_vcpu *vcpu, u8 order_code)
 	case SIGP_STORE_STATUS_AT_ADDRESS:
 		vcpu->stat.instruction_sigp_store_status++;
 		break;
+	case SIGP_STORE_ADDITIONAL_STATUS:
+		vcpu->stat.instruction_sigp_store_adtl_status++;
+		break;
 	case SIGP_SET_PREFIX:
 		vcpu->stat.instruction_sigp_prefix++;
 		break;
...
@@ -324,7 +324,7 @@ struct kvm_run {
 	__u64 kvm_dirty_regs;
 	union {
 		struct kvm_sync_regs regs;
-		char padding[1024];
+		char padding[2048];
 	} s;
 };
@@ -760,6 +760,7 @@ struct kvm_ppc_smmu_info {
 #define KVM_CAP_PPC_ENABLE_HCALL 104
 #define KVM_CAP_CHECK_EXTENSION_VM 105
 #define KVM_CAP_S390_USER_SIGP 106
+#define KVM_CAP_S390_VECTOR_REGISTERS 107
 
 #ifdef KVM_CAP_IRQ_ROUTING
...