Commit dd5bd0a6 authored by Paolo Bonzini

Merge tag 'kvm-s390-next-4.20-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD

KVM: s390: Features for 4.20
- Initial version of AP crypto virtualization via vfio-mdev
- Set the host program identifier
- Optimize page table locking
parents 7e712684 55d09dd4
@@ -12668,6 +12668,18 @@ W: http://www.ibm.com/developerworks/linux/linux390/
S: Supported
F: drivers/s390/crypto/
S390 VFIO AP DRIVER
M: Tony Krowiak <akrowiak@linux.ibm.com>
M: Pierre Morel <pmorel@linux.ibm.com>
M: Halil Pasic <pasic@linux.ibm.com>
L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
S: Supported
F: drivers/s390/crypto/vfio_ap_drv.c
F: drivers/s390/crypto/vfio_ap_private.h
F: drivers/s390/crypto/vfio_ap_ops.c
F: Documentation/s390/vfio-ap.txt

S390 ZFCP DRIVER
M: Steffen Maier <maier@linux.ibm.com>
M: Benjamin Block <bblock@linux.ibm.com>
@@ -773,6 +773,17 @@ config VFIO_CCW
To compile this driver as a module, choose M here: the
module will be called vfio_ccw.
config VFIO_AP
def_tristate n
prompt "VFIO support for AP devices"
depends on S390_AP_IOMMU && VFIO_MDEV_DEVICE && KVM
help
This driver grants access to Adjunct Processor (AP) devices
via the VFIO mediated device interface.
To compile this driver as a module, choose M here: the module
will be called vfio_ap.
endmenu

menu "Dump support"
@@ -44,6 +44,7 @@
#define KVM_REQ_ICPT_OPEREXC KVM_ARCH_REQ(2)
#define KVM_REQ_START_MIGRATION KVM_ARCH_REQ(3)
#define KVM_REQ_STOP_MIGRATION KVM_ARCH_REQ(4)
#define KVM_REQ_VSIE_RESTART KVM_ARCH_REQ(5)
#define SIGP_CTRL_C 0x80
#define SIGP_CTRL_SCN_MASK 0x3f
@@ -186,6 +187,7 @@ struct kvm_s390_sie_block {
#define ECA_AIV 0x00200000
#define ECA_VX 0x00020000
#define ECA_PROTEXCI 0x00002000
#define ECA_APIE 0x00000008
#define ECA_SII 0x00000001
__u32 eca; /* 0x004c */
#define ICPT_INST 0x04
@@ -237,7 +239,11 @@ struct kvm_s390_sie_block {
psw_t gpsw; /* 0x0090 */
__u64 gg14; /* 0x00a0 */
__u64 gg15; /* 0x00a8 */
__u8 reservedb0[8]; /* 0x00b0 */
#define HPID_KVM 0x4
#define HPID_VSIE 0x5
__u8 hpid; /* 0x00b8 */
__u8 reservedb9[11]; /* 0x00b9 */
__u16 extcpuaddr; /* 0x00c4 */
__u16 eic; /* 0x00c6 */
__u32 reservedc8; /* 0x00c8 */
@@ -255,6 +261,8 @@ struct kvm_s390_sie_block {
__u8 reservede4[4]; /* 0x00e4 */
__u64 tecmc; /* 0x00e8 */
__u8 reservedf0[12]; /* 0x00f0 */
#define CRYCB_FORMAT_MASK 0x00000003
#define CRYCB_FORMAT0 0x00000000
#define CRYCB_FORMAT1 0x00000001
#define CRYCB_FORMAT2 0x00000003
__u32 crycbd; /* 0x00fc */
@@ -715,6 +723,7 @@ struct kvm_s390_crypto {
__u32 crycbd;
__u8 aes_kw;
__u8 dea_kw;
__u8 apie;
};

#define APCB0_MASK_SIZE 1
@@ -855,6 +864,8 @@ void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work);
void kvm_arch_crypto_clear_masks(struct kvm *kvm);
extern int sie64a(struct kvm_s390_sie_block *, u64 *);
extern char sie_exit;
@@ -160,6 +160,8 @@ struct kvm_s390_vm_cpu_subfunc {
#define KVM_S390_VM_CRYPTO_ENABLE_DEA_KW 1
#define KVM_S390_VM_CRYPTO_DISABLE_AES_KW 2
#define KVM_S390_VM_CRYPTO_DISABLE_DEA_KW 3
#define KVM_S390_VM_CRYPTO_ENABLE_APIE 4
#define KVM_S390_VM_CRYPTO_DISABLE_APIE 5

/* kvm attributes for migration mode */
#define KVM_S390_VM_MIGRATION_STOP 0
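For context (not part of the diff): these attributes are toggled through the generic KVM device-attribute ioctls on the VM file descriptor, in the KVM_S390_VM_CRYPTO attribute group. A minimal userspace sketch, assuming a VM fd already obtained via KVM_CREATE_VM:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Sketch: enable AP instruction interpretation for a guest. */
static int enable_apie(int vm_fd)
{
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_CRYPTO,
		.attr  = KVM_S390_VM_CRYPTO_ENABLE_APIE,
	};

	/* The probe succeeds only when the host has AP instructions. */
	if (ioctl(vm_fd, KVM_HAS_DEVICE_ATTR, &attr))
		return -1;
	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}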
@@ -40,6 +40,7 @@
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/ap.h>
#include "kvm-s390.h"
#include "gaccess.h"
@@ -844,20 +845,22 @@ void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
kvm_s390_vcpu_block_all(kvm);
kvm_for_each_vcpu(i, vcpu, kvm) {
kvm_s390_vcpu_crypto_setup(vcpu);
/* recreate the shadow crycb by leaving the VSIE handler */
kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
}
kvm_s390_vcpu_unblock_all(kvm);
}

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
mutex_lock(&kvm->lock);
switch (attr->attr) {
case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
if (!test_kvm_facility(kvm, 76)) {
mutex_unlock(&kvm->lock);
return -EINVAL;
}
get_random_bytes(
kvm->arch.crypto.crycb->aes_wrapping_key_mask,
sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
@@ -865,6 +868,8 @@ static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
break;
case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
if (!test_kvm_facility(kvm, 76)) {
mutex_unlock(&kvm->lock);
return -EINVAL;
}
get_random_bytes(
kvm->arch.crypto.crycb->dea_wrapping_key_mask,
sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
@@ -872,17 +877,35 @@ static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
break;
case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
if (!test_kvm_facility(kvm, 76)) {
mutex_unlock(&kvm->lock);
return -EINVAL;
}
kvm->arch.crypto.aes_kw = 0;
memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
break;
case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
if (!test_kvm_facility(kvm, 76)) {
mutex_unlock(&kvm->lock);
return -EINVAL;
}
kvm->arch.crypto.dea_kw = 0;
memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
break;
case KVM_S390_VM_CRYPTO_ENABLE_APIE:
if (!ap_instructions_available()) {
mutex_unlock(&kvm->lock);
return -EOPNOTSUPP;
}
kvm->arch.crypto.apie = 1;
break;
case KVM_S390_VM_CRYPTO_DISABLE_APIE:
if (!ap_instructions_available()) {
mutex_unlock(&kvm->lock);
return -EOPNOTSUPP;
}
kvm->arch.crypto.apie = 0;
break;
default:
mutex_unlock(&kvm->lock);
return -ENXIO;
@@ -1491,6 +1514,10 @@ static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
ret = 0;
break;
case KVM_S390_VM_CRYPTO_ENABLE_APIE:
case KVM_S390_VM_CRYPTO_DISABLE_APIE:
ret = ap_instructions_available() ? 0 : -ENXIO;
break;
default:
ret = -ENXIO;
break;
@@ -1992,55 +2019,60 @@ long kvm_arch_vm_ioctl(struct file *filp,
return r;
}
static int kvm_s390_query_ap_config(u8 *config)
{
u32 fcn_code = 0x04000000UL;
u32 cc = 0;
memset(config, 0, 128);
asm volatile(
"lgr 0,%1\n"
"lgr 2,%2\n"
".long 0xb2af0000\n" /* PQAP(QCI) */
"0: ipm %0\n"
"srl %0,28\n"
"1:\n"
EX_TABLE(0b, 1b)
: "+r" (cc)
: "r" (fcn_code), "r" (config)
: "cc", "0", "2", "memory"
);
return cc;
}
static int kvm_s390_apxa_installed(void)
{
struct ap_config_info info;

if (ap_instructions_available()) {
if (ap_qci(&info) == 0)
return info.apxa;
}

return 0;
}
/*
* The format of the crypto control block (CRYCB) is specified in the 3 low
* order bits of the CRYCB designation (CRYCBD) field as follows:
* Format 0: Neither the message security assist extension 3 (MSAX3) nor the
* AP extended addressing (APXA) facility are installed.
* Format 1: The APXA facility is not installed but the MSAX3 facility is.
* Format 2: Both the APXA and MSAX3 facilities are installed
*/
static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
/* Clear the CRYCB format bits - i.e., set format 0 by default */
kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);
/* Check whether MSAX3 is installed */
if (!test_kvm_facility(kvm, 76))
return;
if (kvm_s390_apxa_installed())
kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
else
kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}
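As background (not from the patch itself): the CRYCB designation is the address of the aligned CRYCB with the format identifier carried in its low-order bits, which is why the code above clears CRYCB_FORMAT_MASK before OR-ing a format in. A minimal sketch of that encoding:

#include <stdint.h>

#define CRYCB_FORMAT_MASK 0x00000003U

/* Sketch: build a CRYCB designation from a suitably aligned CRYCB
 * address and one of the CRYCB_FORMAT0/1/2 constants. The alignment
 * guarantees the low bits of the address are otherwise zero. */
static uint32_t make_crycbd(uint32_t crycb_addr, uint32_t fmt)
{
	return (crycb_addr & ~CRYCB_FORMAT_MASK) | (fmt & CRYCB_FORMAT_MASK);
}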
void kvm_arch_crypto_clear_masks(struct kvm *kvm)
{
mutex_lock(&kvm->lock);
kvm_s390_vcpu_block_all(kvm);
memset(&kvm->arch.crypto.crycb->apcb0, 0,
sizeof(kvm->arch.crypto.crycb->apcb0));
memset(&kvm->arch.crypto.crycb->apcb1, 0,
sizeof(kvm->arch.crypto.crycb->apcb1));
/* recreate the shadow crycb for each vcpu */
kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
kvm_s390_vcpu_unblock_all(kvm);
mutex_unlock(&kvm->lock);
}
EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);
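The export exists so that a module such as vfio_ap (its ops file is not shown here) can revoke a guest's AP resources. A hypothetical call site would look like:

/* Sketch (hypothetical): when a mediated matrix device is unbound from
 * a guest, wipe the APCB masks; the broadcast inside
 * kvm_arch_crypto_clear_masks() makes each vCPU rebuild its shadow
 * crycb before re-entering the vSIE. */
static void example_detach_guest(struct kvm *kvm)
{
	kvm_arch_crypto_clear_masks(kvm);
}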
static u64 kvm_s390_get_initial_cpuid(void)
{
struct cpuid cpuid;
@@ -2052,12 +2084,12 @@ static u64 kvm_s390_get_initial_cpuid(void)
static void kvm_s390_crypto_init(struct kvm *kvm)
{
kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
kvm_s390_set_crycb_format(kvm);

if (!test_kvm_facility(kvm, 76))
return;

/* Enable AES/DEA protected key functions by default */
kvm->arch.crypto.aes_kw = 1;
kvm->arch.crypto.dea_kw = 1;
@@ -2583,17 +2615,25 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
/*
 * If the AP instructions are not being interpreted and the MSAX3
 * facility is not configured for the guest, there is nothing to set up.
 */
if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
return;

vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
vcpu->arch.sie_block->eca &= ~ECA_APIE;

if (vcpu->kvm->arch.crypto.apie)
vcpu->arch.sie_block->eca |= ECA_APIE;

/* Set up protected key support */
if (vcpu->kvm->arch.crypto.aes_kw)
vcpu->arch.sie_block->ecb3 |= ECB3_AES;
if (vcpu->kvm->arch.crypto.dea_kw)
vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
}
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
@@ -2685,6 +2725,8 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
vcpu->arch.sie_block->hpid = HPID_KVM;
kvm_s390_vcpu_crypto_setup(vcpu);

return rc;
@@ -2768,18 +2810,25 @@ static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
exit_sie(vcpu);
}
bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
{
return atomic_read(&vcpu->arch.sie_block->prog20) &
(PROG_BLOCK_SIE | PROG_REQUEST);
}
static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{
atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}
/*
 * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately. */
void exit_sie(struct kvm_vcpu *vcpu)
{
kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
kvm_s390_vsie_kick(vcpu);
while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
cpu_relax();
}
@@ -3196,6 +3245,8 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
/* nothing to do, just clear the request */
kvm_clear_request(KVM_REQ_UNHALT, vcpu);
/* we left the vsie handler, nothing to do, just clear the request */
kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
return 0;
}
@@ -290,6 +290,7 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu);
bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu);
void exit_sie(struct kvm_vcpu *vcpu);
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu);
int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu);
@@ -135,14 +135,148 @@ static int prepare_cpuflags(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
atomic_set(&scb_s->cpuflags, newflags);
return 0;
}
/* Copy to APCB FORMAT1 from APCB FORMAT0 */
static int setup_apcb10(struct kvm_vcpu *vcpu, struct kvm_s390_apcb1 *apcb_s,
unsigned long apcb_o, struct kvm_s390_apcb1 *apcb_h)
{
struct kvm_s390_apcb0 tmp;
if (read_guest_real(vcpu, apcb_o, &tmp, sizeof(struct kvm_s390_apcb0)))
return -EFAULT;
apcb_s->apm[0] = apcb_h->apm[0] & tmp.apm[0];
apcb_s->aqm[0] = apcb_h->aqm[0] & tmp.aqm[0] & 0xffff000000000000UL;
apcb_s->adm[0] = apcb_h->adm[0] & tmp.adm[0] & 0xffff000000000000UL;
return 0;
}
/**
* setup_apcb00 - Copy to APCB FORMAT0 from APCB FORMAT0
* @vcpu: pointer to the virtual CPU
* @apcb_s: pointer to start of apcb in the shadow crycb
* @apcb_o: pointer to start of original apcb in the guest2
* @apcb_h: pointer to start of apcb in the guest1
*
* Returns 0 on success, or -EFAULT on error reading the guest apcb
*/
static int setup_apcb00(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
unsigned long apcb_o, unsigned long *apcb_h)
{
if (read_guest_real(vcpu, apcb_o, apcb_s,
sizeof(struct kvm_s390_apcb0)))
return -EFAULT;
bitmap_and(apcb_s, apcb_s, apcb_h, sizeof(struct kvm_s390_apcb0));
return 0;
}
/**
* setup_apcb11 - Copy the FORMAT1 APCB from the guest to the shadow CRYCB
* @vcpu: pointer to the virtual CPU
* @apcb_s: pointer to start of apcb in the shadow crycb
* @apcb_o: pointer to start of original guest apcb
* @apcb_h: pointer to start of apcb in the host
*
* Returns 0 on success, or -EFAULT on error reading the guest apcb
*/
static int setup_apcb11(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
unsigned long apcb_o,
unsigned long *apcb_h)
{
if (read_guest_real(vcpu, apcb_o, apcb_s,
sizeof(struct kvm_s390_apcb1)))
return -EFAULT;
bitmap_and(apcb_s, apcb_s, apcb_h, sizeof(struct kvm_s390_apcb1));
return 0;
}
/**
* setup_apcb - Create a shadow copy of the apcb.
* @vcpu: pointer to the virtual CPU
* @crycb_s: pointer to shadow crycb
* @crycb_o: pointer to original guest crycb
* @crycb_h: pointer to the host crycb
* @fmt_o: format of the original guest crycb.
* @fmt_h: format of the host crycb.
*
* Checks the compatibility between the guest and host crycb and calls the
* appropriate copy function.
*
* Returns 0 on success, or an error code if the guest and host crycb are incompatible.
*/
static int setup_apcb(struct kvm_vcpu *vcpu, struct kvm_s390_crypto_cb *crycb_s,
const u32 crycb_o,
struct kvm_s390_crypto_cb *crycb_h,
int fmt_o, int fmt_h)
{
struct kvm_s390_crypto_cb *crycb;
crycb = (struct kvm_s390_crypto_cb *) (unsigned long)crycb_o;
switch (fmt_o) {
case CRYCB_FORMAT2:
if ((crycb_o & PAGE_MASK) != ((crycb_o + 256) & PAGE_MASK))
return -EACCES;
if (fmt_h != CRYCB_FORMAT2)
return -EINVAL;
return setup_apcb11(vcpu, (unsigned long *)&crycb_s->apcb1,
(unsigned long) &crycb->apcb1,
(unsigned long *)&crycb_h->apcb1);
case CRYCB_FORMAT1:
switch (fmt_h) {
case CRYCB_FORMAT2:
return setup_apcb10(vcpu, &crycb_s->apcb1,
(unsigned long) &crycb->apcb0,
&crycb_h->apcb1);
case CRYCB_FORMAT1:
return setup_apcb00(vcpu,
(unsigned long *) &crycb_s->apcb0,
(unsigned long) &crycb->apcb0,
(unsigned long *) &crycb_h->apcb0);
}
break;
case CRYCB_FORMAT0:
if ((crycb_o & PAGE_MASK) != ((crycb_o + 32) & PAGE_MASK))
return -EACCES;
switch (fmt_h) {
case CRYCB_FORMAT2:
return setup_apcb10(vcpu, &crycb_s->apcb1,
(unsigned long) &crycb->apcb0,
&crycb_h->apcb1);
case CRYCB_FORMAT1:
case CRYCB_FORMAT0:
return setup_apcb00(vcpu,
(unsigned long *) &crycb_s->apcb0,
(unsigned long) &crycb->apcb0,
(unsigned long *) &crycb_h->apcb0);
}
}
return -EINVAL;
}
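A common thread in the three copy helpers above: the shadow APCB handed to guest 3 is the bitwise AND of the guest-2 APCB (fetched with read_guest_real()) and the host APCB, so a nested guest can never be granted more AP resources than its hypervisor owns. A standalone sketch with made-up mask values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t host_apm  = 0xf000000000000000ULL; /* host grants adapters 0-3    */
	uint64_t guest_apm = 0x6600000000000000ULL; /* guest 3 requests 1, 2, 5, 6 */

	/* Only IDs granted at both levels survive into the shadow APCB;
	 * this prints 0x6000000000000000, i.e. adapters 1 and 2. */
	printf("shadow apm: 0x%016llx\n",
	       (unsigned long long)(host_apm & guest_apm));
	return 0;
}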
/**
 * shadow_crycb - Create a shadow copy of the crycb block
 * @vcpu: a pointer to the virtual CPU
 * @vsie_page: a pointer to internal data used for the vSIE
 *
 * Create a shadow copy of the crycb block and setup key wrapping, if
 * requested for guest 3 and enabled for guest 2.
 *
 * We accept format-1 or format-2, but we convert format-1 into format-2
 * in the shadow CRYCB.
 * Using format-2 enables the firmware to choose the right format when
 * scheduling the SIE.
 * There is nothing to do for format-0.
 *
 * This function centralizes the issuing of set_validity_icpt() for all
 * the subfunctions working on the crycb.
 *
 * Returns: - 0 if shadowed or nothing to do
 *          - > 0 if control has to be given to guest 2
 */
@@ -154,23 +288,40 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
const u32 crycb_addr = crycbd_o & 0x7ffffff8U;
unsigned long *b1, *b2;
u8 ecb3_flags;
int apie_h;
int key_msk = test_kvm_facility(vcpu->kvm, 76);
int fmt_o = crycbd_o & CRYCB_FORMAT_MASK;
int fmt_h = vcpu->arch.sie_block->crycbd & CRYCB_FORMAT_MASK;
int ret = 0;
scb_s->crycbd = 0;

apie_h = vcpu->arch.sie_block->eca & ECA_APIE;
if (!apie_h && !key_msk)
return 0;

if (!crycb_addr)
return set_validity_icpt(scb_s, 0x0039U);

if (fmt_o == CRYCB_FORMAT1)
if ((crycb_addr & PAGE_MASK) !=
((crycb_addr + 128) & PAGE_MASK))
return set_validity_icpt(scb_s, 0x003CU);

if (apie_h && (scb_o->eca & ECA_APIE)) {
ret = setup_apcb(vcpu, &vsie_page->crycb, crycb_addr,
vcpu->kvm->arch.crypto.crycb,
fmt_o, fmt_h);
if (ret)
goto end;
scb_s->eca |= scb_o->eca & ECA_APIE;
}

/* we may only allow it if enabled for guest 2 */
ecb3_flags = scb_o->ecb3 & vcpu->arch.sie_block->ecb3 &
(ECB3_AES | ECB3_DEA);
if (!ecb3_flags)
goto end;
/* copy only the wrapping keys */
if (read_guest_real(vcpu, crycb_addr + 72,
@@ -178,8 +329,6 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
return set_validity_icpt(scb_s, 0x0035U);

scb_s->ecb3 |= ecb3_flags;
/* xor both blocks in one run */
b1 = (unsigned long *) vsie_page->crycb.dea_wrapping_key_mask;
@@ -187,6 +336,16 @@
vcpu->kvm->arch.crypto.crycb->dea_wrapping_key_mask;
/* as 56%8 == 0, bitmap_xor won't overwrite any data */
bitmap_xor(b1, b1, b2, BITS_PER_BYTE * 56);
end:
switch (ret) {
case -EINVAL:
return set_validity_icpt(scb_s, 0x0020U);
case -EFAULT:
return set_validity_icpt(scb_s, 0x0035U);
case -EACCES:
return set_validity_icpt(scb_s, 0x003CU);
}
scb_s->crycbd = ((__u32)(__u64) &vsie_page->crycb) | CRYCB_FORMAT2;
return 0;
}
@@ -383,6 +542,8 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
if (test_kvm_facility(vcpu->kvm, 156))
scb_s->ecd |= scb_o->ecd & ECD_ETOKENF;
scb_s->hpid = HPID_VSIE;
prepare_ibc(vcpu, vsie_page);
rc = shadow_crycb(vcpu, vsie_page);
out:
@@ -830,7 +991,7 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
int guest_bp_isolation;
int rc = 0;

handle_last_fault(vcpu, vsie_page);
@@ -858,7 +1019,18 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
guest_enter_irqoff();
local_irq_enable();
/*
* Simulate a SIE entry of the VCPU (see sie64a), so VCPU blocking
* and VCPU requests also hinder the vSIE from running and lead
* to an immediate exit. kvm_s390_vsie_kick() has to be used to
* also kick the vSIE.
*/
vcpu->arch.sie_block->prog0c |= PROG_IN_SIE;
barrier();
if (!kvm_s390_vcpu_sie_inhibited(vcpu))
rc = sie64a(scb_s, vcpu->run->s.regs.gprs);
barrier();
vcpu->arch.sie_block->prog0c &= ~PROG_IN_SIE;
local_irq_disable();
guest_exit_irqoff();
@@ -1005,7 +1177,8 @@ static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
if (rc == -EAGAIN)
rc = 0;
if (rc || scb_s->icptcode || signal_pending(current) ||
kvm_s390_vcpu_has_irq(vcpu, 0) ||
kvm_s390_vcpu_sie_inhibited(vcpu))
break;
}
@@ -1122,7 +1295,8 @@ int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu)
if (unlikely(scb_addr & 0x1ffUL))
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

if (signal_pending(current) || kvm_s390_vcpu_has_irq(vcpu, 0) ||
kvm_s390_vcpu_sie_inhibited(vcpu))
return 0;

vsie_page = get_vsie_page(vcpu->kvm, scb_addr);
@@ -907,10 +907,16 @@ static inline pmd_t *gmap_pmd_op_walk(struct gmap *gmap, unsigned long gaddr)
pmd_t *pmdp;

BUG_ON(gmap_is_shadow(gmap));
pmdp = (pmd_t *) gmap_table_walk(gmap, gaddr, 1);
if (!pmdp)
return NULL;

/* without huge pages, there is no need to take the table lock */
if (!gmap->mm->context.allow_gmap_hpage_1m)
return pmd_none(*pmdp) ? NULL : pmdp;

spin_lock(&gmap->guest_table_lock);
if (pmd_none(*pmdp)) {
spin_unlock(&gmap->guest_table_lock);
return NULL;
}
@@ -106,6 +106,8 @@ static struct facility_def facility_defs[] = {
.name = "FACILITIES_KVM_CPUMODEL",
.bits = (int[]){
12, /* AP Query Configuration Information */
15, /* AP Facilities Test */
156, /* etoken facility */
-1 /* END */
}
@@ -372,6 +372,14 @@ config S390_CCW_IOMMU
Enables bits of IOMMU API required by VFIO. The iommu_ops
is not implemented as it is not necessary for VFIO.
config S390_AP_IOMMU
bool "S390 AP IOMMU Support"
depends on S390 && ZCRYPT
select IOMMU_API
help
Enables bits of IOMMU API required by VFIO. The iommu_ops
is not implemented as it is not necessary for VFIO.
config MTK_IOMMU
bool "MTK IOMMU Support"
depends on ARM || ARM64
@@ -15,3 +15,7 @@ obj-$(CONFIG_ZCRYPT) += zcrypt_pcixcc.o zcrypt_cex2a.o zcrypt_cex4.o
# pkey kernel module
pkey-objs := pkey_api.o
obj-$(CONFIG_PKEY) += pkey.o
# adjunct processor matrix
vfio_ap-objs := vfio_ap_drv.o vfio_ap_ops.o
obj-$(CONFIG_VFIO_AP) += vfio_ap.o
// SPDX-License-Identifier: GPL-2.0+
/*
* VFIO based AP device driver
*
* Copyright IBM Corp. 2018
*
* Author(s): Tony Krowiak <akrowiak@linux.ibm.com>
*/
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "vfio_ap_private.h"
#define VFIO_AP_ROOT_NAME "vfio_ap"
#define VFIO_AP_DEV_TYPE_NAME "ap_matrix"
#define VFIO_AP_DEV_NAME "matrix"
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("VFIO AP device driver, Copyright IBM Corp. 2018");
MODULE_LICENSE("GPL v2");
static struct ap_driver vfio_ap_drv;
static struct device_type vfio_ap_dev_type = {
.name = VFIO_AP_DEV_TYPE_NAME,
};
struct ap_matrix_dev *matrix_dev;
/* Only type 10 adapters (CEX4 and later) are supported
* by the AP matrix device driver
*/
static struct ap_device_id ap_queue_ids[] = {
{ .dev_type = AP_DEVICE_TYPE_CEX4,
.match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
{ .dev_type = AP_DEVICE_TYPE_CEX5,
.match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
{ .dev_type = AP_DEVICE_TYPE_CEX6,
.match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
{ /* end of sibling */ },
};
MODULE_DEVICE_TABLE(vfio_ap, ap_queue_ids);
static int vfio_ap_queue_dev_probe(struct ap_device *apdev)
{
return 0;
}
static void vfio_ap_queue_dev_remove(struct ap_device *apdev)
{
/* Nothing to do yet */
}
static void vfio_ap_matrix_dev_release(struct device *dev)
{
struct ap_matrix_dev *matrix_dev = dev_get_drvdata(dev);
kfree(matrix_dev);
}
static int vfio_ap_matrix_dev_create(void)
{
int ret;
struct device *root_device;
root_device = root_device_register(VFIO_AP_ROOT_NAME);
if (IS_ERR(root_device))
return PTR_ERR(root_device);
matrix_dev = kzalloc(sizeof(*matrix_dev), GFP_KERNEL);
if (!matrix_dev) {
ret = -ENOMEM;
goto matrix_alloc_err;
}
/* Fill in config info via PQAP(QCI), if available */
if (test_facility(12)) {
ret = ap_qci(&matrix_dev->info);
if (ret)
goto matrix_alloc_err;
}
mutex_init(&matrix_dev->lock);
INIT_LIST_HEAD(&matrix_dev->mdev_list);
matrix_dev->device.type = &vfio_ap_dev_type;
dev_set_name(&matrix_dev->device, "%s", VFIO_AP_DEV_NAME);
matrix_dev->device.parent = root_device;
matrix_dev->device.release = vfio_ap_matrix_dev_release;
matrix_dev->device.driver = &vfio_ap_drv.driver;
ret = device_register(&matrix_dev->device);
if (ret)
goto matrix_reg_err;
return 0;
matrix_reg_err:
put_device(&matrix_dev->device);
matrix_alloc_err:
root_device_unregister(root_device);
return ret;
}
static void vfio_ap_matrix_dev_destroy(void)
{
device_unregister(&matrix_dev->device);
root_device_unregister(matrix_dev->device.parent);
}
int __init vfio_ap_init(void)
{
int ret;
/* If there are no AP instructions, there is nothing to pass through. */
if (!ap_instructions_available())
return -ENODEV;
ret = vfio_ap_matrix_dev_create();
if (ret)
return ret;
memset(&vfio_ap_drv, 0, sizeof(vfio_ap_drv));
vfio_ap_drv.probe = vfio_ap_queue_dev_probe;
vfio_ap_drv.remove = vfio_ap_queue_dev_remove;
vfio_ap_drv.ids = ap_queue_ids;
ret = ap_driver_register(&vfio_ap_drv, THIS_MODULE, VFIO_AP_DRV_NAME);
if (ret) {
vfio_ap_matrix_dev_destroy();
return ret;
}
ret = vfio_ap_mdev_register();
if (ret) {
ap_driver_unregister(&vfio_ap_drv);
vfio_ap_matrix_dev_destroy();
return ret;
}
return 0;
}
void __exit vfio_ap_exit(void)
{
vfio_ap_mdev_unregister();
ap_driver_unregister(&vfio_ap_drv);
vfio_ap_matrix_dev_destroy();
}
module_init(vfio_ap_init);
module_exit(vfio_ap_exit);
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Private data and functions for adjunct processor VFIO matrix driver.
*
* Author(s): Tony Krowiak <akrowiak@linux.ibm.com>
* Halil Pasic <pasic@linux.ibm.com>
*
* Copyright IBM Corp. 2018
*/
#ifndef _VFIO_AP_PRIVATE_H_
#define _VFIO_AP_PRIVATE_H_
#include <linux/types.h>
#include <linux/device.h>
#include <linux/mdev.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include "ap_bus.h"
#define VFIO_AP_MODULE_NAME "vfio_ap"
#define VFIO_AP_DRV_NAME "vfio_ap"
/**
* ap_matrix_dev - the AP matrix device structure
* @device: generic device structure associated with the AP matrix device
* @available_instances: number of mediated matrix devices that can be created
* @info: the struct containing the output from the PQAP(QCI) instruction
* @mdev_list: the list of mediated matrix devices created
* @lock: mutex for locking the AP matrix device. This lock will be
* taken every time we fiddle with state managed by the vfio_ap
* driver, be it using @mdev_list or writing the state of a
* single ap_matrix_mdev device. It's quite coarse but we don't
* expect much contention.
*/
struct ap_matrix_dev {
struct device device;
atomic_t available_instances;
struct ap_config_info info;
struct list_head mdev_list;
struct mutex lock;
};
extern struct ap_matrix_dev *matrix_dev;
/**
* The AP matrix is comprised of three bit masks identifying the adapters,
* queues (domains) and control domains that belong to an AP matrix. The bits in
* each mask, from least significant to most significant bit, correspond to IDs
* 0 to 255. When a bit is set, the corresponding ID belongs to the matrix.
*
* @apm_max: max adapter number in @apm
* @apm: identifies the AP adapters in the matrix
* @aqm_max: max domain number in @aqm
* @aqm: identifies the AP queues (domains) in the matrix
* @adm_max: max domain number in @adm
* @adm: identifies the AP control domains in the matrix
*/
struct ap_matrix {
unsigned long apm_max;
DECLARE_BITMAP(apm, 256);
unsigned long aqm_max;
DECLARE_BITMAP(aqm, 256);
unsigned long adm_max;
DECLARE_BITMAP(adm, 256);
};
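For illustration only (this helper is hypothetical, not part of the header): driver code conventionally walks these masks with the s390 MSB-0 bitmap helpers, with @apm_max and friends bounding the scan:

/* Sketch: log every adapter ID assigned in an ap_matrix. The *_inv
 * bitmap helpers from asm/bitops.h implement the left-to-right bit
 * numbering the AP architecture uses for these masks. */
static void example_dump_adapters(struct ap_matrix *m)
{
	unsigned long apid;

	for_each_set_bit_inv(apid, m->apm, m->apm_max + 1)
		pr_info("adapter %02lx assigned\n", apid);
}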
/**
* struct ap_matrix_mdev - the mediated matrix device structure
* @node: allows the ap_matrix_mdev struct to be added to a list
* @matrix: the adapters, usage domains and control domains assigned to the
* mediated matrix device.
* @group_notifier: notifier block used for specifying callback function for
* handling the VFIO_GROUP_NOTIFY_SET_KVM event
* @kvm: the struct holding guest's state
*/
struct ap_matrix_mdev {
struct list_head node;
struct ap_matrix matrix;
struct notifier_block group_notifier;
struct kvm *kvm;
};
extern int vfio_ap_mdev_register(void);
extern void vfio_ap_mdev_unregister(void);
#endif /* _VFIO_AP_PRIVATE_H_ */
@@ -200,6 +200,7 @@ struct vfio_device_info {
#define VFIO_DEVICE_FLAGS_PLATFORM (1 << 2) /* vfio-platform device */
#define VFIO_DEVICE_FLAGS_AMBA (1 << 3) /* vfio-amba device */
#define VFIO_DEVICE_FLAGS_CCW (1 << 4) /* vfio-ccw device */
#define VFIO_DEVICE_FLAGS_AP (1 << 5) /* vfio-ap device */
__u32 num_regions; /* Max region index + 1 */
__u32 num_irqs; /* Max IRQ index + 1 */
};
@@ -215,6 +216,7 @@ struct vfio_device_info {
#define VFIO_DEVICE_API_PLATFORM_STRING "vfio-platform"
#define VFIO_DEVICE_API_AMBA_STRING "vfio-amba"
#define VFIO_DEVICE_API_CCW_STRING "vfio-ccw"
#define VFIO_DEVICE_API_AP_STRING "vfio-ap"
/**
 * VFIO_DEVICE_GET_REGION_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 8,
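For context (not part of the diff): after opening a device through the usual VFIO group sequence, userspace can identify an AP mediated device from these flags. A minimal sketch:

#include <linux/vfio.h>
#include <sys/ioctl.h>

/* Sketch: check whether an open VFIO device fd is a vfio-ap device. */
static int is_vfio_ap_device(int device_fd)
{
	struct vfio_device_info info = { .argsz = sizeof(info) };

	if (ioctl(device_fd, VFIO_DEVICE_GET_INFO, &info))
		return 0;
	return (info.flags & VFIO_DEVICE_FLAGS_AP) != 0;
}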