Commit cba3d276 authored by Paolo Bonzini

Merge tag 'kvm-s390-next-20150508' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD

KVM: s390: Fixes and features for 4.2 (kvm/next)

Mostly a bunch of fixes, reworks and optimizations for s390.
There is one new feature (EDAT-2 inside the guest), which boils
down to 2GB pages.

parents 31fd9880 06b36753
@@ -80,6 +80,7 @@ struct sca_block {
 #define CPUSTAT_MCDS       0x00000100
 #define CPUSTAT_SM         0x00000080
 #define CPUSTAT_IBS        0x00000040
+#define CPUSTAT_GED2       0x00000010
 #define CPUSTAT_G          0x00000008
 #define CPUSTAT_GED        0x00000004
 #define CPUSTAT_J          0x00000002
@@ -95,7 +96,8 @@ struct kvm_s390_sie_block {
 #define PROG_IN_SIE (1<<0)
         __u32   prog0c;                 /* 0x000c */
         __u8    reserved10[16];         /* 0x0010 */
-#define PROG_BLOCK_SIE 0x00000001
+#define PROG_BLOCK_SIE (1<<0)
+#define PROG_REQUEST   (1<<1)
         atomic_t prog20;                /* 0x0020 */
         __u8    reserved24[4];          /* 0x0024 */
         __u64   cputm;                  /* 0x0028 */
...
@@ -1005,7 +1005,7 @@ ENTRY(sie64a)
 .Lsie_gmap:
         lg      %r14,__SF_EMPTY(%r15)   # get control block pointer
         oi      __SIE_PROG0C+3(%r14),1  # we are going into SIE now
-        tm      __SIE_PROG20+3(%r14),1  # last exit...
+        tm      __SIE_PROG20+3(%r14),3  # last exit...
         jnz     .Lsie_done
         LPP     __SF_EMPTY(%r15)        # set guest id
         sie     0(%r14)
...
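A note on the entry.S hunk: the tm mask grows from 1 to 3 because prog20 now carries two abort conditions, PROG_BLOCK_SIE (1<<0) and the new PROG_REQUEST (1<<1), and either one must keep the CPU from (re-)entering SIE. A rough C rendering of the test, illustrative only and not part of the patch:

        /* Sketch: mirror of "tm __SIE_PROG20+3(%r14),3; jnz .Lsie_done" */
        #define PROG_BLOCK_SIE (1 << 0)
        #define PROG_REQUEST   (1 << 1)

        static int sie_entry_allowed(unsigned int prog20)
        {
                /* mask 3 == PROG_BLOCK_SIE | PROG_REQUEST */
                return !(prog20 & (PROG_BLOCK_SIE | PROG_REQUEST));
        }
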
@@ -241,21 +241,6 @@ static int handle_prog(struct kvm_vcpu *vcpu)
         return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
 }
 
-static int handle_instruction_and_prog(struct kvm_vcpu *vcpu)
-{
-        int rc, rc2;
-
-        vcpu->stat.exit_instr_and_program++;
-        rc = handle_instruction(vcpu);
-        rc2 = handle_prog(vcpu);
-
-        if (rc == -EOPNOTSUPP)
-                vcpu->arch.sie_block->icptcode = 0x04;
-        if (rc)
-                return rc;
-        return rc2;
-}
-
 /**
  * handle_external_interrupt - used for external interruption interceptions
  *
@@ -355,7 +340,6 @@ static const intercept_handler_t intercept_funcs[] = {
         [0x00 >> 2] = handle_noop,
         [0x04 >> 2] = handle_instruction,
         [0x08 >> 2] = handle_prog,
-        [0x0C >> 2] = handle_instruction_and_prog,
         [0x10 >> 2] = handle_noop,
         [0x14 >> 2] = handle_external_interrupt,
         [0x18 >> 2] = handle_noop,
...
@@ -134,6 +134,8 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
         active_mask = pending_local_irqs(vcpu);
         active_mask |= pending_floating_irqs(vcpu);
+        if (!active_mask)
+                return 0;
 
         if (psw_extint_disabled(vcpu))
                 active_mask &= ~IRQ_PEND_EXT_MASK;
@@ -941,12 +943,9 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
         if (cpu_timer_irq_pending(vcpu))
                 set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
 
-        do {
-                irqs = deliverable_irqs(vcpu);
+        while ((irqs = deliverable_irqs(vcpu)) && !rc) {
                 /* bits are in the order of interrupt priority */
                 irq_type = find_first_bit(&irqs, IRQ_PEND_COUNT);
-                if (irq_type == IRQ_PEND_COUNT)
-                        break;
                 if (is_ioirq(irq_type)) {
                         rc = __deliver_io(vcpu, irq_type);
                 } else {
@@ -958,9 +957,7 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
                         }
                         rc = func(vcpu);
                 }
-                if (rc)
-                        break;
-        } while (!rc);
+        }
 
         set_intercept_indicators(vcpu);
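
The loop rewrite above is safe because the while condition only enters the body with a nonzero irqs mask, so find_first_bit() can never return IRQ_PEND_COUNT and the old sentinel check can go; the early `if (!active_mask) return 0;` added to deliverable_irqs() keeps the empty case cheap. Sketch of the resulting invariant (illustrative):

        while ((irqs = deliverable_irqs(vcpu)) && !rc) {
                /* irqs != 0 here, so irq_type < IRQ_PEND_COUNT always holds */
                irq_type = find_first_bit(&irqs, IRQ_PEND_COUNT);
                /* ... deliver the interrupt ... */
        }
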
@@ -1061,7 +1058,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
         if (sclp_has_sigpif())
                 return __inject_extcall_sigpif(vcpu, src_id);
 
-        if (!test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
+        if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
                 return -EBUSY;
         *extcall = irq->u.extcall;
         atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
@@ -1340,12 +1337,54 @@ static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
         return 0;
 }
 
-static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
+/*
+ * Find a destination VCPU for a floating irq and kick it.
+ */
+static void __floating_irq_kick(struct kvm *kvm, u64 type)
 {
+        struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
         struct kvm_s390_local_interrupt *li;
+        struct kvm_vcpu *dst_vcpu;
+        int sigcpu, online_vcpus, nr_tries = 0;
+
+        online_vcpus = atomic_read(&kvm->online_vcpus);
+        if (!online_vcpus)
+                return;
+
+        /* find idle VCPUs first, then round robin */
+        sigcpu = find_first_bit(fi->idle_mask, online_vcpus);
+        if (sigcpu == online_vcpus) {
+                do {
+                        sigcpu = fi->next_rr_cpu;
+                        fi->next_rr_cpu = (fi->next_rr_cpu + 1) % online_vcpus;
+                        /* avoid endless loops if all vcpus are stopped */
+                        if (nr_tries++ >= online_vcpus)
+                                return;
+                } while (is_vcpu_stopped(kvm_get_vcpu(kvm, sigcpu)));
+        }
+        dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
+
+        /* make the VCPU drop out of the SIE, or wake it up if sleeping */
+        li = &dst_vcpu->arch.local_int;
+        spin_lock(&li->lock);
+        switch (type) {
+        case KVM_S390_MCHK:
+                atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
+                break;
+        case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
+                atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags);
+                break;
+        default:
+                atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+                break;
+        }
+        spin_unlock(&li->lock);
+        kvm_s390_vcpu_wakeup(dst_vcpu);
+}
+
+static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
+{
         struct kvm_s390_float_interrupt *fi;
-        struct kvm_vcpu *dst_vcpu = NULL;
-        int sigcpu;
         u64 type = READ_ONCE(inti->type);
         int rc;
 
@@ -1373,32 +1412,8 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
         if (rc)
                 return rc;
 
-        sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
-        if (sigcpu == KVM_MAX_VCPUS) {
-                do {
-                        sigcpu = fi->next_rr_cpu++;
-                        if (sigcpu == KVM_MAX_VCPUS)
-                                sigcpu = fi->next_rr_cpu = 0;
-                } while (kvm_get_vcpu(kvm, sigcpu) == NULL);
-        }
-        dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
-        li = &dst_vcpu->arch.local_int;
-        spin_lock(&li->lock);
-        switch (type) {
-        case KVM_S390_MCHK:
-                atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
-                break;
-        case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
-                atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags);
-                break;
-        default:
-                atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
-                break;
-        }
-        spin_unlock(&li->lock);
-        kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu));
+        __floating_irq_kick(kvm, type);
         return 0;
 }
 
 int kvm_s390_inject_vm(struct kvm *kvm,
...
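Two things worth calling out in this file. The __inject_extcall change is a genuine bug fix: test_and_set_bit() returns the previous value of the bit, so the old `if (!test_and_set_bit(...))` returned -EBUSY exactly when no extcall was pending yet. The corrected semantics, as a minimal sketch:

        /* test_and_set_bit() returns the old bit value: nonzero means an
         * extcall is already pending, and only then must injection fail. */
        if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
                return -EBUSY;

The __floating_irq_kick() factoring also tightens the round-robin fallback: it iterates over online_vcpus instead of KVM_MAX_VCPUS and bounds the search with nr_tries, so a VM where every VCPU is stopped can no longer loop forever.
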
@@ -110,7 +110,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 /* upper facilities limit for kvm */
 unsigned long kvm_s390_fac_list_mask[] = {
         0xffe6fffbfcfdfc40UL,
-        0x005c800000000000UL,
+        0x005e800000000000UL,
 };
 
 unsigned long kvm_s390_fac_list_mask_size(void)
@@ -454,10 +454,10 @@ static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
         mutex_lock(&kvm->lock);
         kvm->arch.epoch = gtod - host_tod;
-        kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
+        kvm_s390_vcpu_block_all(kvm);
+        kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm)
                 cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
-                exit_sie(cur_vcpu);
-        }
+        kvm_s390_vcpu_unblock_all(kvm);
         mutex_unlock(&kvm->lock);
         return 0;
 }
@@ -1311,8 +1311,13 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
         atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
                                                     CPUSTAT_SM |
-                                                    CPUSTAT_STOPPED |
-                                                    CPUSTAT_GED);
+                                                    CPUSTAT_STOPPED);
+
+        if (test_kvm_facility(vcpu->kvm, 78))
+                atomic_set_mask(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
+        else if (test_kvm_facility(vcpu->kvm, 8))
+                atomic_set_mask(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
+
         kvm_s390_vcpu_setup_model(vcpu);
 
         vcpu->arch.sie_block->ecb = 6;
@@ -1409,16 +1414,26 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
         return kvm_s390_vcpu_has_irq(vcpu, 0);
 }
 
-void s390_vcpu_block(struct kvm_vcpu *vcpu)
+void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
 {
         atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
 }
 
-void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
+void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
 {
         atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
 }
 
+static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
+{
+        atomic_set_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
+}
+
+static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
+{
+        atomic_clear_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
+}
+
 /*
  * Kick a guest cpu out of SIE and wait until SIE is not running.
  * If the CPU is not running (e.g. waiting as idle) the function will
@@ -1430,10 +1445,11 @@ void exit_sie(struct kvm_vcpu *vcpu)
                 cpu_relax();
 }
 
-/* Kick a guest cpu out of SIE and prevent SIE-reentry */
-void exit_sie_sync(struct kvm_vcpu *vcpu)
+/* Kick a guest cpu out of SIE to process a request synchronously */
+void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
 {
-        s390_vcpu_block(vcpu);
+        kvm_make_request(req, vcpu);
+        kvm_s390_vcpu_request(vcpu);
         exit_sie(vcpu);
 }
@@ -1447,8 +1463,7 @@ static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
                 /* match against both prefix pages */
                 if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
                         VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
-                        kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
-                        exit_sie_sync(vcpu);
+                        kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
                 }
         }
 }
@@ -1720,8 +1735,10 @@ static bool ibs_enabled(struct kvm_vcpu *vcpu)
 static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
 {
+        if (!vcpu->requests)
+                return 0;
 retry:
-        s390_vcpu_unblock(vcpu);
+        kvm_s390_vcpu_request_handled(vcpu);
         /*
          * We use MMU_RELOAD just to re-arm the ipte notifier for the
          * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
@@ -2208,8 +2225,7 @@ int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
 static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
 {
         kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
-        kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
-        exit_sie_sync(vcpu);
+        kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
 }
 
 static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
@@ -2225,8 +2241,7 @@ static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
 static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
 {
         kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
-        kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
-        exit_sie_sync(vcpu);
+        kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
 }
 
 void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
...
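Taken together, the kvm-s390.c hunks implement one pattern: a requester queues a KVM_REQ_* flag, sets PROG_REQUEST so the target cannot re-enter SIE, and kicks it out; the VCPU clears PROG_REQUEST via kvm_s390_vcpu_request_handled() only once kvm_s390_handle_requests() runs, closing the window in which a request could be raised between handling and SIE re-entry. A condensed flow, with names from the patch and error handling elided:

        /* requester side */
        kvm_make_request(req, vcpu);         /* queue the request          */
        kvm_s390_vcpu_request(vcpu);         /* set PROG_REQUEST in prog20 */
        exit_sie(vcpu);                      /* force the VCPU out of SIE  */

        /* target VCPU, before re-entering SIE */
        kvm_s390_vcpu_request_handled(vcpu); /* clear PROG_REQUEST         */
        /* ... process vcpu->requests, retry if more work shows up ... */
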
@@ -211,10 +211,10 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
 int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
 void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
-void s390_vcpu_block(struct kvm_vcpu *vcpu);
-void s390_vcpu_unblock(struct kvm_vcpu *vcpu);
+void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu);
+void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu);
 void exit_sie(struct kvm_vcpu *vcpu);
-void exit_sie_sync(struct kvm_vcpu *vcpu);
+void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu);
 int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu);
 void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu);
 /* is cmma enabled */
@@ -228,6 +228,25 @@ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);
 int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
                              struct kvm_s390_pgm_info *pgm_info);
 
+static inline void kvm_s390_vcpu_block_all(struct kvm *kvm)
+{
+        int i;
+        struct kvm_vcpu *vcpu;
+
+        WARN_ON(!mutex_is_locked(&kvm->lock));
+        kvm_for_each_vcpu(i, vcpu, kvm)
+                kvm_s390_vcpu_block(vcpu);
+}
+
+static inline void kvm_s390_vcpu_unblock_all(struct kvm *kvm)
+{
+        int i;
+        struct kvm_vcpu *vcpu;
+
+        kvm_for_each_vcpu(i, vcpu, kvm)
+                kvm_s390_vcpu_unblock(vcpu);
+}
+
 /**
  * kvm_s390_inject_prog_cond - conditionally inject a program check
  * @vcpu: virtual cpu
...
@@ -698,10 +698,14 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
         case 0x00001000:
                 end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
                 break;
-        /* We dont support EDAT2
         case 0x00002000:
+                /* only support 2G frame size if EDAT2 is available and we are
+                   not in 24-bit addressing mode */
+                if (!test_kvm_facility(vcpu->kvm, 78) ||
+                    psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_24BIT)
+                        return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                 end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
-                break;*/
+                break;
         default:
                 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
         }
...
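On the PFMF arithmetic: `end = (start + (1UL << 31)) & ~((1UL << 31) - 1)` rounds start up to the next 2G boundary, exactly parallel to the 1M case one branch above. A standalone check of that expression, illustrative and outside the patch:

        #include <assert.h>
        #include <stdint.h>

        /* End of the 2^shift-sized frame containing start (exclusive). */
        static uint64_t frame_end(uint64_t start, unsigned int shift)
        {
                return (start + (1ULL << shift)) & ~((1ULL << shift) - 1);
        }

        int main(void)
        {
                assert(frame_end(0x00000000ULL, 31) == 0x080000000ULL); /* 2G */
                assert(frame_end(0x7fffffffULL, 31) == 0x080000000ULL);
                assert(frame_end(0x80000000ULL, 31) == 0x100000000ULL);
                assert(frame_end(0x00100000ULL, 20) == 0x000200000ULL); /* 1M */
                return 0;
        }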