Commit 4d5140c5 authored by Paolo Bonzini

Merge tag 'kvm-s390-next-20151028' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD

KVM: s390: Bugfix and cleanups

There is one important bug fix for potential memory corruption and/or
guest errors for guests with 63 or 64 vCPUs. The fix would qualify for
4.3, but it comes a few days too late given that we are about to
release 4.3. Since the patch is Cc'ed to stable >= 3.15 anyway, we can
handle it via the 4.4 merge window.

This pull request also contains two cleanups.
parents 8c85ac1c 46b708ea
@@ -336,28 +336,28 @@ static int handle_partial_execution(struct kvm_vcpu *vcpu)
 	return -EOPNOTSUPP;
 }
 
-static const intercept_handler_t intercept_funcs[] = {
-	[0x00 >> 2] = handle_noop,
-	[0x04 >> 2] = handle_instruction,
-	[0x08 >> 2] = handle_prog,
-	[0x10 >> 2] = handle_noop,
-	[0x14 >> 2] = handle_external_interrupt,
-	[0x18 >> 2] = handle_noop,
-	[0x1C >> 2] = kvm_s390_handle_wait,
-	[0x20 >> 2] = handle_validity,
-	[0x28 >> 2] = handle_stop,
-	[0x38 >> 2] = handle_partial_execution,
-};
-
 int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
 {
-	intercept_handler_t func;
-	u8 code = vcpu->arch.sie_block->icptcode;
-
-	if (code & 3 || (code >> 2) >= ARRAY_SIZE(intercept_funcs))
+	switch (vcpu->arch.sie_block->icptcode) {
+	case 0x00:
+	case 0x10:
+	case 0x18:
+		return handle_noop(vcpu);
+	case 0x04:
+		return handle_instruction(vcpu);
+	case 0x08:
+		return handle_prog(vcpu);
+	case 0x14:
+		return handle_external_interrupt(vcpu);
+	case 0x1c:
+		return kvm_s390_handle_wait(vcpu);
+	case 0x20:
+		return handle_validity(vcpu);
+	case 0x28:
+		return handle_stop(vcpu);
+	case 0x38:
+		return handle_partial_execution(vcpu);
+	default:
 		return -EOPNOTSUPP;
-	func = intercept_funcs[code >> 2];
-	if (func)
-		return func(vcpu);
-	return -EOPNOTSUPP;
+	}
 }
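
This hunk is the first of the two cleanups: the interception codes are all multiples of 4, which is why the removed jump table could be indexed with code >> 2 after a bounds check, while the replacement switch leaves the dispatch strategy to the compiler. Below is a standalone sketch (plain user-space C with a made-up two-entry handler set, not kernel code) contrasting the two styles:

#include <stdio.h>

typedef int (*handler_t)(unsigned int code);

static int handle_noop(unsigned int code) { printf("noop 0x%02x\n", code); return 0; }
static int handle_stop(unsigned int code) { printf("stop 0x%02x\n", code); return 0; }

/* Old style: table lookup keyed by code >> 2 (codes are multiples of 4). */
static const handler_t table[] = {
	[0x00 >> 2] = handle_noop,
	[0x28 >> 2] = handle_stop,
};

static int dispatch_table(unsigned int code)
{
	handler_t func;

	if (code & 3 || (code >> 2) >= sizeof(table) / sizeof(table[0]))
		return -1;
	func = table[code >> 2];
	return func ? func(code) : -1;
}

/* New style: plain switch, no shifting or bounds checks needed. */
static int dispatch_switch(unsigned int code)
{
	switch (code) {
	case 0x00:
		return handle_noop(code);
	case 0x28:
		return handle_stop(code);
	default:
		return -1;
	}
}

int main(void)
{
	printf("%d %d\n", dispatch_table(0x28), dispatch_switch(0x28));
	return 0;
}

For a dense case range like this, a compiler will typically generate its own jump table (or something faster) for the switch, so the manual shifting and bounds checking buys nothing.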
@@ -514,7 +514,7 @@ static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
 	if (gtod_high != 0)
 		return -EINVAL;
-	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x\n", gtod_high);
+	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);
 	return 0;
 }
@@ -527,7 +527,7 @@ static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
 		return -EFAULT;
 	kvm_s390_set_tod_clock(kvm, gtod);
-	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx\n", gtod);
+	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
 	return 0;
 }
@@ -559,7 +559,7 @@ static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
 	if (copy_to_user((void __user *)attr->addr, &gtod_high,
 			 sizeof(gtod_high)))
 		return -EFAULT;
-	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x\n", gtod_high);
+	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);
 	return 0;
 }
@@ -571,7 +571,7 @@ static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
 	gtod = kvm_s390_get_tod_clock_fast(kvm);
 	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
 		return -EFAULT;
-	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx\n", gtod);
+	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);
 	return 0;
 }
@@ -1098,7 +1098,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	if (!kvm->arch.sca)
 		goto out_err;
 	spin_lock(&kvm_lock);
-	sca_offset = (sca_offset + 16) & 0x7f0;
+	sca_offset += 16;
+	if (sca_offset + sizeof(struct sca_block) > PAGE_SIZE)
+		sca_offset = 0;
 	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
 	spin_unlock(&kvm_lock);
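
This last hunk is the bug fix mentioned in the merge description. The old code masked the randomized offset with 0x7f0, so sca_offset could reach 2032 bytes; the SCA itself is a bit over 2 KiB (a 64-byte header plus 64 CPU entries of 32 bytes each, going by the struct layout at the time), so the entries of the last one or two vCPUs could end up past the end of the allocated page and the hardware would write into whatever follows — hence the corruption for guests with 63 or 64 vCPUs. The new code wraps the offset back to 0 before the SCA would cross the page boundary. A standalone sketch of the arithmetic (SCA_SIZE is an assumed stand-in for sizeof(struct sca_block), not a kernel constant):

#include <stdio.h>

#define PAGE_SIZE 4096u
#define SCA_SIZE  2112u	/* assumed: 64-byte header + 64 entries of 32 bytes */

int main(void)
{
	unsigned int old_off = 0, new_off = 0, old_bad = 0, new_bad = 0;
	int i;

	for (i = 0; i < 256; i++) {
		/* Old scheme: wraps via the mask, but can reach 0x7f0 (2032). */
		old_off = (old_off + 16) & 0x7f0;
		if (old_off + SCA_SIZE > PAGE_SIZE)
			old_bad++;

		/* New scheme: wrap to 0 before the SCA would overrun the page. */
		new_off += 16;
		if (new_off + SCA_SIZE > PAGE_SIZE)
			new_off = 0;
		if (new_off + SCA_SIZE > PAGE_SIZE)
			new_bad++;
	}
	printf("offsets that overrun the page: old scheme %u, new scheme %u\n",
	       old_bad, new_bad);
	return 0;
}

Running the sketch shows the old scheme repeatedly producing offsets whose SCA tail crosses PAGE_SIZE, while the new scheme never does.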