Commit 3b53f553 authored by Paolo Bonzini

Merge tag 'kvm-s390-next-5.18-2' of https://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD

KVM: s390: Fix, test and feature for 5.18 part 2

- memop selftest
- fix SCK locking
- adapter interruptions virtualization for secure guests
parents 4a204f78 3bcc372c
arch/s390/include/asm/uv.h
@@ -80,6 +80,7 @@ enum uv_cmds_inst {

 enum uv_feat_ind {
         BIT_UV_FEAT_MISC = 0,
+        BIT_UV_FEAT_AIV = 1,
 };

 struct uv_cb_header {
......
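Note: BIT_UV_FEAT_AIV advertises that the ultravisor can virtualize adapter interruptions (AIV) for protected guests. It is queried like any other UV feature bit, as the kvm-s390.c hunk further below does; a minimal sketch of that check (test_bit_inv() is the s390 MSB-0 bit test, uv_info is the global UV info block):

        /* Only keep the GISA in use when the ultravisor handles adapter
         * interruptions for protected guests itself. */
        if (!test_bit_inv(BIT_UV_FEAT_AIV, &uv_info.uv_feature_indications))
                kvm_s390_gisa_disable(kvm);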
arch/s390/kvm/interrupt.c
@@ -1901,13 +1901,12 @@ static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
         isc = int_word_to_isc(inti->io.io_int_word);

         /*
-         * Do not make use of gisa in protected mode. We do not use the lock
-         * checking variant as this is just a performance optimization and we
-         * do not hold the lock here. This is ok as the code will pick
-         * interrupts from both "lists" for delivery.
+         * We do not use the lock checking variant as this is just a
+         * performance optimization and we do not hold the lock here.
+         * This is ok as the code will pick interrupts from both "lists"
+         * for delivery.
         */
-        if (!kvm_s390_pv_get_handle(kvm) &&
-            gi->origin && inti->type & KVM_S390_INT_IO_AI_MASK) {
+        if (gi->origin && inti->type & KVM_S390_INT_IO_AI_MASK) {
                VM_EVENT(kvm, 4, "%s isc %1u", "inject: I/O (AI/gisa)", isc);
                gisa_set_ipm_gisc(gi->origin, isc);
                kfree(inti);
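Note: dropping the kvm_s390_pv_get_handle() test is safe because kvm_s390_gisa_disable() (added below) clears gi->origin before any vcpu becomes protected when the ultravisor lacks AIV, so checking gi->origin alone covers both cases. The lock-free fast path works because marking an interruption subclass pending is a single atomic bit set; a sketch modeled on the kernel's gisa_set_ipm_gisc() helper (IPM_BIT_OFFSET as defined in interrupt.c):

        /* Atomically set the ISC's bit in the GISA's interruption pending
         * mask (IPM); MSB-0 bit numbering, hence set_bit_inv(). Readers
         * tolerate seeing the bit slightly early or late, so no lock. */
        static inline void gisa_set_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
        {
                set_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *)gisa);
        }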
@@ -3171,9 +3170,33 @@ void kvm_s390_gisa_init(struct kvm *kvm)
         VM_EVENT(kvm, 3, "gisa 0x%pK initialized", gi->origin);
 }

+void kvm_s390_gisa_enable(struct kvm *kvm)
+{
+        struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
+        struct kvm_vcpu *vcpu;
+        unsigned long i;
+        u32 gisa_desc;
+
+        if (gi->origin)
+                return;
+        kvm_s390_gisa_init(kvm);
+        gisa_desc = kvm_s390_get_gisa_desc(kvm);
+        if (!gisa_desc)
+                return;
+        kvm_for_each_vcpu(i, vcpu, kvm) {
+                mutex_lock(&vcpu->mutex);
+                vcpu->arch.sie_block->gd = gisa_desc;
+                vcpu->arch.sie_block->eca |= ECA_AIV;
+                VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
+                           vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
+                mutex_unlock(&vcpu->mutex);
+        }
+}
+
 void kvm_s390_gisa_destroy(struct kvm *kvm)
 {
         struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
+        struct kvm_s390_gisa *gisa = gi->origin;

         if (!gi->origin)
                 return;
@@ -3184,6 +3207,25 @@ void kvm_s390_gisa_destroy(struct kvm *kvm)
                 cpu_relax();
         hrtimer_cancel(&gi->timer);
         gi->origin = NULL;
+        VM_EVENT(kvm, 3, "gisa 0x%pK destroyed", gisa);
+}
+
+void kvm_s390_gisa_disable(struct kvm *kvm)
+{
+        struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
+        struct kvm_vcpu *vcpu;
+        unsigned long i;
+
+        if (!gi->origin)
+                return;
+        kvm_for_each_vcpu(i, vcpu, kvm) {
+                mutex_lock(&vcpu->mutex);
+                vcpu->arch.sie_block->eca &= ~ECA_AIV;
+                vcpu->arch.sie_block->gd = 0U;
+                mutex_unlock(&vcpu->mutex);
+                VCPU_EVENT(vcpu, 3, "AIV disabled for cpu %03u", vcpu->vcpu_id);
+        }
+        kvm_s390_gisa_destroy(kvm);
 }

 /**
......
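Note: both kvm_s390_gisa_enable() and kvm_s390_gisa_disable() take each vcpu's mutex before touching the SIE control block. Holding vcpu->mutex guarantees the vcpu is not currently executing in SIE (the mutex is held across vcpu ioctls such as KVM_RUN), so gd and eca can be rewritten without racing the running guest. The pattern, as a standalone sketch:

        /* Sketch: retrofitting a SIE control block field on a live VM. */
        kvm_for_each_vcpu(i, vcpu, kvm) {
                mutex_lock(&vcpu->mutex);             /* vcpu cannot be in SIE now */
                vcpu->arch.sie_block->eca |= ECA_AIV; /* or &= ~ECA_AIV to disable */
                mutex_unlock(&vcpu->mutex);
        }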
arch/s390/kvm/kvm-s390.c
@@ -2195,6 +2195,9 @@ static int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rcp, u16 *rrcp)
                 }
                 mutex_unlock(&vcpu->mutex);
         }
+        /* Ensure that we re-enable gisa if the non-PV guest used it but the PV guest did not. */
+        if (use_gisa)
+                kvm_s390_gisa_enable(kvm);
         return ret;
 }

@@ -2206,6 +2209,10 @@ static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
         struct kvm_vcpu *vcpu;

+        /* Disable the GISA if the ultravisor does not support AIV. */
+        if (!test_bit_inv(BIT_UV_FEAT_AIV, &uv_info.uv_feature_indications))
+                kvm_s390_gisa_disable(kvm);
+
         kvm_for_each_vcpu(i, vcpu, kvm) {
                 mutex_lock(&vcpu->mutex);
                 r = kvm_s390_pv_create_cpu(vcpu, rc, rrc);
@@ -3350,9 +3357,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
         vcpu->arch.sie_block->icpua = vcpu->vcpu_id;
         spin_lock_init(&vcpu->arch.local_int.lock);
-        vcpu->arch.sie_block->gd = (u32)(u64)vcpu->kvm->arch.gisa_int.origin;
-        if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
-                vcpu->arch.sie_block->gd |= GISA_FORMAT1;
+        vcpu->arch.sie_block->gd = kvm_s390_get_gisa_desc(vcpu->kvm);
         seqcount_init(&vcpu->arch.cputm_seqcount);

         vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
@@ -3956,14 +3961,12 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
         return 0;
 }

-void kvm_s390_set_tod_clock(struct kvm *kvm,
-                            const struct kvm_s390_vm_tod_clock *gtod)
+static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
 {
         struct kvm_vcpu *vcpu;
         union tod_clock clk;
         unsigned long i;

-        mutex_lock(&kvm->lock);
         preempt_disable();

         store_tod_clock_ext(&clk);
@@ -3984,7 +3987,22 @@ void kvm_s390_set_tod_clock(struct kvm *kvm,
         kvm_s390_vcpu_unblock_all(kvm);
         preempt_enable();
+}
+
+void kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
+{
+        mutex_lock(&kvm->lock);
+        __kvm_s390_set_tod_clock(kvm, gtod);
+        mutex_unlock(&kvm->lock);
+}
+
+int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
+{
+        if (!mutex_trylock(&kvm->lock))
+                return 0;
+        __kvm_s390_set_tod_clock(kvm, gtod);
         mutex_unlock(&kvm->lock);
+        return 1;
 }

 /**
......
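Note: the refactoring separates locking from the clock update. Existing callers keep the blocking kvm_s390_set_tod_clock(); the new kvm_s390_try_set_tod_clock() never sleeps on kvm->lock, which matters for callers that already hold a vcpu mutex (the established order takes kvm->lock before vcpu->mutex, so blocking there could deadlock). A self-contained sketch of the pattern in plain pthreads, not kernel code:

        #include <pthread.h>
        #include <stdbool.h>

        static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER; /* plays kvm->lock */

        /* Unconditional path: safe when no inner (vcpu) lock is held. */
        static void set_clock(void)
        {
                pthread_mutex_lock(&outer);
                /* ... update the clock for all vcpus ... */
                pthread_mutex_unlock(&outer);
        }

        /* Trylock path: safe even with an inner lock held, because it
         * never blocks on the outer lock; the caller retries later. */
        static bool try_set_clock(void)
        {
                if (pthread_mutex_trylock(&outer) != 0)
                        return false;
                /* ... update the clock ... */
                pthread_mutex_unlock(&outer);
                return true;
        }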
arch/s390/kvm/kvm-s390.h
@@ -231,6 +231,15 @@ static inline unsigned long kvm_s390_get_gfn_end(struct kvm_memslots *slots)
         return ms->base_gfn + ms->npages;
 }

+static inline u32 kvm_s390_get_gisa_desc(struct kvm *kvm)
+{
+        u32 gd = (u32)(u64)kvm->arch.gisa_int.origin;
+
+        if (gd && sclp.has_gisaf)
+                gd |= GISA_FORMAT1;
+        return gd;
+}
+
 /* implemented in pv.c */
 int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc);
 int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc);
@@ -349,8 +358,8 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
 int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);

 /* implemented in kvm-s390.c */
-void kvm_s390_set_tod_clock(struct kvm *kvm,
-                            const struct kvm_s390_vm_tod_clock *gtod);
+void kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);
+int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);
 long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
@@ -450,6 +459,8 @@ int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu,
 void kvm_s390_gisa_init(struct kvm *kvm);
 void kvm_s390_gisa_clear(struct kvm *kvm);
 void kvm_s390_gisa_destroy(struct kvm *kvm);
+void kvm_s390_gisa_disable(struct kvm *kvm);
+void kvm_s390_gisa_enable(struct kvm *kvm);
 int kvm_s390_gib_init(u8 nisc);
 void kvm_s390_gib_destroy(void);
......
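Note: kvm_s390_get_gisa_desc() centralizes what kvm_arch_vcpu_create() used to open-code. The GISA origin is aligned, so the low bits of the 32-bit descriptor are free to carry the format; GISA_FORMAT1 is set when the SCLP reports the GISA-format facility (sclp.has_gisaf), and the "gd & 0x3" in the VCPU_EVENT above reads the format back out. A decoding sketch (the two-bit format field is inferred from that logging, an assumption here):

        u32 gd = kvm_s390_get_gisa_desc(kvm);
        u32 format = gd & 0x3;                     /* 1 => format-1 GISA */
        void *origin = (void *)(u64)(gd & ~0x3U);  /* back to the GISA address */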
arch/s390/kvm/priv.c
@@ -102,7 +102,20 @@ static int handle_set_clock(struct kvm_vcpu *vcpu)
                 return kvm_s390_inject_prog_cond(vcpu, rc);

         VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", gtod.tod);
-        kvm_s390_set_tod_clock(vcpu->kvm, &gtod);
+        /*
+         * To set the TOD clock the kvm lock must be taken, but the vcpu lock
+         * is already held in handle_set_clock. The usual lock order is the
+         * opposite. As SCK is deprecated and should not be used in several
+         * cases, for example when the multiple epoch facility or TOD clock
+         * steering facility is installed (see Principles of Operation), a
+         * slow path can be used. If the lock can not be taken via try_lock,
+         * the instruction will be retried via -EAGAIN at a later point in
+         * time.
+         */
+        if (!kvm_s390_try_set_tod_clock(vcpu->kvm, &gtod)) {
+                kvm_s390_retry_instr(vcpu);
+                return -EAGAIN;
+        }

         kvm_s390_set_psw_cc(vcpu, 0);
         return 0;
......
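Note: returning -EAGAIN alone would not be enough; kvm_s390_retry_instr() also rewinds the guest PSW over the SCK instruction so the guest re-executes it on the next entry and can then win the trylock. A sketch of how that helper plausibly works (rewind by the intercepted instruction's length; the body here is an assumption, not a quote of kvm-s390.h):

        static inline void kvm_s390_retry_instr(struct kvm_vcpu *vcpu)
        {
                /* Back the PSW up by the length of the intercepted
                 * instruction so it runs again on the next guest entry. */
                kvm_s390_rewind_psw(vcpu, kvm_s390_get_ilen(vcpu));
        }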