Commit 35a2d585 authored by Christoffer Dall

KVM: arm/arm64: vgic-new: Synchronize changes to active state

When modifying the active state of an interrupt via the MMIO interface,
we should ensure that the write has the intended effect.

If a guest sets an interrupt to active, but that interrupt is already
flushed into a list register on a running VCPU, then that VCPU will
write the active state back into the struct vgic_irq upon returning from
the guest and syncing its state.  This is a non-benign race: the guest
can observe that an interrupt is not active, reasonably expect that
other VCPUs will not ack any IRQs, then set the state to active and
expect it to stay that way.  Currently we do not honor this case.
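To make the interleaving concrete, here is a hypothetical stand-alone
userspace analogy of the lost update (plain POSIX threads, not kernel code;
the names vcpu_sync_thread, lr_state, and active are illustrative only). The
"sync" thread writes back a stale snapshot of the active flag, clobbering the
MMIO writer's update:

/*
 * Illustrative analogy only: models irq->active being overwritten by a
 * VCPU syncing back stale list-register state.  Build with: cc -pthread
 */
#include <pthread.h>
#include <stdio.h>

static int active;                  /* models irq->active */
static pthread_barrier_t b1, b2;    /* force the problematic interleaving */

static void *vcpu_sync_thread(void *unused)
{
	int lr_state = active;      /* active state flushed into an LR earlier */
	pthread_barrier_wait(&b1);  /* the guest's MMIO write happens now */
	pthread_barrier_wait(&b2);
	active = lr_state;          /* sync-back clobbers the MMIO write */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_barrier_init(&b1, NULL, 2);
	pthread_barrier_init(&b2, NULL, 2);
	pthread_create(&t, NULL, vcpu_sync_thread, NULL);

	pthread_barrier_wait(&b1);
	active = 1;                 /* MMIO handler: guest sets the active bit */
	pthread_barrier_wait(&b2);
	pthread_join(t, NULL);

	printf("active = %d, but the guest expected 1\n", active); /* prints 0 */
	return 0;
}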

Therefore, change both the SACTIVE and CACTIVE MMIO handlers to stop the
world, change the IRQ state, potentially queue the IRQ if we're setting
it to active, and then continue.

We take this chance to slightly optimize these functions by not stopping
the world when touching private interrupts, where there is inherently no
possible race.
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
parent efffe55a
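In outline, both handlers are converted from unconditionally calling
kvm_arm_halt_guest()/kvm_arm_resume_guest() to the following pattern (a
condensed view of the diff below, not a compilable excerpt):

	vgic_change_active_prepare(vcpu, intid); /* halt one VCPU (private IRQ) or all VCPUs (shared IRQ) */
	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/* true for SACTIVE, false for CACTIVE; may queue the IRQ */
		vgic_mmio_change_active(vcpu, irq, new_active_state);
	}
	vgic_change_active_finish(vcpu, intid);  /* resume */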
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -229,6 +229,8 @@ struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
 struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
 void kvm_arm_halt_guest(struct kvm *kvm);
 void kvm_arm_resume_guest(struct kvm *kvm);
+void kvm_arm_halt_vcpu(struct kvm_vcpu *vcpu);
+void kvm_arm_resume_vcpu(struct kvm_vcpu *vcpu);
 
 int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
 unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -502,7 +502,13 @@ void kvm_arm_halt_guest(struct kvm *kvm)
 	kvm_make_all_cpus_request(kvm, KVM_REQ_VCPU_EXIT);
 }
 
-static void kvm_arm_resume_vcpu(struct kvm_vcpu *vcpu)
+void kvm_arm_halt_vcpu(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.pause = true;
+	kvm_vcpu_kick(vcpu);
+}
+
+void kvm_arm_resume_vcpu(struct kvm_vcpu *vcpu)
 {
 	struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -329,6 +329,8 @@ struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
 void kvm_arm_halt_guest(struct kvm *kvm);
 void kvm_arm_resume_guest(struct kvm *kvm);
+void kvm_arm_halt_vcpu(struct kvm_vcpu *vcpu);
+void kvm_arm_resume_vcpu(struct kvm_vcpu *vcpu);
 
 u64 __kvm_call_hyp(void *hypfn, ...);
 #define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__)
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -173,17 +173,9 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
 	return value;
 }
 
-void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
-			     gpa_t addr, unsigned int len,
-			     unsigned long val)
+static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
+				    bool new_active_state)
 {
-	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
-	int i;
-
-	kvm_arm_halt_guest(vcpu->kvm);
-	for_each_set_bit(i, &val, len * 8) {
-		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
-
-		spin_lock(&irq->irq_lock);
+	spin_lock(&irq->irq_lock);
 
 	/*
 	 * If this virtual IRQ was written into a list register, we
@@ -199,41 +191,76 @@ void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
 	 * other thread sync back the IRQ.
 	 */
 	while (irq->vcpu && /* IRQ may have state in an LR somewhere */
-	       irq->vcpu->cpu != -1) /* VCPU thread is running */
+	       irq->vcpu->cpu != -1) { /* VCPU thread is running */
+		BUG_ON(irq->intid < VGIC_NR_PRIVATE_IRQS);
 		cond_resched_lock(&irq->irq_lock);
+	}
 
-		irq->active = false;
-		spin_unlock(&irq->irq_lock);
+	irq->active = new_active_state;
+	if (new_active_state)
+		vgic_queue_irq_unlock(vcpu->kvm, irq);
+	else
+		spin_unlock(&irq->irq_lock);
+}
+
+/*
+ * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
+ * is not queued on some running VCPU's LRs, because then the change to the
+ * active state can be overwritten when the VCPU's state is synced coming back
+ * from the guest.
+ *
+ * For shared interrupts, we have to stop all the VCPUs because interrupts can
+ * be migrated while we don't hold the IRQ locks and we don't want to be
+ * chasing moving targets.
+ *
+ * For private interrupts, we only have to make sure the single and only VCPU
+ * that can potentially queue the IRQ is stopped.
+ */
+static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
+{
+	if (intid < VGIC_NR_PRIVATE_IRQS)
+		kvm_arm_halt_vcpu(vcpu);
+	else
+		kvm_arm_halt_guest(vcpu->kvm);
+}
+
+/* See vgic_change_active_prepare */
+static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
+{
+	if (intid < VGIC_NR_PRIVATE_IRQS)
+		kvm_arm_resume_vcpu(vcpu);
+	else
+		kvm_arm_resume_guest(vcpu->kvm);
+}
+
+void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
+			     gpa_t addr, unsigned int len,
+			     unsigned long val)
+{
+	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+	int i;
+
+	vgic_change_active_prepare(vcpu, intid);
+	for_each_set_bit(i, &val, len * 8) {
+		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+		vgic_mmio_change_active(vcpu, irq, false);
 	}
-	kvm_arm_resume_guest(vcpu->kvm);
+	vgic_change_active_finish(vcpu, intid);
 }
 
 void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
 			     gpa_t addr, unsigned int len,
 			     unsigned long val)
 {
 	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
 	int i;
 
-	kvm_arm_halt_guest(vcpu->kvm);
+	vgic_change_active_prepare(vcpu, intid);
 	for_each_set_bit(i, &val, len * 8) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-		spin_lock(&irq->irq_lock);
-
-		/*
-		 * If the IRQ was already active or there is no target VCPU
-		 * assigned at the moment, then just proceed.
-		 */
-		if (irq->active || !irq->target_vcpu) {
-			irq->active = true;
-			spin_unlock(&irq->irq_lock);
-			continue;
-		}
-
-		irq->active = true;
-		vgic_queue_irq_unlock(vcpu->kvm, irq);
+		vgic_mmio_change_active(vcpu, irq, true);
 	}
+	vgic_change_active_finish(vcpu, intid);
 }
 
 unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,