Commit 6d3cfbe2 authored by Peter Maydell, committed by Christoffer Dall

arm/arm64: KVM: vgic: move reset initialization into vgic_init_maps()

VGIC initialization currently happens in three phases:
 (1) kvm_vgic_create() (triggered by userspace GIC creation)
 (2) vgic_init_maps() (triggered by userspace GIC register read/write
     requests, or from kvm_vgic_init() if not already run)
 (3) kvm_vgic_init() (triggered by first VM run)
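
Spelled out as a rough call sequence (an illustrative sketch only; the
actual triggers are the userspace operations listed above):

    kvm_vgic_create(kvm);    /* (1) userspace creates the GIC            */
    vgic_init_maps(kvm);     /* (2) first userspace GIC register access,
                              *     or from kvm_vgic_init() if it has
                              *     not been run yet                     */
    kvm_vgic_init(kvm);      /* (3) first KVM_RUN on any vcpu            */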

We were doing initialization of some state to correspond with the
state of a freshly-reset GIC in kvm_vgic_init(); this is too late,
since it will overwrite changes made by userspace using the
register access APIs before the VM is run. Move this initialization
earlier, into the vgic_init_maps() phase.

This fixes a bug where QEMU could successfully restore a saved
VM state snapshot into a VM that had already been run, but could
not restore it "from cold" using the -loadvm command line option
(the symptoms being that the restored VM would run but interrupts
were ignored).
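
Concretely, the failing "cold" restore corresponds roughly to this
sequence (an illustrative sketch; the userspace side is simplified, and
vgic_attr_regs_access() is the kernel handler behind the register
access API):

    kvm_vgic_create(kvm);             /* QEMU creates the in-kernel GIC   */
    vgic_attr_regs_access(dev, ...);  /* -loadvm writes the saved GIC
                                       * state, running vgic_init_maps()
                                       * on demand                        */
    /* first KVM_RUN: */
    kvm_vgic_init(kvm);               /* reset-style init ran here and
                                       * overwrote the state userspace
                                       * had just restored                */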

Finally, rename vgic_init_maps to vgic_init and kvm_vgic_init to
kvm_vgic_map_resources.

  [ This patch was originally written by Peter Maydell, but I have
    modified it somewhat heavily, renaming various bits and moving code
    around.  If something is broken, I am to be blamed. - Christoffer ]
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Reviewed-by: Eric Auger <eric.auger@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
parent 957db105
arch/arm/kvm/arm.c
@@ -428,11 +428,11 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
 	vcpu->arch.has_run_once = true;
 
 	/*
-	 * Initialize the VGIC before running a vcpu the first time on
-	 * this VM.
+	 * Map the VGIC hardware resources before running a vcpu the first
+	 * time on this VM.
 	 */
 	if (unlikely(!vgic_initialized(vcpu->kvm))) {
-		ret = kvm_vgic_init(vcpu->kvm);
+		ret = kvm_vgic_map_resources(vcpu->kvm);
 		if (ret)
 			return ret;
 	}
include/kvm/arm_vgic.h
@@ -274,7 +274,7 @@ struct kvm_exit_mmio;
 #ifdef CONFIG_KVM_ARM_VGIC
 int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
 int kvm_vgic_hyp_init(void);
-int kvm_vgic_init(struct kvm *kvm);
+int kvm_vgic_map_resources(struct kvm *kvm);
 int kvm_vgic_create(struct kvm *kvm);
 void kvm_vgic_destroy(struct kvm *kvm);
 void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
@@ -321,7 +321,7 @@ static inline int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr,
 	return -ENXIO;
 }
 
-static inline int kvm_vgic_init(struct kvm *kvm)
+static inline int kvm_vgic_map_resources(struct kvm *kvm)
 {
 	return 0;
 }
virt/kvm/arm/vgic.c
@@ -91,6 +91,7 @@
 #define ACCESS_WRITE_VALUE	(3 << 1)
 #define ACCESS_WRITE_MASK(x)	((x) & (3 << 1))
 
+static int vgic_init(struct kvm *kvm);
 static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
 static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
 static void vgic_update_state(struct kvm *kvm);
@@ -1732,39 +1733,14 @@ static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
 	int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8;
 
 	vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
-	vgic_cpu->vgic_irq_lr_map = kzalloc(nr_irqs, GFP_KERNEL);
+	vgic_cpu->vgic_irq_lr_map = kmalloc(nr_irqs, GFP_KERNEL);
 
 	if (!vgic_cpu->pending_shared || !vgic_cpu->vgic_irq_lr_map) {
 		kvm_vgic_vcpu_destroy(vcpu);
 		return -ENOMEM;
 	}
 
-	return 0;
-}
-
-/**
- * kvm_vgic_vcpu_init - Initialize per-vcpu VGIC state
- * @vcpu: pointer to the vcpu struct
- *
- * Initialize the vgic_cpu struct and vgic_dist struct fields pertaining to
- * this vcpu and enable the VGIC for this VCPU
- */
-static void kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
-{
-	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
-	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-	int i;
-
-	for (i = 0; i < dist->nr_irqs; i++) {
-		if (i < VGIC_NR_PPIS)
-			vgic_bitmap_set_irq_val(&dist->irq_enabled,
-						vcpu->vcpu_id, i, 1);
-		if (i < VGIC_NR_PRIVATE_IRQS)
-			vgic_bitmap_set_irq_val(&dist->irq_cfg,
-						vcpu->vcpu_id, i, VGIC_CFG_EDGE);
-
-		vgic_cpu->vgic_irq_lr_map[i] = LR_EMPTY;
-	}
+	memset(vgic_cpu->vgic_irq_lr_map, LR_EMPTY, nr_irqs);
 
 	/*
 	 * Store the number of LRs per vcpu, so we don't have to go
@@ -1773,7 +1749,7 @@ static void kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
 	 */
 	vgic_cpu->nr_lr = vgic->nr_lr;
 
-	vgic_enable(vcpu);
+	return 0;
 }
 
 void kvm_vgic_destroy(struct kvm *kvm)
@@ -1810,12 +1786,12 @@ void kvm_vgic_destroy(struct kvm *kvm)
  * Allocate and initialize the various data structures. Must be called
  * with kvm->lock held!
  */
-static int vgic_init_maps(struct kvm *kvm)
+static int vgic_init(struct kvm *kvm)
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
 	struct kvm_vcpu *vcpu;
 	int nr_cpus, nr_irqs;
-	int ret, i;
+	int ret, i, vcpu_id;
 
 	if (dist->nr_cpus)	/* Already allocated */
 		return 0;
@@ -1865,16 +1841,28 @@ static int vgic_init_maps(struct kvm *kvm)
 	if (ret)
 		goto out;
 
-	kvm_for_each_vcpu(i, vcpu, kvm) {
+	for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i += 4)
+		vgic_set_target_reg(kvm, 0, i);
+
+	kvm_for_each_vcpu(vcpu_id, vcpu, kvm) {
 		ret = vgic_vcpu_init_maps(vcpu, nr_irqs);
 		if (ret) {
 			kvm_err("VGIC: Failed to allocate vcpu memory\n");
 			break;
 		}
-	}
 
-	for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i += 4)
-		vgic_set_target_reg(kvm, 0, i);
+		for (i = 0; i < dist->nr_irqs; i++) {
+			if (i < VGIC_NR_PPIS)
+				vgic_bitmap_set_irq_val(&dist->irq_enabled,
+							vcpu->vcpu_id, i, 1);
+			if (i < VGIC_NR_PRIVATE_IRQS)
+				vgic_bitmap_set_irq_val(&dist->irq_cfg,
+							vcpu->vcpu_id, i,
+							VGIC_CFG_EDGE);
+		}
+
+		vgic_enable(vcpu);
+	}
 
 out:
 	if (ret)
@@ -1884,18 +1872,16 @@ static int vgic_init_maps(struct kvm *kvm)
 }
 
 /**
- * kvm_vgic_init - Initialize global VGIC state before running any VCPUs
+ * kvm_vgic_map_resources - Configure global VGIC state before running any VCPUs
  * @kvm: pointer to the kvm struct
  *
  * Map the virtual CPU interface into the VM before running any VCPUs. We
  * can't do this at creation time, because user space must first set the
- * virtual CPU interface address in the guest physical address space. Also
- * initialize the ITARGETSRn regs to 0 on the emulated distributor.
+ * virtual CPU interface address in the guest physical address space.
  */
-int kvm_vgic_init(struct kvm *kvm)
+int kvm_vgic_map_resources(struct kvm *kvm)
 {
-	struct kvm_vcpu *vcpu;
-	int ret = 0, i;
+	int ret = 0;
 
 	if (!irqchip_in_kernel(kvm))
 		return 0;
@@ -1912,7 +1898,11 @@ int kvm_vgic_init(struct kvm *kvm)
 		goto out;
 	}
 
-	ret = vgic_init_maps(kvm);
+	/*
+	 * Initialize the vgic if this hasn't already been done on demand by
+	 * accessing the vgic state from userspace.
+	 */
+	ret = vgic_init(kvm);
 	if (ret) {
 		kvm_err("Unable to allocate maps\n");
 		goto out;
@@ -1926,9 +1916,6 @@ int kvm_vgic_init(struct kvm *kvm)
 		goto out;
 	}
 
-	kvm_for_each_vcpu(i, vcpu, kvm)
-		kvm_vgic_vcpu_init(vcpu);
-
 	kvm->arch.vgic.ready = true;
 out:
 	if (ret)
@@ -2173,7 +2160,7 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
 	mutex_lock(&dev->kvm->lock);
 
-	ret = vgic_init_maps(dev->kvm);
+	ret = vgic_init(dev->kvm);
 	if (ret)
 		goto out;