Commit 3cd1d327 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Paolo Bonzini:
 "PPC:
   - fix bug leading to lost IPIs and smp_call_function_many() lockups
     on POWER9

  ARM:
   - locking fix
   - reset fix
   - GICv2 multi-source SGI injection fix
   - GICv2-on-v3 MMIO synchronization fix
   - make the console less verbose.

  x86:
   - fix device passthrough when AMD SME is active"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: x86: Fix device passthrough when SME is active
  kvm: arm/arm64: vgic-v3: Tighten synchronization for guests using v2 on v3
  KVM: arm/arm64: vgic: Don't populate multiple LRs with the same vintid
  KVM: arm/arm64: Reduce verbosity of KVM init log
  KVM: arm/arm64: Reset mapped IRQs on VM reset
  KVM: arm/arm64: Avoid vcpu_load for other vcpu ioctls than KVM_RUN
  KVM: arm/arm64: vgic: Add missing irq_lock to vgic_mmio_read_pending
  KVM: PPC: Book3S HV: Fix trap number return from __kvmppc_vcore_entry
parents 9ef0f88f daaf216c
@@ -363,8 +363,6 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 {
         int ret = 0;
 
-        vcpu_load(vcpu);
-
         trace_kvm_set_guest_debug(vcpu, dbg->control);
 
         if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) {
@@ -386,7 +384,6 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
         }
 
 out:
-        vcpu_put(vcpu);
         return ret;
 }
......
@@ -320,7 +320,6 @@ kvm_novcpu_exit:
         stw     r12, STACK_SLOT_TRAP(r1)
         bl      kvmhv_commence_exit
         nop
-        lwz     r12, STACK_SLOT_TRAP(r1)
         b       kvmhv_switch_to_host
 
 /*
@@ -1220,6 +1219,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 secondary_too_late:
         li      r12, 0
+        stw     r12, STACK_SLOT_TRAP(r1)
         cmpdi   r4, 0
         beq     11f
         stw     r12, VCPU_TRAP(r4)
@@ -1558,12 +1558,12 @@ mc_cont:
 3:      stw     r5,VCPU_SLB_MAX(r9)
 
 guest_bypass:
+        stw     r12, STACK_SLOT_TRAP(r1)
         mr      r3, r12
         /* Increment exit count, poke other threads to exit */
         bl      kvmhv_commence_exit
         nop
         ld      r9, HSTATE_KVM_VCPU(r13)
-        lwz     r12, VCPU_TRAP(r9)
 
         /* Stop others sending VCPU interrupts to this physical CPU */
         li      r0, -1
@@ -1898,6 +1898,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
  * POWER7/POWER8 guest -> host partition switch code.
  * We don't have to lock against tlbies but we do
  * have to coordinate the hardware threads.
+ * Here STACK_SLOT_TRAP(r1) contains the trap number.
  */
 kvmhv_switch_to_host:
         /* Secondary threads wait for primary to do partition switch */
@@ -1950,12 +1951,12 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 
         /* If HMI, call kvmppc_realmode_hmi_handler() */
+        lwz     r12, STACK_SLOT_TRAP(r1)
         cmpwi   r12, BOOK3S_INTERRUPT_HMI
         bne     27f
         bl      kvmppc_realmode_hmi_handler
         nop
         cmpdi   r3, 0
-        li      r12, BOOK3S_INTERRUPT_HMI
         /*
          * At this point kvmppc_realmode_hmi_handler may have resync-ed
          * the TB, and if it has, we must not subtract the guest timebase
@@ -2008,10 +2009,8 @@ BEGIN_FTR_SECTION
         lwz     r8, KVM_SPLIT_DO_RESTORE(r3)
         cmpwi   r8, 0
         beq     47f
-        stw     r12, STACK_SLOT_TRAP(r1)
         bl      kvmhv_p9_restore_lpcr
         nop
-        lwz     r12, STACK_SLOT_TRAP(r1)
         b       48f
 47:
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
@@ -2049,6 +2048,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
         li      r0, KVM_GUEST_MODE_NONE
         stb     r0, HSTATE_IN_GUEST(r13)
 
+        lwz     r12, STACK_SLOT_TRAP(r1)        /* return trap # in r12 */
         ld      r0, SFS+PPC_LR_STKOFF(r1)
         addi    r1, r1, SFS
         mtlr    r0
......
@@ -2770,8 +2770,10 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
         else
                 pte_access &= ~ACC_WRITE_MASK;
 
+        if (!kvm_is_mmio_pfn(pfn))
+                spte |= shadow_me_mask;
+
         spte |= (u64)pfn << PAGE_SHIFT;
-        spte |= shadow_me_mask;
 
         if (pte_access & ACC_WRITE_MASK) {
......
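The hunk above is the SME passthrough fix: shadow_me_mask (the memory-encryption bit that AMD SME adds to page-table entries) must only be ORed into SPTEs that map ordinary RAM. If it were applied to an MMIO pfn, the passed-through device's MMIO range would be mapped encrypted and passthrough would break. A minimal standalone sketch of that rule; example_make_spte() and its parameters are hypothetical, not the kernel's set_spte():

/* Illustrative sketch, not kernel code: compose a page-table entry and set
 * the encryption mask only for RAM-backed frames, mirroring the set_spte()
 * change above. All "example_" names are hypothetical. */
#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_PAGE_SHIFT 12

static uint64_t example_make_spte(uint64_t pfn, uint64_t base_flags,
                                  uint64_t me_mask, bool pfn_is_mmio)
{
        uint64_t spte = base_flags;

        if (!pfn_is_mmio)               /* never encrypt device/MMIO mappings */
                spte |= me_mask;        /* e.g. the SME C-bit */

        spte |= pfn << EXAMPLE_PAGE_SHIFT;
        return spte;
}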
@@ -360,6 +360,7 @@ void kvm_vgic_put(struct kvm_vcpu *vcpu);
 bool kvm_vcpu_has_pending_irqs(struct kvm_vcpu *vcpu);
 void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
 void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
+void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid);
 
 void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
......
@@ -503,6 +503,7 @@
 #define ICH_HCR_EN                      (1 << 0)
 #define ICH_HCR_UIE                     (1 << 1)
+#define ICH_HCR_NPIE                    (1 << 3)
 #define ICH_HCR_TC                      (1 << 10)
 #define ICH_HCR_TALL0                   (1 << 11)
 #define ICH_HCR_TALL1                   (1 << 12)
......
@@ -84,6 +84,7 @@
 #define GICH_HCR_EN                     (1 << 0)
 #define GICH_HCR_UIE                    (1 << 1)
+#define GICH_HCR_NPIE                   (1 << 3)
 
 #define GICH_LR_VIRTUALID               (0x3ff << 0)
 #define GICH_LR_PHYSID_CPUID_SHIFT      (10)
......
@@ -581,6 +581,7 @@ void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
 int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
 {
+        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
         struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
         struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
 
@@ -594,6 +595,9 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
         ptimer->cnt_ctl = 0;
         kvm_timer_update_state(vcpu);
 
+        if (timer->enabled && irqchip_in_kernel(vcpu->kvm))
+                kvm_vgic_reset_mapped_irq(vcpu, vtimer->irq.irq);
+
         return 0;
 }
 
@@ -767,7 +771,7 @@ int kvm_timer_hyp_init(bool has_gic)
                 static_branch_enable(&has_gic_active_state);
         }
 
-        kvm_info("virtual timer IRQ%d\n", host_vtimer_irq);
+        kvm_debug("virtual timer IRQ%d\n", host_vtimer_irq);
 
         cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING,
                           "kvm/arm/timer:starting", kvm_timer_starting_cpu,
......
@@ -384,14 +384,11 @@ static void vcpu_power_off(struct kvm_vcpu *vcpu)
 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                     struct kvm_mp_state *mp_state)
 {
-        vcpu_load(vcpu);
-
         if (vcpu->arch.power_off)
                 mp_state->mp_state = KVM_MP_STATE_STOPPED;
         else
                 mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
 
-        vcpu_put(vcpu);
         return 0;
 }
 
@@ -400,8 +397,6 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 {
         int ret = 0;
 
-        vcpu_load(vcpu);
-
         switch (mp_state->mp_state) {
         case KVM_MP_STATE_RUNNABLE:
                 vcpu->arch.power_off = false;
@@ -413,7 +408,6 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                 ret = -EINVAL;
         }
 
-        vcpu_put(vcpu);
         return ret;
 }
 
@@ -1036,8 +1030,6 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
         struct kvm_device_attr attr;
         long r;
 
-        vcpu_load(vcpu);
-
         switch (ioctl) {
         case KVM_ARM_VCPU_INIT: {
                 struct kvm_vcpu_init init;
@@ -1114,7 +1106,6 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                 r = -EINVAL;
         }
 
-        vcpu_put(vcpu);
         return r;
 }
......
@@ -215,7 +215,8 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
          * are now visible to the system register interface.
          */
         if (!cpu_if->vgic_sre) {
-                dsb(st);
+                dsb(sy);
+                isb();
                 cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
         }
......
@@ -1810,9 +1810,9 @@ int kvm_mmu_init(void)
          */
         BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);
 
-        kvm_info("IDMAP page: %lx\n", hyp_idmap_start);
-        kvm_info("HYP VA range: %lx:%lx\n",
-                 kern_hyp_va(PAGE_OFFSET), kern_hyp_va(~0UL));
+        kvm_debug("IDMAP page: %lx\n", hyp_idmap_start);
+        kvm_debug("HYP VA range: %lx:%lx\n",
+                  kern_hyp_va(PAGE_OFFSET), kern_hyp_va(~0UL));
 
         if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
             hyp_idmap_start <  kern_hyp_va(~0UL) &&
......
@@ -113,9 +113,12 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
         /* Loop over all IRQs affected by this read */
         for (i = 0; i < len * 8; i++) {
                 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+                unsigned long flags;
 
+                spin_lock_irqsave(&irq->irq_lock, flags);
                 if (irq_is_pending(irq))
                         value |= (1U << i);
+                spin_unlock_irqrestore(&irq->irq_lock, flags);
 
                 vgic_put_irq(vcpu->kvm, irq);
         }
......
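The vgic_mmio_read_pending() change above is the locking fix from the pull: for a level-triggered interrupt, "pending" is derived from both the software pending latch and the last sampled line level, so the irq_lock must be held across the whole check (the comment added to irq_is_pending() further down makes the same requirement explicit). A rough userspace-style sketch of the pattern, using pthreads purely for illustration; all "example_" names are hypothetical:

/* Sketch only, not kernel code: sample both fields that make up the
 * "pending" answer for a level-triggered interrupt under a single lock,
 * as the hunk above now does with irq->irq_lock. */
#include <pthread.h>
#include <stdbool.h>

struct example_virq {
        pthread_mutex_t lock;
        bool pending_latch;     /* software-latched pending state */
        bool line_level;        /* last sampled level of the input line */
};

static bool example_irq_is_pending(struct example_virq *irq)
{
        bool pending;

        pthread_mutex_lock(&irq->lock);
        pending = irq->pending_latch || irq->line_level;
        pthread_mutex_unlock(&irq->lock);

        return pending;
}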
@@ -37,6 +37,13 @@ void vgic_v2_init_lrs(void)
                 vgic_v2_write_lr(i, 0);
 }
 
+void vgic_v2_set_npie(struct kvm_vcpu *vcpu)
+{
+        struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
+
+        cpuif->vgic_hcr |= GICH_HCR_NPIE;
+}
+
 void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
 {
         struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
@@ -64,7 +71,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
         int lr;
         unsigned long flags;
 
-        cpuif->vgic_hcr &= ~GICH_HCR_UIE;
+        cpuif->vgic_hcr &= ~(GICH_HCR_UIE | GICH_HCR_NPIE);
 
         for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
                 u32 val = cpuif->vgic_lr[lr];
@@ -410,7 +417,7 @@ int vgic_v2_probe(const struct gic_kvm_info *info)
         kvm_vgic_global_state.type = VGIC_V2;
         kvm_vgic_global_state.max_gic_vcpus = VGIC_V2_MAX_CPUS;
 
-        kvm_info("vgic-v2@%llx\n", info->vctrl.start);
+        kvm_debug("vgic-v2@%llx\n", info->vctrl.start);
 
         return 0;
 out:
......
@@ -26,6 +26,13 @@ static bool group1_trap;
 static bool common_trap;
 static bool gicv4_enable;
 
+void vgic_v3_set_npie(struct kvm_vcpu *vcpu)
+{
+        struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
+
+        cpuif->vgic_hcr |= ICH_HCR_NPIE;
+}
+
 void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
 {
         struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
@@ -47,7 +54,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
         int lr;
         unsigned long flags;
 
-        cpuif->vgic_hcr &= ~ICH_HCR_UIE;
+        cpuif->vgic_hcr &= ~(ICH_HCR_UIE | ICH_HCR_NPIE);
 
         for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
                 u64 val = cpuif->vgic_lr[lr];
......
@@ -495,6 +495,32 @@ int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
         return ret;
 }
 
+/**
+ * kvm_vgic_reset_mapped_irq - Reset a mapped IRQ
+ * @vcpu: The VCPU pointer
+ * @vintid: The INTID of the interrupt
+ *
+ * Reset the active and pending states of a mapped interrupt. Kernel
+ * subsystems injecting mapped interrupts should reset their interrupt lines
+ * when we are doing a reset of the VM.
+ */
+void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid)
+{
+        struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
+        unsigned long flags;
+
+        if (!irq->hw)
+                goto out;
+
+        spin_lock_irqsave(&irq->irq_lock, flags);
+        irq->active = false;
+        irq->pending_latch = false;
+        irq->line_level = false;
+        spin_unlock_irqrestore(&irq->irq_lock, flags);
+out:
+        vgic_put_irq(vcpu->kvm, irq);
+}
+
 int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
 {
         struct vgic_irq *irq;
@@ -684,22 +710,37 @@ static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
                 vgic_v3_set_underflow(vcpu);
 }
 
+static inline void vgic_set_npie(struct kvm_vcpu *vcpu)
+{
+        if (kvm_vgic_global_state.type == VGIC_V2)
+                vgic_v2_set_npie(vcpu);
+        else
+                vgic_v3_set_npie(vcpu);
+}
+
 /* Requires the ap_list_lock to be held. */
-static int compute_ap_list_depth(struct kvm_vcpu *vcpu)
+static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
+                                 bool *multi_sgi)
 {
         struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
         struct vgic_irq *irq;
         int count = 0;
 
+        *multi_sgi = false;
+
         DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
 
         list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
                 spin_lock(&irq->irq_lock);
                 /* GICv2 SGIs can count for more than one... */
-                if (vgic_irq_is_sgi(irq->intid) && irq->source)
-                        count += hweight8(irq->source);
-                else
+                if (vgic_irq_is_sgi(irq->intid) && irq->source) {
+                        int w = hweight8(irq->source);
+
+                        count += w;
+                        *multi_sgi |= (w > 1);
+                } else {
                         count++;
+                }
                 spin_unlock(&irq->irq_lock);
         }
         return count;
@@ -710,28 +751,43 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
 {
         struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
         struct vgic_irq *irq;
-        int count = 0;
+        int count;
+        bool npie = false;
+        bool multi_sgi;
+        u8 prio = 0xff;
 
         DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
 
-        if (compute_ap_list_depth(vcpu) > kvm_vgic_global_state.nr_lr)
+        count = compute_ap_list_depth(vcpu, &multi_sgi);
+        if (count > kvm_vgic_global_state.nr_lr || multi_sgi)
                 vgic_sort_ap_list(vcpu);
 
+        count = 0;
+
         list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
                 spin_lock(&irq->irq_lock);
 
-                if (unlikely(vgic_target_oracle(irq) != vcpu))
-                        goto next;
-
                 /*
-                 * If we get an SGI with multiple sources, try to get
-                 * them in all at once.
+                 * If we have multi-SGIs in the pipeline, we need to
+                 * guarantee that they are all seen before any IRQ of
+                 * lower priority. In that case, we need to filter out
+                 * these interrupts by exiting early. This is easy as
+                 * the AP list has been sorted already.
                  */
-                do {
+                if (multi_sgi && irq->priority > prio) {
+                        spin_unlock(&irq->irq_lock);
+                        break;
+                }
+
+                if (likely(vgic_target_oracle(irq) == vcpu)) {
                         vgic_populate_lr(vcpu, irq, count++);
-                } while (irq->source && count < kvm_vgic_global_state.nr_lr);
 
-next:
+                        if (irq->source) {
+                                npie = true;
+                                prio = irq->priority;
+                        }
+                }
+
                 spin_unlock(&irq->irq_lock);
 
                 if (count == kvm_vgic_global_state.nr_lr) {
@@ -742,6 +798,9 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
                 }
         }
 
+        if (npie)
+                vgic_set_npie(vcpu);
+
         vcpu->arch.vgic_cpu.used_lrs = count;
 
         /* Nuke remaining LRs */
......
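The vgic.c hunks above implement the multi-source SGI fix: a GICv2 SGI can be pending from up to eight source CPUs at once and each source must be delivered to the guest separately, but several list registers must not carry the same interrupt ID. The new code therefore populates at most one LR per SGI, counts how many deliveries the AP list will eventually need (one per bit set in the 8-bit source mask, via hweight8()), sorts the list so multi-source SGIs are presented before lower-priority interrupts, and sets the NPIE bit so a maintenance interrupt fires once no pending interrupts remain in the LRs, letting KVM inject the remaining sources. A small sketch of the per-SGI delivery count; the "example_" name is hypothetical:

/* Sketch, not kernel code: number of list-register deliveries a GICv2 SGI
 * still owes, one per set bit in its source-CPU bitmap. This is the same
 * quantity compute_ap_list_depth() obtains with hweight8(irq->source). */
#include <stdint.h>

static int example_sgi_pending_deliveries(uint8_t source_bitmap)
{
        int deliveries = 0;

        while (source_bitmap) {
                deliveries += source_bitmap & 1;        /* one delivery per source CPU */
                source_bitmap >>= 1;
        }
        return deliveries;
}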
@@ -96,6 +96,7 @@
 /* we only support 64 kB translation table page size */
 #define KVM_ITS_L1E_ADDR_MASK           GENMASK_ULL(51, 16)
 
+/* Requires the irq_lock to be held by the caller. */
 static inline bool irq_is_pending(struct vgic_irq *irq)
 {
         if (irq->config == VGIC_CONFIG_EDGE)
@@ -159,6 +160,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu);
 void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
 void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr);
 void vgic_v2_set_underflow(struct kvm_vcpu *vcpu);
+void vgic_v2_set_npie(struct kvm_vcpu *vcpu);
 int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr);
 int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
                          int offset, u32 *val);
@@ -188,6 +190,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu);
 void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
 void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr);
 void vgic_v3_set_underflow(struct kvm_vcpu *vcpu);
+void vgic_v3_set_npie(struct kvm_vcpu *vcpu);
 void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
 void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
 void vgic_v3_enable(struct kvm_vcpu *vcpu);
......