Commit 05fe125f authored by Paolo Bonzini

Merge tag 'kvm-arm-for-4.2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/ARM changes for v4.2:

- Proper guest time accounting
- FP access fix for 32bit
- The usual pile of GIC fixes
- PSCI fixes
- Random cleanups
parents e80a4a94 c62e631d
@@ -28,6 +28,7 @@ config KVM
 	select KVM_GENERIC_DIRTYLOG_READ_PROTECT
 	select SRCU
 	select MMU_NOTIFIER
+	select KVM_VFIO
 	select HAVE_KVM_EVENTFD
 	select HAVE_KVM_IRQFD
 	depends on ARM_VIRT_EXT && ARM_LPAE && ARM_ARCH_TIMER
...
@@ -15,7 +15,7 @@ AFLAGS_init.o := -Wa,-march=armv7-a$(plus_virt)
 AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt)
 
 KVM := ../../../virt/kvm
-kvm-arm-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o
+kvm-arm-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o $(KVM)/vfio.o
 
 obj-y += kvm-arm.o init.o interrupts.o
 obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
...
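Together with the matching arm64 hunks below, selecting KVM_VFIO and linking $(KVM)/vfio.o makes the generic KVM-VFIO pseudo device available on ARM. A minimal userspace sketch of what this enables, assuming a VM fd is already open (error handling elided; this usage is not part of the commit itself):

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Create the KVM-VFIO pseudo device on a VM; returns its device fd. */
int create_kvm_vfio_device(int vm_fd)
{
	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_VFIO };

	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) < 0)
		return -1;
	return cd.fd;	/* used for KVM_DEV_VFIO_GROUP_ADD and friends */
}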
@@ -171,7 +171,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	int r;
 	switch (ext) {
 	case KVM_CAP_IRQCHIP:
-	case KVM_CAP_IRQFD:
 	case KVM_CAP_IOEVENTFD:
 	case KVM_CAP_DEVICE_CTRL:
 	case KVM_CAP_USER_MEMORY:
@@ -532,6 +531,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		kvm_vgic_flush_hwstate(vcpu);
 		kvm_timer_flush_hwstate(vcpu);
 
+		preempt_disable();
 		local_irq_disable();
 
 		/*
@@ -544,6 +544,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		if (ret <= 0 || need_new_vmid_gen(vcpu->kvm)) {
 			local_irq_enable();
+			preempt_enable();
 			kvm_timer_sync_hwstate(vcpu);
 			kvm_vgic_sync_hwstate(vcpu);
 			continue;
@@ -559,8 +560,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
 
 		vcpu->mode = OUTSIDE_GUEST_MODE;
-		__kvm_guest_exit();
-		trace_kvm_exit(kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
+		/*
+		 * Back from guest
+		 *************************************************************/
+
 		/*
 		 * We may have taken a host interrupt in HYP mode (ie
 		 * while executing the guest). This interrupt is still
@@ -574,8 +577,17 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		local_irq_enable();
 
 		/*
-		 * Back from guest
-		 *************************************************************/
+		 * We do local_irq_enable() before calling kvm_guest_exit() so
+		 * that if a timer interrupt hits while running the guest we
+		 * account that tick as being spent in the guest. We enable
+		 * preemption after calling kvm_guest_exit() so that if we get
+		 * preempted we make sure ticks after that is not counted as
+		 * guest time.
+		 */
+		kvm_guest_exit();
+		trace_kvm_exit(kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
+		preempt_enable();
 
 		kvm_timer_sync_hwstate(vcpu);
 		kvm_vgic_sync_hwstate(vcpu);
...
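The comment added above is the heart of the guest time accounting change: interrupts are re-enabled before kvm_guest_exit() so that a pending timer tick is charged to the guest, and preemption stays disabled until afterwards so later ticks are charged to the host. A stand-alone C sketch of that ordering (the functions here are illustrative stubs, not the kernel's):

#include <stdio.h>

static void preempt_disable(void)   { puts("preempt off"); }
static void preempt_enable(void)    { puts("preempt on"); }
static void local_irq_disable(void) { puts("irqs off"); }
static void local_irq_enable(void)  { puts("irqs on; pending tick -> guest"); }
static void run_guest(void)         { puts("__kvm_vcpu_run"); }
static void kvm_guest_exit(void)    { puts("accounting flips to host"); }

int main(void)
{
	preempt_disable();	/* added by this merge */
	local_irq_disable();
	run_guest();
	local_irq_enable();	/* a timer tick taken here counts as guest time */
	kvm_guest_exit();	/* from this point on, ticks count as host time */
	preempt_enable();	/* safe to be preempted again */
	return 0;
}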
@@ -170,13 +170,9 @@ __kvm_vcpu_return:
 	@ Don't trap coprocessor accesses for host kernel
 	set_hstr vmexit
 	set_hdcr vmexit
-	set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11))
+	set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11)), after_vfp_restore
 
 #ifdef CONFIG_VFPv3
-	@ Save floating point registers we if let guest use them.
-	tst	r2, #(HCPTR_TCP(10) | HCPTR_TCP(11))
-	bne	after_vfp_restore
-
 	@ Switch VFP/NEON hardware state to the host's
 	add	r7, vcpu, #VCPU_VFP_GUEST
 	store_vfp_state r7
@@ -188,6 +184,8 @@ after_vfp_restore:
 	@ Restore FPEXC_EN which we clobbered on entry
 	pop	{r2}
 	VFPFMXR	FPEXC, r2
+#else
+after_vfp_restore:
 #endif
 
 	@ Reset Hyp-role
@@ -483,7 +481,7 @@ switch_to_guest_vfp:
 	push	{r3-r7}
 
 	@ NEON/VFP used. Turn on VFP access.
-	set_hcptr vmexit, (HCPTR_TCP(10) | HCPTR_TCP(11))
+	set_hcptr vmtrap, (HCPTR_TCP(10) | HCPTR_TCP(11))
 
 	@ Switch VFP/NEON hardware state to the guest's
 	add	r7, r0, #VCPU_VFP_HOST
...
@@ -412,7 +412,6 @@ vcpu	.req	r0		@ vcpu pointer always in r0
 	add	r11, vcpu, #VCPU_VGIC_CPU
 
 	/* Save all interesting registers */
-	ldr	r3, [r2, #GICH_HCR]
 	ldr	r4, [r2, #GICH_VMCR]
 	ldr	r5, [r2, #GICH_MISR]
 	ldr	r6, [r2, #GICH_EISR0]
@@ -420,7 +419,6 @@ vcpu	.req	r0		@ vcpu pointer always in r0
 	ldr	r8, [r2, #GICH_ELRSR0]
 	ldr	r9, [r2, #GICH_ELRSR1]
 	ldr	r10, [r2, #GICH_APR]
-ARM_BE8(rev	r3, r3	)
 ARM_BE8(rev	r4, r4	)
 ARM_BE8(rev	r5, r5	)
 ARM_BE8(rev	r6, r6	)
@@ -429,7 +427,6 @@ ARM_BE8(rev	r8, r8	)
 ARM_BE8(rev	r9, r9	)
 ARM_BE8(rev	r10, r10	)
 
-	str	r3, [r11, #VGIC_V2_CPU_HCR]
 	str	r4, [r11, #VGIC_V2_CPU_VMCR]
 	str	r5, [r11, #VGIC_V2_CPU_MISR]
 #ifdef CONFIG_CPU_ENDIAN_BE8
@@ -591,8 +588,13 @@ ARM_BE8(rev	r6, r6	)
 .endm
 
 /* Configures the HCPTR (Hyp Coprocessor Trap Register) on entry/return
- * (hardware reset value is 0). Keep previous value in r2. */
-.macro set_hcptr operation, mask
+ * (hardware reset value is 0). Keep previous value in r2.
+ * An ISB is emited on vmexit/vmtrap, but executed on vmexit only if
+ * VFP wasn't already enabled (always executed on vmtrap).
+ * If a label is specified with vmexit, it is branched to if VFP wasn't
+ * enabled.
+ */
+.macro set_hcptr operation, mask, label = none
 	mrc	p15, 4, r2, c1, c1, 2
 	ldr	r3, =\mask
 	.if \operation == vmentry
@@ -601,6 +603,17 @@ ARM_BE8(rev	r6, r6	)
 	bic	r3, r2, r3		@ Don't trap defined coproc-accesses
 	.endif
 	mcr	p15, 4, r3, c1, c1, 2
+	.if \operation != vmentry
+	.if \operation == vmexit
+	tst	r2, #(HCPTR_TCP(10) | HCPTR_TCP(11))
+	beq	1f
+	.endif
+	isb
+	.if \label != none
+	b	\label
+	.endif
+1:
+	.endif
 .endm
 
 /* Configures the HDCR (Hyp Debug Configuration Register) on entry/return
...
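The reworked macro folds the old open-coded vmexit test into set_hcptr itself: if the VFP trap bits are still set at exit, the guest never touched VFP, so the save/restore can be skipped by branching straight to after_vfp_restore. An illustrative C rendering of the vmexit path under that reading (the trap register is faked as a plain variable; HCPTR_TCP() here mirrors the kernel's one-bit-per-coprocessor encoding):

#define HCPTR_TCP(cp)	(1u << (cp))
#define VFP_TRAPS	(HCPTR_TCP(10) | HCPTR_TCP(11))

static unsigned int hcptr;	/* stand-in for the real HCPTR register */

/* Returns nonzero when the VFP state switch can be skipped. */
static int set_hcptr_vmexit(unsigned int mask)
{
	unsigned int old = hcptr;	/* "keep previous value in r2" */

	hcptr = old & ~mask;		/* stop trapping for the host */
	if (old & VFP_TRAPS)		/* traps still armed: the guest  */
		return 1;		/* never used VFP; isb + branch  */
	return 0;			/* guest used VFP: do the switch */
}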
@@ -691,8 +691,8 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
 		 * work. This is not used by the hardware and we have no
 		 * alignment requirement for this allocation.
 		 */
-		pgd = (pgd_t *)kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t),
-				       GFP_KERNEL | __GFP_ZERO);
+		pgd = kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t),
+			      GFP_KERNEL | __GFP_ZERO);
 
 	if (!pgd) {
 		kvm_free_hwpgd(hwpgd);
...
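The dropped cast was never needed: kmalloc() returns void *, which C converts implicitly to any object pointer type, and GFP_KERNEL | __GFP_ZERO already zero-fills. A hypothetical equivalent spelling (not what the commit does) would be:

	pgd = kzalloc(PTRS_PER_S2_PGD * sizeof(pgd_t), GFP_KERNEL);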
@@ -230,10 +230,6 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 	case PSCI_0_2_FN64_AFFINITY_INFO:
 		val = kvm_psci_vcpu_affinity_info(vcpu);
 		break;
-	case PSCI_0_2_FN_MIGRATE:
-	case PSCI_0_2_FN64_MIGRATE:
-		val = PSCI_RET_NOT_SUPPORTED;
-		break;
 	case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
 		/*
 		 * Trusted OS is MP hence does not require migration
@@ -242,10 +238,6 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 		 */
 		val = PSCI_0_2_TOS_MP;
 		break;
-	case PSCI_0_2_FN_MIGRATE_INFO_UP_CPU:
-	case PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU:
-		val = PSCI_RET_NOT_SUPPORTED;
-		break;
 	case PSCI_0_2_FN_SYSTEM_OFF:
 		kvm_psci_system_off(vcpu);
 		/*
@@ -271,7 +263,8 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 		ret = 0;
 		break;
 	default:
-		return -EINVAL;
+		val = PSCI_RET_NOT_SUPPORTED;
+		break;
 	}
 
 	*vcpu_reg(vcpu, 0) = val;
@@ -291,12 +284,9 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
 	case KVM_PSCI_FN_CPU_ON:
 		val = kvm_psci_vcpu_on(vcpu);
 		break;
-	case KVM_PSCI_FN_CPU_SUSPEND:
-	case KVM_PSCI_FN_MIGRATE:
+	default:
 		val = PSCI_RET_NOT_SUPPORTED;
 		break;
-	default:
-		return -EINVAL;
 	}
 
 	*vcpu_reg(vcpu, 0) = val;
...
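The PSCI hunks all push in one direction: a call with an unknown or unimplemented function ID is an error to report to the guest (PSCI_RET_NOT_SUPPORTED in r0/x0), not a reason to fail the vcpu ioctl with -EINVAL back to userspace. A self-contained sketch of the resulting dispatch shape (the handled case is elided and the shape, not the constants, is the point):

#define PSCI_RET_NOT_SUPPORTED	(-1L)
#define FN_CPU_ON		0x84000003UL	/* illustrative ID */

static long psci_dispatch(unsigned long fn)
{
	long val;

	switch (fn) {
	case FN_CPU_ON:
		val = 0;	/* real handler elided */
		break;
	default:		/* MIGRATE and friends land here now */
		val = PSCI_RET_NOT_SUPPORTED;
		break;
	}
	return val;		/* written back to the guest's r0/x0 */
}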
@@ -28,6 +28,7 @@ config KVM
 	select KVM_ARM_HOST
 	select KVM_GENERIC_DIRTYLOG_READ_PROTECT
 	select SRCU
+	select KVM_VFIO
 	select HAVE_KVM_EVENTFD
 	select HAVE_KVM_IRQFD
 	---help---
...
@@ -11,7 +11,7 @@ ARM=../../../arch/arm/kvm
 
 obj-$(CONFIG_KVM_ARM_HOST) += kvm.o
 
-kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o $(KVM)/vfio.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/arm.o $(ARM)/mmu.o $(ARM)/mmio.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/psci.o $(ARM)/perf.o
...
@@ -50,8 +50,8 @@
 	stp	x29, lr, [x3, #80]
 
 	mrs	x19, sp_el0
-	mrs	x20, elr_el2		// EL1 PC
-	mrs	x21, spsr_el2		// EL1 pstate
+	mrs	x20, elr_el2		// pc before entering el2
+	mrs	x21, spsr_el2		// pstate before entering el2
 
 	stp	x19, x20, [x3, #96]
 	str	x21, [x3, #112]
@@ -82,8 +82,8 @@
 	ldr	x21, [x3, #16]
 
 	msr	sp_el0, x19
-	msr	elr_el2, x20		// EL1 PC
-	msr	spsr_el2, x21		// EL1 pstate
+	msr	elr_el2, x20		// pc on return from el2
+	msr	spsr_el2, x21		// pstate on return from el2
 
 	add	x3, x2, #CPU_XREG_OFFSET(19)
 	ldp	x19, x20, [x3]
...
@@ -47,7 +47,6 @@ __save_vgic_v2_state:
 	add	x3, x0, #VCPU_VGIC_CPU
 
 	/* Save all interesting registers */
-	ldr	w4, [x2, #GICH_HCR]
 	ldr	w5, [x2, #GICH_VMCR]
 	ldr	w6, [x2, #GICH_MISR]
 	ldr	w7, [x2, #GICH_EISR0]
@@ -55,7 +54,6 @@ __save_vgic_v2_state:
 	ldr	w9, [x2, #GICH_ELRSR0]
 	ldr	w10, [x2, #GICH_ELRSR1]
 	ldr	w11, [x2, #GICH_APR]
-CPU_BE(	rev	w4, w4 )
 CPU_BE(	rev	w5, w5 )
 CPU_BE(	rev	w6, w6 )
 CPU_BE(	rev	w7, w7 )
@@ -64,7 +62,6 @@ CPU_BE(	rev	w9, w9 )
 CPU_BE(	rev	w10, w10 )
 CPU_BE(	rev	w11, w11 )
 
-	str	w4, [x3, #VGIC_V2_CPU_HCR]
 	str	w5, [x3, #VGIC_V2_CPU_VMCR]
 	str	w6, [x3, #VGIC_V2_CPU_MISR]
 CPU_LE(	str	w7, [x3, #VGIC_V2_CPU_EISR] )
...
@@ -48,13 +48,11 @@
 	dsb	st
 
 	// Save all interesting registers
-	mrs_s	x4, ICH_HCR_EL2
 	mrs_s	x5, ICH_VMCR_EL2
 	mrs_s	x6, ICH_MISR_EL2
 	mrs_s	x7, ICH_EISR_EL2
 	mrs_s	x8, ICH_ELSR_EL2
 
-	str	w4, [x3, #VGIC_V3_CPU_HCR]
 	str	w5, [x3, #VGIC_V3_CPU_VMCR]
 	str	w6, [x3, #VGIC_V3_CPU_MISR]
 	str	w7, [x3, #VGIC_V3_CPU_EISR]
...
@@ -897,7 +897,7 @@ struct kvm_xen_hvm_config {
  *
  * KVM_IRQFD_FLAG_RESAMPLE indicates resamplefd is valid and specifies
  * the irqfd to operate in resampling mode for level triggered interrupt
- * emlation. See Documentation/virtual/kvm/api.txt.
+ * emulation. See Documentation/virtual/kvm/api.txt.
  */
 #define KVM_IRQFD_FLAG_RESAMPLE	(1 << 1)
...
@@ -76,8 +76,6 @@ static bool handle_mmio_ctlr(struct kvm_vcpu *vcpu,
 	vgic_reg_access(mmio, &reg, offset,
 			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
 	if (mmio->is_write) {
-		if (reg & GICD_CTLR_ENABLE_SS_G0)
-			kvm_info("guest tried to enable unsupported Group0 interrupts\n");
 		vcpu->kvm->arch.vgic.enabled = !!(reg & GICD_CTLR_ENABLE_SS_G1);
 		vgic_update_state(vcpu->kvm);
 		return true;
@@ -173,6 +171,32 @@ static bool handle_mmio_clear_pending_reg_dist(struct kvm_vcpu *vcpu,
 	return false;
 }
 
+static bool handle_mmio_set_active_reg_dist(struct kvm_vcpu *vcpu,
+					    struct kvm_exit_mmio *mmio,
+					    phys_addr_t offset)
+{
+	if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
+		return vgic_handle_set_active_reg(vcpu->kvm, mmio, offset,
+						  vcpu->vcpu_id);
+
+	vgic_reg_access(mmio, NULL, offset,
+			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+	return false;
+}
+
+static bool handle_mmio_clear_active_reg_dist(struct kvm_vcpu *vcpu,
+					      struct kvm_exit_mmio *mmio,
+					      phys_addr_t offset)
+{
+	if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
+		return vgic_handle_clear_active_reg(vcpu->kvm, mmio, offset,
+						    vcpu->vcpu_id);
+
+	vgic_reg_access(mmio, NULL, offset,
+			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
+	return false;
+}
+
 static bool handle_mmio_priority_reg_dist(struct kvm_vcpu *vcpu,
 					  struct kvm_exit_mmio *mmio,
 					  phys_addr_t offset)
@@ -428,13 +452,13 @@ static const struct vgic_io_range vgic_v3_dist_ranges[] = {
 		.base = GICD_ISACTIVER,
 		.len = 0x80,
 		.bits_per_irq = 1,
-		.handle_mmio = handle_mmio_raz_wi,
+		.handle_mmio = handle_mmio_set_active_reg_dist,
 	},
 	{
 		.base = GICD_ICACTIVER,
 		.len = 0x80,
 		.bits_per_irq = 1,
-		.handle_mmio = handle_mmio_raz_wi,
+		.handle_mmio = handle_mmio_clear_active_reg_dist,
 	},
 	{
 		.base = GICD_IPRIORITYR,
@@ -561,6 +585,26 @@ static bool handle_mmio_clear_enable_reg_redist(struct kvm_vcpu *vcpu,
 				      ACCESS_WRITE_CLEARBIT);
 }
 
+static bool handle_mmio_set_active_reg_redist(struct kvm_vcpu *vcpu,
+					      struct kvm_exit_mmio *mmio,
+					      phys_addr_t offset)
+{
+	struct kvm_vcpu *redist_vcpu = mmio->private;
+
+	return vgic_handle_set_active_reg(vcpu->kvm, mmio, offset,
+					  redist_vcpu->vcpu_id);
+}
+
+static bool handle_mmio_clear_active_reg_redist(struct kvm_vcpu *vcpu,
+					      struct kvm_exit_mmio *mmio,
+					      phys_addr_t offset)
+{
+	struct kvm_vcpu *redist_vcpu = mmio->private;
+
+	return vgic_handle_clear_active_reg(vcpu->kvm, mmio, offset,
+					    redist_vcpu->vcpu_id);
+}
+
 static bool handle_mmio_set_pending_reg_redist(struct kvm_vcpu *vcpu,
 					       struct kvm_exit_mmio *mmio,
 					       phys_addr_t offset)
@@ -674,13 +718,13 @@ static const struct vgic_io_range vgic_redist_ranges[] = {
 		.base = SGI_base(GICR_ISACTIVER0),
 		.len = 0x04,
 		.bits_per_irq = 1,
-		.handle_mmio = handle_mmio_raz_wi,
+		.handle_mmio = handle_mmio_set_active_reg_redist,
 	},
 	{
 		.base = SGI_base(GICR_ICACTIVER0),
 		.len = 0x04,
 		.bits_per_irq = 1,
-		.handle_mmio = handle_mmio_raz_wi,
+		.handle_mmio = handle_mmio_clear_active_reg_redist,
 	},
 	{
 		.base = SGI_base(GICR_IPRIORITYR0),
...
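The new distributor handlers gate on the byte offset because these registers pack one bit per interrupt: the first 32 interrupt IDs are the per-CPU SGIs and PPIs, so the first 32/8 = 4 bytes of GICD_ISACTIVER/GICD_ICACTIVER are RAZ/WI from the distributor's point of view, and only offsets from 4 upwards address shared SPIs. A stand-alone restatement of that check (VGIC_NR_PRIVATE_IRQS is 32 in this tree):

#define VGIC_NR_PRIVATE_IRQS	32

/* Nonzero when a GICD_I[SC]ACTIVER byte offset addresses SPIs. */
static int offset_is_spi(unsigned long offset)
{
	return offset >= VGIC_NR_PRIVATE_IRQS / 8;	/* i.e. >= 4 */
}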
@@ -26,8 +26,6 @@
 #include <linux/of_irq.h>
 #include <linux/uaccess.h>
 
-#include <linux/irqchip/arm-gic.h>
-
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_mmu.h>
@@ -1561,7 +1559,7 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
 		goto out;
 	}
 
-	if (irq_num >= kvm->arch.vgic.nr_irqs)
+	if (irq_num >= min(kvm->arch.vgic.nr_irqs, 1020))
 		return -EINVAL;
 
 	vcpu_id = vgic_update_irq_pending(kvm, cpuid, irq_num, level);
@@ -2161,10 +2159,7 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id,
 
 	BUG_ON(!vgic_initialized(kvm));
 
-	if (spi > kvm->arch.vgic.nr_irqs)
-		return -EINVAL;
-
 	return kvm_vgic_inject_irq(kvm, 0, spi, level);
 }
 
 /* MSI not implemented yet */
...
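The min(..., 1020) clamp encodes a GIC architectural limit: interrupt IDs 1020-1023 are reserved, so injection must refuse them even when userspace configured nr_irqs larger, and the now-redundant check in kvm_set_irq() can go. A stand-alone restatement of the new bound:

/* Nonzero when irq_num may be injected; mirrors min(nr_irqs, 1020). */
static int irq_injectable(unsigned int irq_num, unsigned int nr_irqs)
{
	unsigned int limit = nr_irqs < 1020 ? nr_irqs : 1020;

	return irq_num < limit;
}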