Commit 44bcc922 authored by Paolo Bonzini

Merge tag 'kvm-arm-for-4-7-take2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into kvm-next

KVM/ARM Changes for v4.7 take 2

"The GIC is dead; Long live the GIC"

This set of changes includes the new vgic, which is a reimplementation of
our horribly broken legacy vgic implementation.  The two implementations
will live side-by-side (with the new being the configured default) for
one kernel release and then we'll remove it.

Also fixes a non-critical issue with virtual abort injection to guests.
parents 9842df62 35a2d585
@@ -41,6 +41,8 @@
 #define KVM_MAX_VCPUS VGIC_V2_MAX_CPUS
 
+#define KVM_REQ_VCPU_EXIT	8
+
 u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
 int __attribute_const__ kvm_target_cpu(void);
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
@@ -226,6 +228,10 @@ static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
 struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
 struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
 
+void kvm_arm_halt_guest(struct kvm *kvm);
+void kvm_arm_resume_guest(struct kvm *kvm);
+void kvm_arm_halt_vcpu(struct kvm_vcpu *vcpu);
+void kvm_arm_resume_vcpu(struct kvm_vcpu *vcpu);
+
 int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
 unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
......
@@ -28,6 +28,9 @@ struct kvm_decode {
 	bool sign_extend;
 };
 
+void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
+unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
+
 int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		 phys_addr_t fault_ipa);
......
@@ -46,6 +46,13 @@ config KVM_ARM_HOST
 	---help---
 	  Provides host support for ARM processors.
 
+config KVM_NEW_VGIC
+	bool "New VGIC implementation"
+	depends on KVM
+	default y
+	---help---
+	  uses the new VGIC implementation
+
 source drivers/vhost/Kconfig
 
 endif # VIRTUALIZATION
@@ -21,7 +21,18 @@ obj-$(CONFIG_KVM_ARM_HOST) += hyp/
 obj-y += kvm-arm.o init.o interrupts.o
 obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
 obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o
+ifeq ($(CONFIG_KVM_NEW_VGIC),y)
+obj-y += $(KVM)/arm/vgic/vgic.o
+obj-y += $(KVM)/arm/vgic/vgic-init.o
+obj-y += $(KVM)/arm/vgic/vgic-irqfd.o
+obj-y += $(KVM)/arm/vgic/vgic-v2.o
+obj-y += $(KVM)/arm/vgic/vgic-mmio.o
+obj-y += $(KVM)/arm/vgic/vgic-mmio-v2.o
+obj-y += $(KVM)/arm/vgic/vgic-kvm-device.o
+else
 obj-y += $(KVM)/arm/vgic.o
 obj-y += $(KVM)/arm/vgic-v2.o
 obj-y += $(KVM)/arm/vgic-v2-emul.o
+endif
 obj-y += $(KVM)/arm/arch_timer.o
@@ -459,7 +459,7 @@ static void update_vttbr(struct kvm *kvm)
 static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
 {
 	struct kvm *kvm = vcpu->kvm;
-	int ret;
+	int ret = 0;
 
 	if (likely(vcpu->arch.has_run_once))
 		return 0;
@@ -482,9 +482,9 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
 	 * interrupts from the virtual timer with a userspace gic.
 	 */
 	if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
-		kvm_timer_enable(kvm);
+		ret = kvm_timer_enable(vcpu);
 
-	return 0;
+	return ret;
 }
 
 bool kvm_arch_intc_initialized(struct kvm *kvm)
@@ -492,30 +492,37 @@ bool kvm_arch_intc_initialized(struct kvm *kvm)
 	return vgic_initialized(kvm);
 }
 
-static void kvm_arm_halt_guest(struct kvm *kvm) __maybe_unused;
-static void kvm_arm_resume_guest(struct kvm *kvm) __maybe_unused;
-
-static void kvm_arm_halt_guest(struct kvm *kvm)
+void kvm_arm_halt_guest(struct kvm *kvm)
 {
 	int i;
 	struct kvm_vcpu *vcpu;
 
 	kvm_for_each_vcpu(i, vcpu, kvm)
 		vcpu->arch.pause = true;
-	force_vm_exit(cpu_all_mask);
+	kvm_make_all_cpus_request(kvm, KVM_REQ_VCPU_EXIT);
+}
+
+void kvm_arm_halt_vcpu(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.pause = true;
+	kvm_vcpu_kick(vcpu);
 }
 
-static void kvm_arm_resume_guest(struct kvm *kvm)
+void kvm_arm_resume_vcpu(struct kvm_vcpu *vcpu)
+{
+	struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
+
+	vcpu->arch.pause = false;
+	swake_up(wq);
+}
+
+void kvm_arm_resume_guest(struct kvm *kvm)
 {
 	int i;
 	struct kvm_vcpu *vcpu;
 
-	kvm_for_each_vcpu(i, vcpu, kvm) {
-		struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
-
-		vcpu->arch.pause = false;
-		swake_up(wq);
-	}
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		kvm_arm_resume_vcpu(vcpu);
 }
 
 static void vcpu_sleep(struct kvm_vcpu *vcpu)
......
@@ -23,7 +23,7 @@
 
 #include "trace.h"
 
-static void mmio_write_buf(char *buf, unsigned int len, unsigned long data)
+void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data)
 {
 	void *datap = NULL;
 	union {
@@ -55,7 +55,7 @@ static void mmio_write_buf(char *buf, unsigned int len, unsigned long data)
 	memcpy(buf, datap, len);
 }
 
-static unsigned long mmio_read_buf(char *buf, unsigned int len)
+unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len)
 {
 	unsigned long data = 0;
 	union {
@@ -66,7 +66,7 @@ static unsigned long mmio_read_buf(char *buf, unsigned int len)
 	switch (len) {
 	case 1:
-		data = buf[0];
+		data = *(u8 *)buf;
 		break;
 	case 2:
 		memcpy(&tmp.hword, buf, len);
@@ -87,11 +87,10 @@ static unsigned long mmio_read_buf(char *buf, unsigned int len)
 /**
  * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
+ *			     or in-kernel IO emulation
+ *
  * @vcpu: The VCPU pointer
  * @run:  The VCPU run struct containing the mmio data
- *
- * This should only be called after returning from userspace for MMIO load
- * emulation.
  */
 int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
@@ -104,7 +103,7 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	if (len > sizeof(unsigned long))
 		return -EINVAL;
 
-	data = mmio_read_buf(run->mmio.data, len);
+	data = kvm_mmio_read_buf(run->mmio.data, len);
 
 	if (vcpu->arch.mmio_decode.sign_extend &&
 	    len < sizeof(unsigned long)) {
@@ -190,7 +189,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 			       len);
 
 		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data);
-		mmio_write_buf(data_buf, len, data);
+		kvm_mmio_write_buf(data_buf, len, data);
 
 		ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len,
 				       data_buf);
@@ -206,18 +205,19 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	run->mmio.is_write	= is_write;
 	run->mmio.phys_addr	= fault_ipa;
 	run->mmio.len		= len;
-	if (is_write)
-		memcpy(run->mmio.data, data_buf, len);
 
 	if (!ret) {
 		/* We handled the access successfully in the kernel. */
+		if (!is_write)
+			memcpy(run->mmio.data, data_buf, len);
 		vcpu->stat.mmio_exit_kernel++;
 		kvm_handle_mmio_return(vcpu, run);
 		return 1;
-	} else {
-		vcpu->stat.mmio_exit_user++;
 	}
 
+	if (is_write)
+		memcpy(run->mmio.data, data_buf, len);
+	vcpu->stat.mmio_exit_user++;
+
 	run->exit_reason	= KVM_EXIT_MMIO;
 	return 0;
 }
@@ -43,6 +43,8 @@
 
 #define KVM_VCPU_MAX_FEATURES 4
 
+#define KVM_REQ_VCPU_EXIT	8
+
 int __attribute_const__ kvm_target_cpu(void);
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
 int kvm_arch_dev_ioctl_check_extension(long ext);
@@ -325,6 +327,10 @@ static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
 struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
 
+void kvm_arm_halt_guest(struct kvm *kvm);
+void kvm_arm_resume_guest(struct kvm *kvm);
+void kvm_arm_halt_vcpu(struct kvm_vcpu *vcpu);
+void kvm_arm_resume_vcpu(struct kvm_vcpu *vcpu);
+
 u64 __kvm_call_hyp(void *hypfn, ...);
 #define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__)
......
@@ -30,6 +30,9 @@ struct kvm_decode {
 	bool sign_extend;
 };
 
+void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
+unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
+
 int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		 phys_addr_t fault_ipa);
......
@@ -54,6 +54,13 @@ config KVM_ARM_PMU
 	  Adds support for a virtual Performance Monitoring Unit (PMU) in
 	  virtual machines.
 
+config KVM_NEW_VGIC
+	bool "New VGIC implementation"
+	depends on KVM
+	default y
+	---help---
+	  uses the new VGIC implementation
+
 source drivers/vhost/Kconfig
 
 endif # VIRTUALIZATION
@@ -20,10 +20,22 @@ kvm-$(CONFIG_KVM_ARM_HOST) += emulate.o inject_fault.o regmap.o
 kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
 kvm-$(CONFIG_KVM_ARM_HOST) += guest.o debug.o reset.o sys_regs.o sys_regs_generic_v8.o
 
+ifeq ($(CONFIG_KVM_NEW_VGIC),y)
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-init.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-irqfd.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-v2.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-v3.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-mmio.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-mmio-v2.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-mmio-v3.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-kvm-device.o
+else
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2-emul.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3-emul.o
+endif
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arch_timer.o
 kvm-$(CONFIG_KVM_ARM_PMU) += $(KVM)/arm/pmu.o
@@ -162,7 +162,7 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
 		esr |= (ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT);
 
 	if (!is_iabt)
-		esr |= ESR_ELx_EC_DABT_LOW;
+		esr |= ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;
 
 	vcpu_sys_reg(vcpu, ESR_EL1) = esr | ESR_ELx_FSC_EXTABT;
 }
......
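The inject_abt64() one-liner above is the virtual abort injection fix called out in the tag message: ESR_ELx keeps the exception class (EC) in bits [31:26], so the DABT class value has to be shifted into that field the same way the IABT value is on the line before it. A minimal illustration (not part of the commit; constant values taken from arch/arm64/include/asm/esr.h, where ESR_ELx_EC_SHIFT is 26 and ESR_ELx_EC_DABT_LOW is 0x24):

	u32 esr;
	esr = ESR_ELx_EC_DABT_LOW;			/* buggy: 0x24 lands in the ISS field */
	esr = ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;	/* fixed: EC field set, esr == 0x90000000 */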
@@ -24,9 +24,6 @@
 #include <linux/workqueue.h>
 
 struct arch_timer_kvm {
-	/* Is the timer enabled */
-	bool			enabled;
-
 	/* Virtual offset */
 	cycle_t			cntvoff;
 };
@@ -53,15 +50,15 @@ struct arch_timer_cpu {
 	/* Timer IRQ */
 	struct kvm_irq_level		irq;
 
-	/* VGIC mapping */
-	struct irq_phys_map		*map;
-
 	/* Active IRQ state caching */
 	bool				active_cleared_last;
+
+	/* Is the timer enabled */
+	bool				enabled;
 };
 
 int kvm_timer_hyp_init(void);
-void kvm_timer_enable(struct kvm *kvm);
+int kvm_timer_enable(struct kvm_vcpu *vcpu);
 void kvm_timer_init(struct kvm *kvm);
 int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
 			 const struct kvm_irq_level *irq);
......
@@ -19,6 +19,10 @@
 #ifndef __ASM_ARM_KVM_VGIC_H
 #define __ASM_ARM_KVM_VGIC_H
 
+#ifdef CONFIG_KVM_NEW_VGIC
+#include <kvm/vgic/vgic.h>
+#else
+
 #include <linux/kernel.h>
 #include <linux/kvm.h>
 #include <linux/irqreturn.h>
@@ -158,7 +162,6 @@ struct vgic_io_device {
 struct irq_phys_map {
 	u32			virt_irq;
 	u32			phys_irq;
-	u32			irq;
 };
 
 struct irq_phys_map_entry {
@@ -305,9 +308,6 @@ struct vgic_cpu {
 	unsigned long	*active_shared;
 	unsigned long	*pend_act_shared;
 
-	/* Number of list registers on this CPU */
-	int		nr_lr;
-
 	/* CPU vif control registers for world switch */
 	union {
 		struct vgic_v2_cpu_if	vgic_v2;
@@ -342,17 +342,18 @@ void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
 int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
 			bool level);
 int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid,
-			       struct irq_phys_map *map, bool level);
+			       unsigned int virt_irq, bool level);
 void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
-struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu,
-					   int virt_irq, int irq);
-int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map);
-bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, struct irq_phys_map *map);
+int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, int virt_irq, int phys_irq);
+int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq);
+bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq);
 
 #define irqchip_in_kernel(k)	(!!((k)->arch.vgic.in_kernel))
 #define vgic_initialized(k)	(!!((k)->arch.vgic.nr_cpus))
 #define vgic_ready(k)		((k)->arch.vgic.ready)
+#define vgic_valid_spi(k, i)	(((i) >= VGIC_NR_PRIVATE_IRQS) && \
+				((i) < (k)->arch.vgic.nr_irqs))
 
 int vgic_v2_probe(const struct gic_kvm_info *gic_kvm_info,
 		  const struct vgic_ops **ops,
@@ -370,4 +371,5 @@ static inline int vgic_v3_probe(const struct gic_kvm_info *gic_kvm_info,
 }
 #endif
 
+#endif	/* old VGIC include */
 #endif
/*
* Copyright (C) 2015, 2016 ARM Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __ASM_ARM_KVM_VGIC_VGIC_H
#define __ASM_ARM_KVM_VGIC_VGIC_H
#include <linux/kernel.h>
#include <linux/kvm.h>
#include <linux/irqreturn.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <kvm/iodev.h>
#define VGIC_V3_MAX_CPUS 255
#define VGIC_V2_MAX_CPUS 8
#define VGIC_NR_IRQS_LEGACY 256
#define VGIC_NR_SGIS 16
#define VGIC_NR_PPIS 16
#define VGIC_NR_PRIVATE_IRQS (VGIC_NR_SGIS + VGIC_NR_PPIS)
#define VGIC_MAX_PRIVATE (VGIC_NR_PRIVATE_IRQS - 1)
#define VGIC_MAX_SPI 1019
#define VGIC_MAX_RESERVED 1023
#define VGIC_MIN_LPI 8192
enum vgic_type {
VGIC_V2, /* Good ol' GICv2 */
VGIC_V3, /* New fancy GICv3 */
};
/* same for all guests, as depending only on the _host's_ GIC model */
struct vgic_global {
/* type of the host GIC */
enum vgic_type type;
/* Physical address of vgic virtual cpu interface */
phys_addr_t vcpu_base;
/* virtual control interface mapping */
void __iomem *vctrl_base;
/* Number of implemented list registers */
int nr_lr;
/* Maintenance IRQ number */
unsigned int maint_irq;
/* maximum number of VCPUs allowed (GICv2 limits us to 8) */
int max_gic_vcpus;
/* Only needed for the legacy KVM_CREATE_IRQCHIP */
bool can_emulate_gicv2;
};
extern struct vgic_global kvm_vgic_global_state;
#define VGIC_V2_MAX_LRS (1 << 6)
#define VGIC_V3_MAX_LRS 16
#define VGIC_V3_LR_INDEX(lr) (VGIC_V3_MAX_LRS - 1 - lr)
enum vgic_irq_config {
VGIC_CONFIG_EDGE = 0,
VGIC_CONFIG_LEVEL
};
struct vgic_irq {
spinlock_t irq_lock; /* Protects the content of the struct */
struct list_head ap_list;
struct kvm_vcpu *vcpu; /* SGIs and PPIs: The VCPU
* SPIs and LPIs: The VCPU whose ap_list
* this is queued on.
*/
struct kvm_vcpu *target_vcpu; /* The VCPU that this interrupt should
* be sent to, as a result of the
* targets reg (v2) or the
* affinity reg (v3).
*/
u32 intid; /* Guest visible INTID */
bool pending;
bool line_level; /* Level only */
bool soft_pending; /* Level only */
bool active; /* not used for LPIs */
bool enabled;
bool hw; /* Tied to HW IRQ */
u32 hwintid; /* HW INTID number */
union {
u8 targets; /* GICv2 target VCPUs mask */
u32 mpidr; /* GICv3 target VCPU */
};
u8 source; /* GICv2 SGIs only */
u8 priority;
enum vgic_irq_config config; /* Level or edge */
};
struct vgic_register_region;
struct vgic_io_device {
gpa_t base_addr;
struct kvm_vcpu *redist_vcpu;
const struct vgic_register_region *regions;
int nr_regions;
struct kvm_io_device dev;
};
struct vgic_dist {
bool in_kernel;
bool ready;
bool initialized;
/* vGIC model the kernel emulates for the guest (GICv2 or GICv3) */
u32 vgic_model;
int nr_spis;
/* TODO: Consider moving to global state */
/* Virtual control interface mapping */
void __iomem *vctrl_base;
/* base addresses in guest physical address space: */
gpa_t vgic_dist_base; /* distributor */
union {
/* either a GICv2 CPU interface */
gpa_t vgic_cpu_base;
/* or a number of GICv3 redistributor regions */
gpa_t vgic_redist_base;
};
/* distributor enabled */
bool enabled;
struct vgic_irq *spis;
struct vgic_io_device dist_iodev;
struct vgic_io_device *redist_iodevs;
};
struct vgic_v2_cpu_if {
u32 vgic_hcr;
u32 vgic_vmcr;
u32 vgic_misr; /* Saved only */
u64 vgic_eisr; /* Saved only */
u64 vgic_elrsr; /* Saved only */
u32 vgic_apr;
u32 vgic_lr[VGIC_V2_MAX_LRS];
};
struct vgic_v3_cpu_if {
#ifdef CONFIG_KVM_ARM_VGIC_V3
u32 vgic_hcr;
u32 vgic_vmcr;
u32 vgic_sre; /* Restored only, change ignored */
u32 vgic_misr; /* Saved only */
u32 vgic_eisr; /* Saved only */
u32 vgic_elrsr; /* Saved only */
u32 vgic_ap0r[4];
u32 vgic_ap1r[4];
u64 vgic_lr[VGIC_V3_MAX_LRS];
#endif
};
struct vgic_cpu {
/* CPU vif control registers for world switch */
union {
struct vgic_v2_cpu_if vgic_v2;
struct vgic_v3_cpu_if vgic_v3;
};
unsigned int used_lrs;
struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS];
spinlock_t ap_list_lock; /* Protects the ap_list */
/*
* List of IRQs that this VCPU should consider because they are either
* Active or Pending (hence the name; AP list), or because they recently
* were one of the two and need to be migrated off this list to another
* VCPU.
*/
struct list_head ap_list_head;
u64 live_lrs;
};
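The ap_list above is the central per-VCPU structure of the new design: flushing and syncing LR state comes down to walking this list under the two-level locking the comments describe. A sketch of such a walk (illustrative only, not code from this commit):

	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;

	spin_lock(&vgic_cpu->ap_list_lock);
	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		spin_lock(&irq->irq_lock);
		/* e.g. decide whether the IRQ may be migrated off this list */
		spin_unlock(&irq->irq_lock);
	}
	spin_unlock(&vgic_cpu->ap_list_lock);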
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
void kvm_vgic_early_init(struct kvm *kvm);
int kvm_vgic_create(struct kvm *kvm, u32 type);
void kvm_vgic_destroy(struct kvm *kvm);
void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu);
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
int kvm_vgic_map_resources(struct kvm *kvm);
int kvm_vgic_hyp_init(void);
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
bool level);
int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid, unsigned int intid,
bool level);
int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq);
int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq);
bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq);
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
#define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel))
#define vgic_initialized(k) ((k)->arch.vgic.initialized)
#define vgic_ready(k) ((k)->arch.vgic.ready)
#define vgic_valid_spi(k, i) (((i) >= VGIC_NR_PRIVATE_IRQS) && \
((i) < (k)->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS))
bool kvm_vcpu_has_pending_irqs(struct kvm_vcpu *vcpu);
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
#ifdef CONFIG_KVM_ARM_VGIC_V3
void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
#else
static inline void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
{
}
#endif
/**
* kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW
*
* The host's GIC naturally limits the maximum amount of VCPUs a guest
* can use.
*/
static inline int kvm_vgic_get_max_vcpus(void)
{
return kvm_vgic_global_state.max_gic_vcpus;
}
#endif /* __ASM_ARM_KVM_VGIC_VGIC_H */
@@ -275,6 +275,12 @@
 #define ICH_LR_ACTIVE_BIT		(1ULL << 63)
 #define ICH_LR_PHYS_ID_SHIFT		32
 #define ICH_LR_PHYS_ID_MASK		(0x3ffULL << ICH_LR_PHYS_ID_SHIFT)
+#define ICH_LR_PRIORITY_SHIFT		48
+
+/* These are for GICv2 emulation only */
+#define GICH_LR_VIRTUALID		(0x3ffUL << 0)
+#define GICH_LR_PHYSID_CPUID_SHIFT	(10)
+#define GICH_LR_PHYSID_CPUID		(7UL << GICH_LR_PHYSID_CPUID_SHIFT)
 
 #define ICH_MISR_EOI			(1 << 0)
 #define ICH_MISR_U			(1 << 1)
......
@@ -33,6 +33,7 @@
 
 #define GIC_DIST_CTRL			0x000
 #define GIC_DIST_CTR			0x004
+#define GIC_DIST_IIDR			0x008
 #define GIC_DIST_IGROUP			0x080
 #define GIC_DIST_ENABLE_SET		0x100
 #define GIC_DIST_ENABLE_CLEAR		0x180
@@ -76,6 +77,7 @@
 #define GICH_LR_VIRTUALID		(0x3ff << 0)
 #define GICH_LR_PHYSID_CPUID_SHIFT	(10)
 #define GICH_LR_PHYSID_CPUID		(0x3ff << GICH_LR_PHYSID_CPUID_SHIFT)
+#define GICH_LR_PRIORITY_SHIFT		23
 #define GICH_LR_STATE			(3 << 28)
 #define GICH_LR_PENDING_BIT		(1 << 28)
 #define GICH_LR_ACTIVE_BIT		(1 << 29)
......
@@ -20,6 +20,7 @@
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 #include <linux/interrupt.h>
+#include <linux/irq.h>
 
 #include <clocksource/arm_arch_timer.h>
 #include <asm/arch_timer.h>
@@ -174,10 +175,10 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level)
 
 	timer->active_cleared_last = false;
 	timer->irq.level = new_level;
-	trace_kvm_timer_update_irq(vcpu->vcpu_id, timer->map->virt_irq,
+	trace_kvm_timer_update_irq(vcpu->vcpu_id, timer->irq.irq,
 				   timer->irq.level);
 	ret = kvm_vgic_inject_mapped_irq(vcpu->kvm, vcpu->vcpu_id,
-					 timer->map,
+					 timer->irq.irq,
 					 timer->irq.level);
 	WARN_ON(ret);
 }
@@ -196,7 +197,7 @@ static int kvm_timer_update_state(struct kvm_vcpu *vcpu)
 	 * because the guest would never see the interrupt.  Instead wait
 	 * until we call this function from kvm_timer_flush_hwstate.
 	 */
-	if (!vgic_initialized(vcpu->kvm))
+	if (!vgic_initialized(vcpu->kvm) || !timer->enabled)
 		return -ENODEV;
 
 	if (kvm_timer_should_fire(vcpu) != timer->irq.level)
@@ -274,10 +275,8 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
 	 * to ensure that hardware interrupts from the timer triggers a guest
 	 * exit.
 	 */
-	if (timer->irq.level || kvm_vgic_map_is_active(vcpu, timer->map))
-		phys_active = true;
-	else
-		phys_active = false;
+	phys_active = timer->irq.level ||
+			kvm_vgic_map_is_active(vcpu, timer->irq.irq);
 
 	/*
 	 * We want to avoid hitting the (re)distributor as much as
@@ -302,7 +301,7 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
 	if (timer->active_cleared_last && !phys_active)
 		return;
 
-	ret = irq_set_irqchip_state(timer->map->irq,
+	ret = irq_set_irqchip_state(host_vtimer_irq,
 				    IRQCHIP_STATE_ACTIVE,
 				    phys_active);
 	WARN_ON(ret);
@@ -334,7 +333,6 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
 			 const struct kvm_irq_level *irq)
 {
 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
-	struct irq_phys_map *map;
 
 	/*
 	 * The vcpu timer irq number cannot be determined in
@@ -353,15 +351,6 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
 	timer->cntv_ctl = 0;
 	kvm_timer_update_state(vcpu);
 
-	/*
-	 * Tell the VGIC that the virtual interrupt is tied to a
-	 * physical interrupt. We do that once per VCPU.
-	 */
-	map = kvm_vgic_map_phys_irq(vcpu, irq->irq, host_vtimer_irq);
-	if (WARN_ON(IS_ERR(map)))
-		return PTR_ERR(map);
-
-	timer->map = map;
 	return 0;
 }
 
@@ -487,14 +476,43 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
 
 	timer_disarm(timer);
-	if (timer->map)
-		kvm_vgic_unmap_phys_irq(vcpu, timer->map);
+	kvm_vgic_unmap_phys_irq(vcpu, timer->irq.irq);
 }
 
-void kvm_timer_enable(struct kvm *kvm)
+int kvm_timer_enable(struct kvm_vcpu *vcpu)
 {
-	if (kvm->arch.timer.enabled)
-		return;
+	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+	struct irq_desc *desc;
+	struct irq_data *data;
+	int phys_irq;
+	int ret;
+
+	if (timer->enabled)
+		return 0;
+
+	/*
+	 * Find the physical IRQ number corresponding to the host_vtimer_irq
+	 */
+	desc = irq_to_desc(host_vtimer_irq);
+	if (!desc) {
+		kvm_err("%s: no interrupt descriptor\n", __func__);
+		return -EINVAL;
+	}
+
+	data = irq_desc_get_irq_data(desc);
+	while (data->parent_data)
+		data = data->parent_data;
+
+	phys_irq = data->hwirq;
+
+	/*
+	 * Tell the VGIC that the virtual interrupt is tied to a
+	 * physical interrupt. We do that once per VCPU.
+	 */
+	ret = kvm_vgic_map_phys_irq(vcpu, timer->irq.irq, phys_irq);
+	if (ret)
+		return ret;
 
 	/*
 	 * There is a potential race here between VCPUs starting for the first
@@ -505,7 +523,9 @@ void kvm_timer_enable(struct kvm *kvm)
 	 * the arch timers are enabled.
 	 */
 	if (timecounter && wqueue)
-		kvm->arch.timer.enabled = 1;
+		timer->enabled = 1;
+
+	return 0;
 }
 
 void kvm_timer_init(struct kvm *kvm)
......
@@ -24,11 +24,10 @@
 /* vcpu is already in the HYP VA space */
 void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu)
 {
-	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
 	u64 val;
 
-	if (kvm->arch.timer.enabled) {
+	if (timer->enabled) {
 		timer->cntv_ctl = read_sysreg_el0(cntv_ctl);
 		timer->cntv_cval = read_sysreg_el0(cntv_cval);
 	}
@@ -60,7 +59,7 @@ void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu)
 	val |= CNTHCTL_EL1PCTEN;
 	write_sysreg(val, cnthctl_el2);
 
-	if (kvm->arch.timer.enabled) {
+	if (timer->enabled) {
 		write_sysreg(kvm->arch.timer.cntvoff, cntvoff_el2);
 		write_sysreg_el0(timer->cntv_cval, cntv_cval);
 		isb();
......
@@ -21,11 +21,18 @@
 
 #include <asm/kvm_hyp.h>
 
+#ifdef CONFIG_KVM_NEW_VGIC
+extern struct vgic_global kvm_vgic_global_state;
+#define vgic_v2_params kvm_vgic_global_state
+#else
+extern struct vgic_params vgic_v2_params;
+#endif
+
 static void __hyp_text save_maint_int_state(struct kvm_vcpu *vcpu,
 					    void __iomem *base)
 {
 	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
-	int nr_lr = vcpu->arch.vgic_cpu.nr_lr;
+	int nr_lr = (kern_hyp_va(&vgic_v2_params))->nr_lr;
 	u32 eisr0, eisr1;
 	int i;
 	bool expect_mi;
@@ -67,7 +74,7 @@ static void __hyp_text save_maint_int_state(struct kvm_vcpu *vcpu,
 static void __hyp_text save_elrsr(struct kvm_vcpu *vcpu, void __iomem *base)
 {
 	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
-	int nr_lr = vcpu->arch.vgic_cpu.nr_lr;
+	int nr_lr = (kern_hyp_va(&vgic_v2_params))->nr_lr;
 	u32 elrsr0, elrsr1;
 
 	elrsr0 = readl_relaxed(base + GICH_ELRSR0);
@@ -86,7 +93,7 @@ static void __hyp_text save_elrsr(struct kvm_vcpu *vcpu, void __iomem *base)
 static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
 {
 	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
-	int nr_lr = vcpu->arch.vgic_cpu.nr_lr;
+	int nr_lr = (kern_hyp_va(&vgic_v2_params))->nr_lr;
 	int i;
 
 	for (i = 0; i < nr_lr; i++) {
@@ -141,13 +148,13 @@ void __hyp_text __vgic_v2_restore_state(struct kvm_vcpu *vcpu)
 	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
 	struct vgic_dist *vgic = &kvm->arch.vgic;
 	void __iomem *base = kern_hyp_va(vgic->vctrl_base);
-	int i, nr_lr;
+	int nr_lr = (kern_hyp_va(&vgic_v2_params))->nr_lr;
+	int i;
 	u64 live_lrs = 0;
 
 	if (!base)
 		return;
 
-	nr_lr = vcpu->arch.vgic_cpu.nr_lr;
-
 	for (i = 0; i < nr_lr; i++)
 		if (cpu_if->vgic_lr[i] & GICH_LR_STATE)
......
@@ -436,7 +436,14 @@ static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-static bool irq_is_valid(struct kvm *kvm, int irq, bool is_ppi)
+#define irq_is_ppi(irq) ((irq) >= VGIC_NR_SGIS && (irq) < VGIC_NR_PRIVATE_IRQS)
+
+/*
+ * For one VM the interrupt type must be same for each vcpu.
+ * As a PPI, the interrupt number is the same for all vcpus,
+ * while as an SPI it must be a separate number per vcpu.
+ */
+static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
 {
 	int i;
 	struct kvm_vcpu *vcpu;
@@ -445,7 +452,7 @@ static bool irq_is_valid(struct kvm *kvm, int irq, bool is_ppi)
 		if (!kvm_arm_pmu_irq_initialized(vcpu))
 			continue;
 
-		if (is_ppi) {
+		if (irq_is_ppi(irq)) {
 			if (vcpu->arch.pmu.irq_num != irq)
 				return false;
 		} else {
@@ -457,7 +464,6 @@ static bool irq_is_valid(struct kvm *kvm, int irq, bool is_ppi)
 	return true;
 }
 
-
 int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 {
 	switch (attr->attr) {
@@ -471,14 +477,11 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 		if (get_user(irq, uaddr))
 			return -EFAULT;
 
-		/*
-		 * The PMU overflow interrupt could be a PPI or SPI, but for one
-		 * VM the interrupt type must be same for each vcpu. As a PPI,
-		 * the interrupt number is the same for all vcpus, while as an
-		 * SPI it must be a separate number per vcpu.
-		 */
-		if (irq < VGIC_NR_SGIS || irq >= vcpu->kvm->arch.vgic.nr_irqs ||
-		    !irq_is_valid(vcpu->kvm, irq, irq < VGIC_NR_PRIVATE_IRQS))
+		/* The PMU overflow interrupt can be a PPI or a valid SPI. */
+		if (!(irq_is_ppi(irq) || vgic_valid_spi(vcpu->kvm, irq)))
+			return -EINVAL;
+
+		if (!pmu_irq_is_valid(vcpu->kvm, irq))
 			return -EINVAL;
 
 		if (kvm_arm_pmu_irq_initialized(vcpu))
......
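For example (numbers for illustration only): on a two-VCPU guest, a PPI such as 23 passes pmu_irq_is_valid() only if both VCPUs are configured with 23, while SPIs such as 40 and 41 pass only if each VCPU has its own distinct number.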
@@ -171,7 +171,7 @@ static const struct vgic_ops vgic_v2_ops = {
 	.enable			= vgic_v2_enable,
 };
 
-static struct vgic_params vgic_v2_params;
+struct vgic_params __section(.hyp.text) vgic_v2_params;
 
 static void vgic_cpu_init_lrs(void *params)
 {
@@ -201,6 +201,8 @@ int vgic_v2_probe(const struct gic_kvm_info *gic_kvm_info,
 	const struct resource *vctrl_res = &gic_kvm_info->vctrl;
 	const struct resource *vcpu_res = &gic_kvm_info->vcpu;
 
+	memset(vgic, 0, sizeof(*vgic));
+
 	if (!gic_kvm_info->maint_irq) {
 		kvm_err("error getting vgic maintenance irq\n");
 		ret = -ENXIO;
......
@@ -29,12 +29,6 @@
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmu.h>
 
-/* These are for GICv2 emulation only */
-#define GICH_LR_VIRTUALID		(0x3ffUL << 0)
-#define GICH_LR_PHYSID_CPUID_SHIFT	(10)
-#define GICH_LR_PHYSID_CPUID		(7UL << GICH_LR_PHYSID_CPUID_SHIFT)
-#define ICH_LR_VIRTUALID_MASK		(BIT_ULL(32) - 1)
-
 static u32 ich_vtr_el2;
 
 static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr)
@@ -43,7 +37,7 @@ static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr)
 	u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr];
 
 	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
-		lr_desc.irq	= val & ICH_LR_VIRTUALID_MASK;
+		lr_desc.irq	= val & ICH_LR_VIRTUAL_ID_MASK;
 	else
 		lr_desc.irq	= val & GICH_LR_VIRTUALID;
......
@@ -690,12 +690,11 @@ bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
  */
 void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
 {
-	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 	u64 elrsr = vgic_get_elrsr(vcpu);
 	unsigned long *elrsr_ptr = u64_to_bitmask(&elrsr);
 	int i;
 
-	for_each_clear_bit(i, elrsr_ptr, vgic_cpu->nr_lr) {
+	for_each_clear_bit(i, elrsr_ptr, vgic->nr_lr) {
 		struct vgic_lr lr = vgic_get_lr(vcpu, i);
 
 		/*
@@ -820,7 +819,6 @@ static int vgic_handle_mmio_access(struct kvm_vcpu *vcpu,
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
 	struct vgic_io_device *iodev = container_of(this,
 						    struct vgic_io_device, dev);
-	struct kvm_run *run = vcpu->run;
 	const struct vgic_io_range *range;
 	struct kvm_exit_mmio mmio;
 	bool updated_state;
@@ -849,12 +847,6 @@ static int vgic_handle_mmio_access(struct kvm_vcpu *vcpu,
 		updated_state = false;
 	}
 	spin_unlock(&dist->lock);
-	run->mmio.is_write	= is_write;
-	run->mmio.len		= len;
-	run->mmio.phys_addr	= addr;
-	memcpy(run->mmio.data, val, len);
-
-	kvm_handle_mmio_return(vcpu, run);
 
 	if (updated_state)
 		vgic_kick_vcpus(vcpu->kvm);
@@ -1102,18 +1094,18 @@ static bool dist_active_irq(struct kvm_vcpu *vcpu)
 	return test_bit(vcpu->vcpu_id, dist->irq_active_on_cpu);
 }
 
-bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, struct irq_phys_map *map)
+bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq)
 {
 	int i;
 
-	for (i = 0; i < vcpu->arch.vgic_cpu.nr_lr; i++) {
+	for (i = 0; i < vgic->nr_lr; i++) {
 		struct vgic_lr vlr = vgic_get_lr(vcpu, i);
 
-		if (vlr.irq == map->virt_irq && vlr.state & LR_STATE_ACTIVE)
+		if (vlr.irq == virt_irq && vlr.state & LR_STATE_ACTIVE)
 			return true;
 	}
 
-	return vgic_irq_is_active(vcpu, map->virt_irq);
+	return vgic_irq_is_active(vcpu, virt_irq);
 }
 
 /*
@@ -1521,7 +1513,6 @@ static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
 }
 
 static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
-				   struct irq_phys_map *map,
 				   unsigned int irq_num, bool level)
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
@@ -1660,14 +1651,14 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
 	if (map)
 		return -EINVAL;
 
-	return vgic_update_irq_pending(kvm, cpuid, NULL, irq_num, level);
+	return vgic_update_irq_pending(kvm, cpuid, irq_num, level);
 }
 
 /**
  * kvm_vgic_inject_mapped_irq - Inject a physically mapped IRQ to the vgic
  * @kvm:     The VM structure pointer
  * @cpuid:   The CPU for PPIs
- * @map:     Pointer to a irq_phys_map structure describing the mapping
+ * @virt_irq: The virtual IRQ to be injected
 * @level:   Edge-triggered:  true:  to trigger the interrupt
 *			      false: to ignore the call
 *	     Level-sensitive  true:  raise the input signal
@@ -1678,7 +1669,7 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
  * being HIGH and 0 being LOW and all devices being active-HIGH.
  */
 int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid,
-			       struct irq_phys_map *map, bool level)
+			       unsigned int virt_irq, bool level)
 {
 	int ret;
 
@@ -1686,7 +1677,7 @@ int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid,
 	if (ret)
 		return ret;
 
-	return vgic_update_irq_pending(kvm, cpuid, map, map->virt_irq, level);
+	return vgic_update_irq_pending(kvm, cpuid, virt_irq, level);
 }
 
 static irqreturn_t vgic_maintenance_handler(int irq, void *data)
@@ -1712,43 +1703,28 @@ static struct list_head *vgic_get_irq_phys_map_list(struct kvm_vcpu *vcpu,
 /**
  * kvm_vgic_map_phys_irq - map a virtual IRQ to a physical IRQ
  * @vcpu: The VCPU pointer
- * @virt_irq: The virtual irq number
- * @irq: The Linux IRQ number
+ * @virt_irq: The virtual IRQ number for the guest
+ * @phys_irq: The hardware IRQ number of the host
 *
 * Establish a mapping between a guest visible irq (@virt_irq) and a
- * Linux irq (@irq). On injection, @virt_irq will be associated with
- * the physical interrupt represented by @irq. This mapping can be
+ * hardware irq (@phys_irq). On injection, @virt_irq will be associated with
+ * the physical interrupt represented by @phys_irq. This mapping can be
 * established multiple times as long as the parameters are the same.
 *
- * Returns a valid pointer on success, and an error pointer otherwise
+ * Returns 0 on success or an error value otherwise.
 */
-struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu,
-					   int virt_irq, int irq)
+int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, int virt_irq, int phys_irq)
 {
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
 	struct list_head *root = vgic_get_irq_phys_map_list(vcpu, virt_irq);
 	struct irq_phys_map *map;
 	struct irq_phys_map_entry *entry;
-	struct irq_desc *desc;
-	struct irq_data *data;
-	int phys_irq;
-
-	desc = irq_to_desc(irq);
-	if (!desc) {
-		kvm_err("%s: no interrupt descriptor\n", __func__);
-		return ERR_PTR(-EINVAL);
-	}
-
-	data = irq_desc_get_irq_data(desc);
-	while (data->parent_data)
-		data = data->parent_data;
-
-	phys_irq = data->hwirq;
+	int ret = 0;
 
 	/* Create a new mapping */
 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
 	if (!entry)
-		return ERR_PTR(-ENOMEM);
+		return -ENOMEM;
 
 	spin_lock(&dist->irq_phys_map_lock);
 
@@ -1756,9 +1732,8 @@ struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu,
 	map = vgic_irq_map_search(vcpu, virt_irq);
 	if (map) {
 		/* Make sure this mapping matches */
-		if (map->phys_irq != phys_irq ||
-		    map->irq != irq)
-			map = ERR_PTR(-EINVAL);
+		if (map->phys_irq != phys_irq)
+			ret = -EINVAL;
 
 		/* Found an existing, valid mapping */
 		goto out;
@@ -1767,7 +1742,6 @@ struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu,
 	map           = &entry->map;
 	map->virt_irq = virt_irq;
 	map->phys_irq = phys_irq;
-	map->irq      = irq;
 
 	list_add_tail_rcu(&entry->entry, root);
 
@@ -1775,9 +1749,9 @@ struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu,
 	spin_unlock(&dist->irq_phys_map_lock);
 	/* If we've found a hit in the existing list, free the useless
 	 * entry */
-	if (IS_ERR(map) || map != &entry->map)
+	if (ret || map != &entry->map)
 		kfree(entry);
-	return map;
+	return ret;
 }
 
 static struct irq_phys_map *vgic_irq_map_search(struct kvm_vcpu *vcpu,
@@ -1813,25 +1787,22 @@ static void vgic_free_phys_irq_map_rcu(struct rcu_head *rcu)
 /**
  * kvm_vgic_unmap_phys_irq - Remove a virtual to physical IRQ mapping
  * @vcpu: The VCPU pointer
- * @map: The pointer to a mapping obtained through kvm_vgic_map_phys_irq
+ * @virt_irq: The virtual IRQ number to be unmapped
 *
 * Remove an existing mapping between virtual and physical interrupts.
 */
-int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map)
+int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq)
 {
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
 	struct irq_phys_map_entry *entry;
 	struct list_head *root;
 
-	if (!map)
-		return -EINVAL;
-
-	root = vgic_get_irq_phys_map_list(vcpu, map->virt_irq);
+	root = vgic_get_irq_phys_map_list(vcpu, virt_irq);
 
 	spin_lock(&dist->irq_phys_map_lock);
 
 	list_for_each_entry(entry, root, entry) {
-		if (&entry->map == map) {
+		if (entry->map.virt_irq == virt_irq) {
 			list_del_rcu(&entry->entry);
 			call_rcu(&entry->rcu, vgic_free_phys_irq_map_rcu);
 			break;
@@ -1887,13 +1858,6 @@ static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
 		return -ENOMEM;
 	}
 
-	/*
-	 * Store the number of LRs per vcpu, so we don't have to go
-	 * all the way to the distributor structure to find out. Only
-	 * assembly code should use this one.
-	 */
-	vgic_cpu->nr_lr = vgic->nr_lr;
-
 	return 0;
 }
......
/*
* Copyright (C) 2015, 2016 ARM Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <trace/events/kvm.h>
int kvm_irq_map_gsi(struct kvm *kvm,
struct kvm_kernel_irq_routing_entry *entries,
int gsi)
{
return 0;
}
int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned int irqchip,
unsigned int pin)
{
return pin;
}
int kvm_set_irq(struct kvm *kvm, int irq_source_id,
u32 irq, int level, bool line_status)
{
unsigned int spi = irq + VGIC_NR_PRIVATE_IRQS;
trace_kvm_set_irq(irq, level, irq_source_id);
BUG_ON(!vgic_initialized(kvm));
return kvm_vgic_inject_irq(kvm, 0, spi, level);
}
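kvm_set_irq() above offsets the userspace GSI number by VGIC_NR_PRIVATE_IRQS (32), so GSI n targets SPI INTID n + 32. A hypothetical call (not part of this file) asserting GSI 0, i.e. the first SPI, INTID 32:

	kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 0, 1, false);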
/* MSI not implemented yet */
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
struct kvm *kvm, int irq_source_id,
int level, bool line_status)
{
return 0;
}
/*
* Copyright (C) 2015, 2016 ARM Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __KVM_ARM_VGIC_MMIO_H__
#define __KVM_ARM_VGIC_MMIO_H__
struct vgic_register_region {
unsigned int reg_offset;
unsigned int len;
unsigned int bits_per_irq;
unsigned int access_flags;
unsigned long (*read)(struct kvm_vcpu *vcpu, gpa_t addr,
unsigned int len);
void (*write)(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len,
unsigned long val);
};
extern struct kvm_io_device_ops kvm_io_gic_ops;
#define VGIC_ACCESS_8bit 1
#define VGIC_ACCESS_32bit 2
#define VGIC_ACCESS_64bit 4
/*
* Generate a mask that covers the number of bytes required to address
* up to 1024 interrupts, each represented by <bits> bits. This assumes
* that <bits> is a power of two.
*/
#define VGIC_ADDR_IRQ_MASK(bits) (((bits) * 1024 / 8) - 1)
/*
* (addr & mask) gives us the byte offset for the INT ID, so we want to
* divide this with 'bytes per irq' to get the INT ID, which is given
* by '(bits) / 8'. But we do this with fixed-point-arithmetic and
* take advantage of the fact that division by a fraction equals
* multiplication with the inverted fraction, and scale up both the
* numerator and denominator with 8 to support at most 64 bits per IRQ:
*/
#define VGIC_ADDR_TO_INTID(addr, bits) (((addr) & VGIC_ADDR_IRQ_MASK(bits)) * \
64 / (bits) / 8)
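A worked example (not in the header): the GICv2 GICD_ICFGR range uses 2 bits per IRQ, so VGIC_ADDR_IRQ_MASK(2) is (2 * 1024 / 8) - 1 = 0xff, and a byte offset of 8 into that range decodes to (8 & 0xff) * 64 / 2 / 8 = 32, i.e. the first SPI:

	BUILD_BUG_ON(VGIC_ADDR_TO_INTID(8, 2) != 32);	/* sketch; needs <linux/bug.h> */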
/*
* Some VGIC registers store per-IRQ information, with a different number
* of bits per IRQ. For those registers this macro is used.
* The _WITH_LENGTH version instantiates registers with a fixed length
* and is mutually exclusive with the _PER_IRQ version.
*/
#define REGISTER_DESC_WITH_BITS_PER_IRQ(off, rd, wr, bpi, acc) \
{ \
.reg_offset = off, \
.bits_per_irq = bpi, \
.len = bpi * 1024 / 8, \
.access_flags = acc, \
.read = rd, \
.write = wr, \
}
#define REGISTER_DESC_WITH_LENGTH(off, rd, wr, length, acc) \
{ \
.reg_offset = off, \
.bits_per_irq = 0, \
.len = length, \
.access_flags = acc, \
.read = rd, \
.write = wr, \
}
int kvm_vgic_register_mmio_region(struct kvm *kvm, struct kvm_vcpu *vcpu,
struct vgic_register_region *reg_desc,
struct vgic_io_device *region,
int nr_irqs, bool offset_private);
unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len);
void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
unsigned long data);
unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len);
unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len);
void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
unsigned int len, unsigned long val);
unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len);
void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len,
unsigned long val);
void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len,
unsigned long val);
unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len);
void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len,
unsigned long val);
void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len,
unsigned long val);
unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len);
void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len,
unsigned long val);
void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len,
unsigned long val);
unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len);
void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len,
unsigned long val);
unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len);
void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len,
unsigned long val);
unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev);
unsigned int vgic_v3_init_dist_iodev(struct vgic_io_device *dev);
#endif
/*
* Copyright (C) 2015, 2016 ARM Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/irqchip/arm-gic.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_mmu.h>
#include "vgic.h"
/*
* Call this function to convert a u64 value to an unsigned long * bitmask
* in a way that works on both 32-bit and 64-bit LE and BE platforms.
*
* Warning: Calling this function may modify *val.
*/
static unsigned long *u64_to_bitmask(u64 *val)
{
#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 32
*val = (*val >> 32) | (*val << 32);
#endif
return (unsigned long *)val;
}
void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu)
{
struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
if (cpuif->vgic_misr & GICH_MISR_EOI) {
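		/*
		 * Use a local copy: u64_to_bitmask() may swap the two
		 * halves of *val in place (see above).
		 */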
u64 eisr = cpuif->vgic_eisr;
unsigned long *eisr_bmap = u64_to_bitmask(&eisr);
int lr;
for_each_set_bit(lr, eisr_bmap, kvm_vgic_global_state.nr_lr) {
u32 intid = cpuif->vgic_lr[lr] & GICH_LR_VIRTUALID;
WARN_ON(cpuif->vgic_lr[lr] & GICH_LR_STATE);
kvm_notify_acked_irq(vcpu->kvm, 0,
intid - VGIC_NR_PRIVATE_IRQS);
}
}
/* check and disable underflow maintenance IRQ */
cpuif->vgic_hcr &= ~GICH_HCR_UIE;
/*
* In the next iterations of the vcpu loop, if we sync the
* vgic state after flushing it, but before entering the guest
* (this happens for pending signals and vmid rollovers), then
* make sure we don't pick up any old maintenance interrupts
* here.
*/
cpuif->vgic_eisr = 0;
}
void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
{
struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
cpuif->vgic_hcr |= GICH_HCR_UIE;
}
/*
* transfer the content of the LRs back into the corresponding ap_list:
* - active bit is transferred as is
* - pending bit is
* - transferred as is in case of edge sensitive IRQs
* - set to the line-level (resample time) for level sensitive IRQs
*/
void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
{
struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
int lr;
for (lr = 0; lr < vcpu->arch.vgic_cpu.used_lrs; lr++) {
u32 val = cpuif->vgic_lr[lr];
u32 intid = val & GICH_LR_VIRTUALID;
struct vgic_irq *irq;
irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
spin_lock(&irq->irq_lock);
/* Always preserve the active bit */
irq->active = !!(val & GICH_LR_ACTIVE_BIT);
/* Edge is the only case where we preserve the pending bit */
if (irq->config == VGIC_CONFIG_EDGE &&
(val & GICH_LR_PENDING_BIT)) {
irq->pending = true;
if (vgic_irq_is_sgi(intid)) {
u32 cpuid = val & GICH_LR_PHYSID_CPUID;
cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
irq->source |= (1 << cpuid);
}
}
/* Clear soft pending state when level IRQs have been acked */
if (irq->config == VGIC_CONFIG_LEVEL &&
!(val & GICH_LR_PENDING_BIT)) {
irq->soft_pending = false;
irq->pending = irq->line_level;
}
spin_unlock(&irq->irq_lock);
}
}
/*
* Populates the particular LR with the state of a given IRQ:
* - for an edge sensitive IRQ the pending state is cleared in struct vgic_irq
* - for a level sensitive IRQ the pending state value is unchanged;
* it is dictated directly by the input level
*
* If @irq describes an SGI with multiple sources, we choose the
* lowest-numbered source VCPU and clear that bit in the source bitmap.
*
* The irq_lock must be held by the caller.
*/
void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
{
u32 val = irq->intid;
if (irq->pending) {
val |= GICH_LR_PENDING_BIT;
if (irq->config == VGIC_CONFIG_EDGE)
irq->pending = false;
if (vgic_irq_is_sgi(irq->intid)) {
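			/*
			 * e.g. source == 0b0110: ffs() selects the lowest
			 * source VCPU (VCPU 1); its bit is cleared below
			 * and pending is raised again so that VCPU 2's
			 * copy is delivered via a later LR.
			 */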
u32 src = ffs(irq->source);
BUG_ON(!src);
val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
irq->source &= ~(1 << (src - 1));
if (irq->source)
irq->pending = true;
}
}
if (irq->active)
val |= GICH_LR_ACTIVE_BIT;
if (irq->hw) {
val |= GICH_LR_HW;
val |= irq->hwintid << GICH_LR_PHYSID_CPUID_SHIFT;
} else {
if (irq->config == VGIC_CONFIG_LEVEL)
val |= GICH_LR_EOI;
}
/* The GICv2 LR only holds five bits of priority. */
val |= (irq->priority >> 3) << GICH_LR_PRIORITY_SHIFT;
vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = val;
}
void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = 0;
}
void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
u32 vmcr;
vmcr = (vmcrp->ctlr << GICH_VMCR_CTRL_SHIFT) & GICH_VMCR_CTRL_MASK;
vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) &
GICH_VMCR_ALIAS_BINPOINT_MASK;
vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) &
GICH_VMCR_BINPOINT_MASK;
vmcr |= (vmcrp->pmr << GICH_VMCR_PRIMASK_SHIFT) &
GICH_VMCR_PRIMASK_MASK;
vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr;
}
void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
u32 vmcr = vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr;
vmcrp->ctlr = (vmcr & GICH_VMCR_CTRL_MASK) >>
GICH_VMCR_CTRL_SHIFT;
vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >>
GICH_VMCR_ALIAS_BINPOINT_SHIFT;
vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >>
GICH_VMCR_BINPOINT_SHIFT;
vmcrp->pmr = (vmcr & GICH_VMCR_PRIMASK_MASK) >>
GICH_VMCR_PRIMASK_SHIFT;
}
void vgic_v2_enable(struct kvm_vcpu *vcpu)
{
/*
* By forcing VMCR to zero, the GIC will restore the binary
* points to their reset values. Anything else resets to zero
* anyway.
*/
vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = 0;
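	/* a set bit in ELRSR flags the corresponding LR as empty */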
vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr = ~0;
/* Get the show on the road... */
vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr = GICH_HCR_EN;
}
/* check for overlapping regions and for regions crossing the end of memory */
static bool vgic_v2_check_base(gpa_t dist_base, gpa_t cpu_base)
{
if (dist_base + KVM_VGIC_V2_DIST_SIZE < dist_base)
return false;
if (cpu_base + KVM_VGIC_V2_CPU_SIZE < cpu_base)
return false;
if (dist_base + KVM_VGIC_V2_DIST_SIZE <= cpu_base)
return true;
if (cpu_base + KVM_VGIC_V2_CPU_SIZE <= dist_base)
return true;
return false;
}
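To make the two failure modes concrete, here is a small standalone sketch
(sample values assumed, not part of the patch) of the same logic: an
overflowing 'base + size' wraps around and compares less than 'base', and
two frames are only accepted when one ends before the other begins:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DIST_SIZE 0x1000ULL	/* assumed KVM_VGIC_V2_DIST_SIZE */
#define CPU_SIZE  0x2000ULL	/* assumed KVM_VGIC_V2_CPU_SIZE */

static bool check_base(uint64_t dist_base, uint64_t cpu_base)
{
	if (dist_base + DIST_SIZE < dist_base)	/* wraps past end of memory */
		return false;
	if (cpu_base + CPU_SIZE < cpu_base)
		return false;
	return dist_base + DIST_SIZE <= cpu_base ||
	       cpu_base + CPU_SIZE <= dist_base;
}

int main(void)
{
	printf("%d\n", check_base(0x08000000, 0x08001000));	/* 1: disjoint */
	printf("%d\n", check_base(0x08000000, 0x08000800));	/* 0: overlap */
	printf("%d\n", check_base(UINT64_MAX - 0x7ff, 0x08000000)); /* 0: wrap */
	return 0;
}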
int vgic_v2_map_resources(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
int ret = 0;
if (vgic_ready(kvm))
goto out;
if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
kvm_err("Need to set vgic cpu and dist addresses first\n");
ret = -ENXIO;
goto out;
}
if (!vgic_v2_check_base(dist->vgic_dist_base, dist->vgic_cpu_base)) {
kvm_err("VGIC CPU and dist frames overlap\n");
ret = -EINVAL;
goto out;
}
/*
* Initialize the vgic if this hasn't already been done on demand by
* accessing the vgic state from userspace.
*/
ret = vgic_init(kvm);
if (ret) {
kvm_err("Unable to initialize VGIC dynamic data structures\n");
goto out;
}
ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V2);
if (ret) {
kvm_err("Unable to register VGIC MMIO regions\n");
goto out;
}
ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
kvm_vgic_global_state.vcpu_base,
KVM_VGIC_V2_CPU_SIZE, true);
if (ret) {
kvm_err("Unable to remap VGIC CPU to VCPU\n");
goto out;
}
dist->ready = true;
out:
if (ret)
kvm_vgic_destroy(kvm);
return ret;
}
/**
 * vgic_v2_probe - probe for a GICv2 compatible interrupt controller
 * @info: pointer to the GIC description
 *
 * Returns 0 if a GICv2 has been found, an error code otherwise
 */
int vgic_v2_probe(const struct gic_kvm_info *info)
{
int ret;
u32 vtr;
if (!info->vctrl.start) {
kvm_err("GICH not present in the firmware table\n");
return -ENXIO;
}
if (!PAGE_ALIGNED(info->vcpu.start)) {
kvm_err("GICV physical address 0x%llx not page aligned\n",
(unsigned long long)info->vcpu.start);
return -ENXIO;
}
if (!PAGE_ALIGNED(resource_size(&info->vcpu))) {
kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n",
(unsigned long long)resource_size(&info->vcpu),
PAGE_SIZE);
return -ENXIO;
}
kvm_vgic_global_state.vctrl_base = ioremap(info->vctrl.start,
resource_size(&info->vctrl));
if (!kvm_vgic_global_state.vctrl_base) {
kvm_err("Cannot ioremap GICH\n");
return -ENOMEM;
}
vtr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VTR);
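	/* GICH_VTR.ListRegs encodes the number of implemented LRs minus one */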
kvm_vgic_global_state.nr_lr = (vtr & 0x3f) + 1;
ret = create_hyp_io_mappings(kvm_vgic_global_state.vctrl_base,
kvm_vgic_global_state.vctrl_base +
resource_size(&info->vctrl),
info->vctrl.start);
if (ret) {
kvm_err("Cannot map VCTRL into hyp\n");
iounmap(kvm_vgic_global_state.vctrl_base);
return ret;
}
kvm_vgic_global_state.can_emulate_gicv2 = true;
kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
kvm_vgic_global_state.vcpu_base = info->vcpu.start;
kvm_vgic_global_state.type = VGIC_V2;
kvm_vgic_global_state.max_gic_vcpus = VGIC_V2_MAX_CPUS;
kvm_info("vgic-v2@%llx\n", info->vctrl.start);
return 0;
}
/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_asm.h>
#include "vgic.h"
void vgic_v3_process_maintenance(struct kvm_vcpu *vcpu)
{
struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
u32 model = vcpu->kvm->arch.vgic.vgic_model;
if (cpuif->vgic_misr & ICH_MISR_EOI) {
unsigned long eisr_bmap = cpuif->vgic_eisr;
int lr;
for_each_set_bit(lr, &eisr_bmap, kvm_vgic_global_state.nr_lr) {
u32 intid;
u64 val = cpuif->vgic_lr[lr];
if (model == KVM_DEV_TYPE_ARM_VGIC_V3)
intid = val & ICH_LR_VIRTUAL_ID_MASK;
else
intid = val & GICH_LR_VIRTUALID;
WARN_ON(cpuif->vgic_lr[lr] & ICH_LR_STATE);
kvm_notify_acked_irq(vcpu->kvm, 0,
intid - VGIC_NR_PRIVATE_IRQS);
}
/*
* In the next iterations of the vcpu loop, if we sync
* the vgic state after flushing it, but before
* entering the guest (this happens for pending
* signals and vmid rollovers), then make sure we
* don't pick up any old maintenance interrupts here.
*/
cpuif->vgic_eisr = 0;
}
cpuif->vgic_hcr &= ~ICH_HCR_UIE;
}
void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
{
struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
cpuif->vgic_hcr |= ICH_HCR_UIE;
}
void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
{
struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
u32 model = vcpu->kvm->arch.vgic.vgic_model;
int lr;
for (lr = 0; lr < vcpu->arch.vgic_cpu.used_lrs; lr++) {
u64 val = cpuif->vgic_lr[lr];
u32 intid;
struct vgic_irq *irq;
if (model == KVM_DEV_TYPE_ARM_VGIC_V3)
intid = val & ICH_LR_VIRTUAL_ID_MASK;
else
intid = val & GICH_LR_VIRTUALID;
irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
spin_lock(&irq->irq_lock);
/* Always preserve the active bit */
irq->active = !!(val & ICH_LR_ACTIVE_BIT);
/* Edge is the only case where we preserve the pending bit */
if (irq->config == VGIC_CONFIG_EDGE &&
(val & ICH_LR_PENDING_BIT)) {
irq->pending = true;
if (vgic_irq_is_sgi(intid) &&
model == KVM_DEV_TYPE_ARM_VGIC_V2) {
u32 cpuid = val & GICH_LR_PHYSID_CPUID;
cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
irq->source |= (1 << cpuid);
}
}
/* Clear soft pending state when level irqs have been acked */
if (irq->config == VGIC_CONFIG_LEVEL &&
!(val & ICH_LR_PENDING_BIT)) {
irq->soft_pending = false;
irq->pending = irq->line_level;
}
spin_unlock(&irq->irq_lock);
}
}
/* Requires the irq to be locked already */
void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
{
u32 model = vcpu->kvm->arch.vgic.vgic_model;
u64 val = irq->intid;
if (irq->pending) {
val |= ICH_LR_PENDING_BIT;
if (irq->config == VGIC_CONFIG_EDGE)
irq->pending = false;
if (vgic_irq_is_sgi(irq->intid) &&
model == KVM_DEV_TYPE_ARM_VGIC_V2) {
u32 src = ffs(irq->source);
BUG_ON(!src);
val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
irq->source &= ~(1 << (src - 1));
if (irq->source)
irq->pending = true;
}
}
if (irq->active)
val |= ICH_LR_ACTIVE_BIT;
if (irq->hw) {
val |= ICH_LR_HW;
val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT;
} else {
if (irq->config == VGIC_CONFIG_LEVEL)
val |= ICH_LR_EOI;
}
/*
* We currently only support Group1 interrupts, which is a
* known defect. This needs to be addressed at some point.
*/
if (model == KVM_DEV_TYPE_ARM_VGIC_V3)
val |= ICH_LR_GROUP;
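	/* unlike the GICv2 LR, the v3 LR holds the full 8-bit priority */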
val |= (u64)irq->priority << ICH_LR_PRIORITY_SHIFT;
vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = val;
}
void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = 0;
}
void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
u32 vmcr;
vmcr = (vmcrp->ctlr << ICH_VMCR_CTLR_SHIFT) & ICH_VMCR_CTLR_MASK;
vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;
vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr = vmcr;
}
void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
u32 vmcr = vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr;
vmcrp->ctlr = (vmcr & ICH_VMCR_CTLR_MASK) >> ICH_VMCR_CTLR_SHIFT;
vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
}
void vgic_v3_enable(struct kvm_vcpu *vcpu)
{
struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;
/*
* By forcing VMCR to zero, the GIC will restore the binary
* points to their reset values. Anything else resets to zero
* anyway.
*/
vgic_v3->vgic_vmcr = 0;
vgic_v3->vgic_elrsr = ~0;
/*
* If we are emulating a GICv3, we do it in a non-GICv2-compatible
* way, so we force SRE to 1 to demonstrate this to the guest.
* This goes with the spec allowing the value to be RAO/WI.
*/
if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
vgic_v3->vgic_sre = ICC_SRE_EL1_SRE;
else
vgic_v3->vgic_sre = 0;
/* Get the show on the road... */
vgic_v3->vgic_hcr = ICH_HCR_EN;
}
/* check for overlapping regions and for regions crossing the end of memory */
static bool vgic_v3_check_base(struct kvm *kvm)
{
struct vgic_dist *d = &kvm->arch.vgic;
gpa_t redist_size = KVM_VGIC_V3_REDIST_SIZE;
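	/* each vCPU requires its own redistributor frame */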
redist_size *= atomic_read(&kvm->online_vcpus);
if (d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE < d->vgic_dist_base)
return false;
if (d->vgic_redist_base + redist_size < d->vgic_redist_base)
return false;
if (d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE <= d->vgic_redist_base)
return true;
if (d->vgic_redist_base + redist_size <= d->vgic_dist_base)
return true;
return false;
}
int vgic_v3_map_resources(struct kvm *kvm)
{
int ret = 0;
struct vgic_dist *dist = &kvm->arch.vgic;
if (vgic_ready(kvm))
goto out;
if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
IS_VGIC_ADDR_UNDEF(dist->vgic_redist_base)) {
kvm_err("Need to set vgic distributor addresses first\n");
ret = -ENXIO;
goto out;
}
if (!vgic_v3_check_base(kvm)) {
kvm_err("VGIC redist and dist frames overlap\n");
ret = -EINVAL;
goto out;
}
/*
* For a VGICv3 we require the userland to explicitly initialize
* the VGIC before we need to use it.
*/
if (!vgic_initialized(kvm)) {
ret = -EBUSY;
goto out;
}
ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3);
if (ret) {
kvm_err("Unable to register VGICv3 dist MMIO regions\n");
goto out;
}
ret = vgic_register_redist_iodevs(kvm, dist->vgic_redist_base);
if (ret) {
kvm_err("Unable to register VGICv3 redist MMIO regions\n");
goto out;
}
dist->ready = true;
out:
if (ret)
kvm_vgic_destroy(kvm);
return ret;
}
/**
 * vgic_v3_probe - probe for a GICv3 compatible interrupt controller
 * @info: pointer to the GIC description
 *
 * Returns 0 if a GICv3 has been found, an error code otherwise
 */
int vgic_v3_probe(const struct gic_kvm_info *info)
{
u32 ich_vtr_el2 = kvm_call_hyp(__vgic_v3_get_ich_vtr_el2);
/*
* The ListRegs field is 5 bits, but there is an architectural
* maximum of 16 list registers. Just ignore bit 4...
*/
kvm_vgic_global_state.nr_lr = (ich_vtr_el2 & 0xf) + 1;
kvm_vgic_global_state.can_emulate_gicv2 = false;
if (!info->vcpu.start) {
kvm_info("GICv3: no GICV resource entry\n");
kvm_vgic_global_state.vcpu_base = 0;
} else if (!PAGE_ALIGNED(info->vcpu.start)) {
pr_warn("GICV physical address 0x%llx not page aligned\n",
(unsigned long long)info->vcpu.start);
kvm_vgic_global_state.vcpu_base = 0;
} else if (!PAGE_ALIGNED(resource_size(&info->vcpu))) {
pr_warn("GICV size 0x%llx not a multiple of page size 0x%lx\n",
(unsigned long long)resource_size(&info->vcpu),
PAGE_SIZE);
kvm_vgic_global_state.vcpu_base = 0;
} else {
kvm_vgic_global_state.vcpu_base = info->vcpu.start;
kvm_vgic_global_state.can_emulate_gicv2 = true;
kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
kvm_info("vgic-v2@%llx\n", info->vcpu.start);
}
if (kvm_vgic_global_state.vcpu_base == 0)
kvm_info("disabling GICv2 emulation\n");
kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V3);
kvm_vgic_global_state.vctrl_base = NULL;
kvm_vgic_global_state.type = VGIC_V3;
kvm_vgic_global_state.max_gic_vcpus = VGIC_V3_MAX_CPUS;
return 0;
}