Commit 75c8f387 authored by Catalin Marinas

Merge tag 'kvmarm-fixes-6.11-2' of...

Merge tag 'kvmarm-fixes-6.11-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into for-next/fixes

KVM/arm64 fixes for 6.11, round #2

 - Don't drop references on LPIs that weren't visited by the
   vgic-debug iterator

 - Cure lock ordering issue when unregistering vgic redistributors

 - Fix for misaligned stage-2 mappings when VMs are backed by hugetlb
   pages

 - Treat SGI registers as UNDEFINED if a VM hasn't been configured for
   GICv3

* tag 'kvmarm-fixes-6.11-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm:
  KVM: arm64: Make ICC_*SGI*_EL1 undef in the absence of a vGICv3
  KVM: arm64: Ensure canonical IPA is hugepage-aligned when handling fault
  KVM: arm64: vgic: Don't hold config_lock while unregistering redistributors
  KVM: arm64: vgic-debug: Don't put unmarked LPIs
  KVM: arm64: vgic: Hold config_lock while tearing down a CPU interface
  KVM: selftests: arm64: Correct feature test for S1PIE in get-reg-list
  KVM: arm64: Tidying up PAuth code in KVM
  KVM: arm64: vgic-debug: Exit the iterator properly w/o LPI
  KVM: arm64: Enforce dependency on an ARMv8.4-aware toolchain
  docs: KVM: Fix register ID of SPSR_FIQ
  KVM: arm64: vgic: fix unexpected unlock sparse warnings
  KVM: arm64: fix kdoc warnings in W=1 builds
  KVM: arm64: fix override-init warnings in W=1 builds
  KVM: arm64: free kvm->arch.nested_mmus with kvfree()
parents f75c2355 3e6245eb
@@ -2592,7 +2592,7 @@ Specifically:
   0x6030 0000 0010 004a SPSR_ABT    64  spsr[KVM_SPSR_ABT]
   0x6030 0000 0010 004c SPSR_UND    64  spsr[KVM_SPSR_UND]
   0x6030 0000 0010 004e SPSR_IRQ    64  spsr[KVM_SPSR_IRQ]
-  0x6060 0000 0010 0050 SPSR_FIQ    64  spsr[KVM_SPSR_FIQ]
+  0x6030 0000 0010 0050 SPSR_FIQ    64  spsr[KVM_SPSR_FIQ]
   0x6040 0000 0010 0054 V0         128  fp_regs.vregs[0]    [1]_
   0x6040 0000 0010 0058 V1         128  fp_regs.vregs[1]    [1]_
   ...
...
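For context on the SPSR_FIQ fix above: a register ID packs an architecture, a size, a coprocessor class, and a 32-bit-word offset into one u64, so the old 0x6060 prefix advertised a 512-bit size (KVM_REG_SIZE_U512) for a 64-bit register. A minimal userspace sketch, assuming the constants below match the UAPI values in <linux/kvm.h>; the offset arithmetic is an illustration, not kernel code:

    #include <stdint.h>
    #include <stdio.h>

    #define KVM_REG_ARM64    0x6000000000000000ULL /* arch: arm64 */
    #define KVM_REG_SIZE_U64 0x0030000000000000ULL /* 64-bit register */
    #define KVM_REG_ARM_CORE (0x0010ULL << 16)     /* core register class */

    int main(void)
    {
            /* 31 GPRs + sp + pc + pstate + sp_el1 + elr_el1 = 36 u64s before
             * spsr[]; SPSR_FIQ is spsr[4]; core offsets count 32-bit words */
            uint64_t offset = (36 + 4) * 2; /* 0x50 */
            uint64_t id = KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
                          KVM_REG_ARM_CORE | offset;

            printf("SPSR_FIQ: 0x%016llx\n", (unsigned long long)id);
            /* prints 0x6030000000100050, the corrected ID */
            return 0;
    }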
@@ -104,7 +104,7 @@ alternative_else_nop_endif
 #define __ptrauth_save_key(ctxt, key)                                  \
         do {                                                            \
                 u64 __val;                                              \
                 __val = read_sysreg_s(SYS_ ## key ## KEYLO_EL1);        \
                 ctxt_sys_reg(ctxt, key ## KEYLO_EL1) = __val;           \
                 __val = read_sysreg_s(SYS_ ## key ## KEYHI_EL1);        \
...
@@ -19,6 +19,7 @@ if VIRTUALIZATION
 menuconfig KVM
         bool "Kernel-based Virtual Machine (KVM) support"
+        depends on AS_HAS_ARMV8_4
         select KVM_COMMON
         select KVM_GENERIC_HARDWARE_ENABLING
         select KVM_GENERIC_MMU_NOTIFIER
...
@@ -10,6 +10,9 @@ include $(srctree)/virt/kvm/Makefile.kvm
 obj-$(CONFIG_KVM) += kvm.o
 obj-$(CONFIG_KVM) += hyp/
+CFLAGS_sys_regs.o += -Wno-override-init
+CFLAGS_handle_exit.o += -Wno-override-init
+
 kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o \
         inject_fault.o va_layout.o handle_exit.o \
         guest.o debug.o reset.o sys_regs.o stacktrace.o \
...
@@ -164,6 +164,7 @@ static int kvm_arm_default_max_vcpus(void)
 /**
  * kvm_arch_init_vm - initializes a VM data structure
  * @kvm: pointer to the KVM struct
+ * @type: kvm device type
  */
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
@@ -521,10 +522,10 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
 static void vcpu_set_pauth_traps(struct kvm_vcpu *vcpu)
 {
-        if (vcpu_has_ptrauth(vcpu)) {
+        if (vcpu_has_ptrauth(vcpu) && !is_protected_kvm_enabled()) {
                 /*
-                 * Either we're running running an L2 guest, and the API/APK
-                 * bits come from L1's HCR_EL2, or API/APK are both set.
+                 * Either we're running an L2 guest, and the API/APK bits come
+                 * from L1's HCR_EL2, or API/APK are both set.
                  */
                 if (unlikely(vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu))) {
                         u64 val;
@@ -541,16 +542,10 @@ static void vcpu_set_pauth_traps(struct kvm_vcpu *vcpu)
                  * Save the host keys if there is any chance for the guest
                  * to use pauth, as the entry code will reload the guest
                  * keys in that case.
-                 * Protected mode is the exception to that rule, as the
-                 * entry into the EL2 code eagerly switch back and forth
-                 * between host and hyp keys (and kvm_hyp_ctxt is out of
-                 * reach anyway).
                  */
-                if (is_protected_kvm_enabled())
-                        return;
                 if (vcpu->arch.hcr_el2 & (HCR_API | HCR_APK)) {
                         struct kvm_cpu_context *ctxt;
                         ctxt = this_cpu_ptr_hyp_sym(kvm_hyp_ctxt);
                         ptrauth_save_keys(ctxt);
                 }
...
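The net effect of the two vcpu_set_pauth_traps() hunks above is a single early bail-out for protected mode. A standalone sketch of the resulting key-save policy; names mirror the kernel's and the HCR_EL2 bit positions follow the architecture, but this is an illustration, not the kernel code:

    #include <stdbool.h>

    #define HCR_APK (1UL << 40) /* don't trap key register accesses */
    #define HCR_API (1UL << 41) /* don't trap pauth instructions */

    /* Save host keys only when the guest may actually use pauth and
     * pKVM is not managing the EL2 entry path itself. */
    static bool should_save_host_keys(bool has_ptrauth, bool protected_kvm,
                                      unsigned long hcr_el2)
    {
            if (!has_ptrauth || protected_kvm)
                    return false;
            return hcr_el2 & (HCR_API | HCR_APK);
    }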
@@ -27,7 +27,6 @@
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
 #include <asm/kvm_nested.h>
-#include <asm/kvm_ptrauth.h>
 #include <asm/fpsimd.h>
 #include <asm/debug-monitors.h>
 #include <asm/processor.h>
...
@@ -20,6 +20,8 @@ HOST_EXTRACFLAGS += -I$(objtree)/include
 lib-objs := clear_page.o copy_page.o memcpy.o memset.o
 lib-objs := $(addprefix ../../../lib/, $(lib-objs))
+CFLAGS_switch.nvhe.o += -Wno-override-init
+
 hyp-obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o \
         hyp-main.o hyp-smp.o psci-relay.o early_alloc.o page_alloc.o \
         cache.o setup.o mm.o mem_protect.o sys_regs.o pkvm.o stacktrace.o ffa.o
...
@@ -173,9 +173,8 @@ static void __pmu_switch_to_host(struct kvm_vcpu *vcpu)
 static bool kvm_handle_pvm_sys64(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
         /*
-         * Make sure we handle the exit for workarounds and ptrauth
-         * before the pKVM handling, as the latter could decide to
-         * UNDEF.
+         * Make sure we handle the exit for workarounds before the pKVM
+         * handling, as the latter could decide to UNDEF.
          */
         return (kvm_hyp_handle_sysreg(vcpu, exit_code) ||
                 kvm_handle_pvm_sysreg(vcpu, exit_code));
...
@@ -6,6 +6,8 @@
 asflags-y := -D__KVM_VHE_HYPERVISOR__
 ccflags-y := -D__KVM_VHE_HYPERVISOR__
+CFLAGS_switch.o += -Wno-override-init
+
 obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o
 obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
         ../fpsimd.o ../hyp-entry.o ../exception.o
@@ -1540,8 +1540,15 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                 vma_pagesize = min(vma_pagesize, (long)max_map_size);
         }
-        if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE)
+        /*
+         * Both the canonical IPA and fault IPA must be hugepage-aligned to
+         * ensure we find the right PFN and lay down the mapping in the right
+         * place.
+         */
+        if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE) {
                 fault_ipa &= ~(vma_pagesize - 1);
+                ipa &= ~(vma_pagesize - 1);
+        }
         gfn = ipa >> PAGE_SHIFT;
         mte_allowed = kvm_vma_mte_allowed(vma);
...
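To see why both addresses must be masked in the hunk above: the PFN is looked up from the canonical IPA while the mapping is installed at the fault IPA, so rounding only one of them down leaves the two out of step. A trivial standalone model, assuming a 2MiB PMD-sized block on 4K pages:

    #include <stdint.h>
    #include <stdio.h>
    #include <assert.h>

    #define PMD_SIZE (2UL * 1024 * 1024)

    int main(void)
    {
            uint64_t ipa          = 0x40280000; /* unaligned within a 2M block */
            uint64_t fault_ipa    = 0x40280000;
            uint64_t vma_pagesize = PMD_SIZE;

            /* mirror of the kernel hunk: round both down together */
            fault_ipa &= ~(vma_pagesize - 1);
            ipa       &= ~(vma_pagesize - 1);

            assert(ipa == fault_ipa && (ipa & (PMD_SIZE - 1)) == 0);
            printf("aligned ipa: 0x%llx\n", (unsigned long long)ipa);
            return 0;
    }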
@@ -786,7 +786,7 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm)
                 if (!WARN_ON(atomic_read(&mmu->refcnt)))
                         kvm_free_stage2_pgd(mmu);
         }
-        kfree(kvm->arch.nested_mmus);
+        kvfree(kvm->arch.nested_mmus);
         kvm->arch.nested_mmus = NULL;
         kvm->arch.nested_mmus_size = 0;
         kvm_uninit_stage2_mmu(kvm);
...
@@ -33,6 +33,7 @@
 #include <trace/events/kvm.h>
 #include "sys_regs.h"
+#include "vgic/vgic.h"
 #include "trace.h"
@@ -435,6 +436,11 @@ static bool access_gic_sgi(struct kvm_vcpu *vcpu,
 {
         bool g1;
+        if (!kvm_has_gicv3(vcpu->kvm)) {
+                kvm_inject_undefined(vcpu);
+                return false;
+        }
+
         if (!p->is_write)
                 return read_from_write_only(vcpu, p, r);
...
@@ -45,7 +45,8 @@ static void iter_next(struct kvm *kvm, struct vgic_state_iter *iter)
          * Let the xarray drive the iterator after the last SPI, as the iterator
          * has exhausted the sequentially-allocated INTID space.
          */
-        if (iter->intid >= (iter->nr_spis + VGIC_NR_PRIVATE_IRQS - 1)) {
+        if (iter->intid >= (iter->nr_spis + VGIC_NR_PRIVATE_IRQS - 1) &&
+            iter->nr_lpis) {
                 if (iter->lpi_idx < iter->nr_lpis)
                         xa_find_after(&dist->lpi_xa, &iter->intid,
                                       VGIC_LPI_MAX_INTID,
@@ -84,7 +85,7 @@ static void iter_unmark_lpis(struct kvm *kvm)
         struct vgic_irq *irq;
         unsigned long intid;
-        xa_for_each(&dist->lpi_xa, intid, irq) {
+        xa_for_each_marked(&dist->lpi_xa, intid, irq, LPI_XA_MARK_DEBUG_ITER) {
                 xa_clear_mark(&dist->lpi_xa, intid, LPI_XA_MARK_DEBUG_ITER);
                 vgic_put_irq(kvm, irq);
         }
@@ -112,7 +113,7 @@ static bool end_of_vgic(struct vgic_state_iter *iter)
         return iter->dist_id > 0 &&
                iter->vcpu_id == iter->nr_cpus &&
                iter->intid >= (iter->nr_spis + VGIC_NR_PRIVATE_IRQS) &&
-               iter->lpi_idx > iter->nr_lpis;
+               (!iter->nr_lpis || iter->lpi_idx > iter->nr_lpis);
 }
 static void *vgic_debug_start(struct seq_file *s, loff_t *pos)
...
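The vgic-debug fixes above hinge on a pairing invariant: the iterator takes a reference only on LPIs it marks, so teardown must drop references only on marked entries, never on every entry. A userspace model of that invariant, with a plain array standing in for the kernel's xarray and its LPI_XA_MARK_DEBUG_ITER mark:

    #include <stdio.h>

    struct lpi { int refcnt; int marked; };

    static void iter_unmark_lpis(struct lpi *lpis, int n)
    {
            for (int i = 0; i < n; i++) {
                    if (!lpis[i].marked)
                            continue; /* never visited: no reference to drop */
                    lpis[i].marked = 0;
                    lpis[i].refcnt--; /* vgic_put_irq() equivalent */
            }
    }

    int main(void)
    {
            struct lpi lpis[3] = { { 1, 1 }, { 1, 0 }, { 1, 1 } };

            iter_unmark_lpis(lpis, 3);
            for (int i = 0; i < 3; i++)
                    printf("lpi%d refcnt=%d\n", i, lpis[i].refcnt);
            /* lpi1 keeps its reference: the iterator never marked it */
            return 0;
    }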
@@ -417,10 +417,8 @@ static void __kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
         kfree(vgic_cpu->private_irqs);
         vgic_cpu->private_irqs = NULL;
-        if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
-                vgic_unregister_redist_iodev(vcpu);
+        if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
                 vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
-        }
 }
 void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
@@ -438,17 +436,21 @@ void kvm_vgic_destroy(struct kvm *kvm)
         unsigned long i;
         mutex_lock(&kvm->slots_lock);
+        mutex_lock(&kvm->arch.config_lock);
         vgic_debug_destroy(kvm);
         kvm_for_each_vcpu(i, vcpu, kvm)
                 __kvm_vgic_vcpu_destroy(vcpu);
-        mutex_lock(&kvm->arch.config_lock);
         kvm_vgic_dist_destroy(kvm);
         mutex_unlock(&kvm->arch.config_lock);
+
+        if (kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
+                kvm_for_each_vcpu(i, vcpu, kvm)
+                        vgic_unregister_redist_iodev(vcpu);
+
         mutex_unlock(&kvm->slots_lock);
 }
...
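The reordering in kvm_vgic_destroy() above follows the lock hierarchy documented later in this series (slots_lock, then kvm->srcu, then config_lock): unregistering a redistributor iodev synchronizes against kvm->srcu, so it must run after config_lock is dropped but while slots_lock is still held. A pthread sketch of the resulting shape; the names are hypothetical and only the ordering mirrors the kernel change:

    #include <pthread.h>

    static pthread_mutex_t slots_lock  = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t config_lock = PTHREAD_MUTEX_INITIALIZER;

    static void unregister_redist_iodev(void)
    {
            /* would synchronize against kvm->srcu readers here, which is
             * why config_lock (ordered after srcu) must not be held */
    }

    void vgic_destroy_model(void)
    {
            pthread_mutex_lock(&slots_lock);
            pthread_mutex_lock(&config_lock);
            /* debug teardown + per-vCPU CPU interface teardown */
            pthread_mutex_unlock(&config_lock);

            unregister_redist_iodev(); /* safe: config_lock dropped */
            pthread_mutex_unlock(&slots_lock);
    }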
@@ -9,7 +9,7 @@
 #include <kvm/arm_vgic.h>
 #include "vgic.h"
-/**
+/*
  * vgic_irqfd_set_irq: inject the IRQ corresponding to the
  * irqchip routing entry
  *
@@ -75,7 +75,8 @@ static void kvm_populate_msi(struct kvm_kernel_irq_routing_entry *e,
         msi->flags = e->msi.flags;
         msi->devid = e->msi.devid;
 }
-/**
+
+/*
  * kvm_set_msi: inject the MSI corresponding to the
  * MSI routing entry
  *
@@ -98,7 +99,7 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
         return vgic_its_inject_msi(kvm, &msi);
 }
-/**
+/*
  * kvm_arch_set_irq_inatomic: fast-path for irqfd injection
  */
 int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
...
@@ -2040,6 +2040,7 @@ typedef int (*entry_fn_t)(struct vgic_its *its, u32 id, void *entry,
  * @start_id: the ID of the first entry in the table
  *            (non zero for 2d level tables)
  * @fn: function to apply on each entry
+ * @opaque: pointer to opaque data
  *
  * Return: < 0 on error, 0 if last element was identified, 1 otherwise
  * (the last element may not be found on second level tables)
@@ -2079,7 +2080,7 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz,
         return 1;
 }
-/**
+/*
  * vgic_its_save_ite - Save an interrupt translation entry at @gpa
  */
 static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
@@ -2099,6 +2100,8 @@ static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
 /**
  * vgic_its_restore_ite - restore an interrupt translation entry
+ *
+ * @its: its handle
  * @event_id: id used for indexing
  * @ptr: pointer to the ITE entry
  * @opaque: pointer to the its_device
@@ -2231,6 +2234,7 @@ static int vgic_its_restore_itt(struct vgic_its *its, struct its_device *dev)
  * @its: ITS handle
  * @dev: ITS device
  * @ptr: GPA
+ * @dte_esz: device table entry size
  */
 static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
                              gpa_t ptr, int dte_esz)
@@ -2313,7 +2317,7 @@ static int vgic_its_device_cmp(void *priv, const struct list_head *a,
         return 1;
 }
-/**
+/*
  * vgic_its_save_device_tables - Save the device table and all ITT
  * into guest RAM
  *
@@ -2386,7 +2390,7 @@ static int handle_l1_dte(struct vgic_its *its, u32 id, void *addr,
         return ret;
 }
-/**
+/*
  * vgic_its_restore_device_tables - Restore the device table and all ITT
  * from guest RAM to internal data structs
  */
@@ -2478,7 +2482,7 @@ static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
         return 1;
 }
-/**
+/*
  * vgic_its_save_collection_table - Save the collection table into
  * guest RAM
  */
@@ -2518,7 +2522,7 @@ static int vgic_its_save_collection_table(struct vgic_its *its)
         return ret;
 }
-/**
+/*
  * vgic_its_restore_collection_table - reads the collection table
  * in guest memory and restores the ITS internal state. Requires the
  * BASER registers to be restored before.
@@ -2556,7 +2560,7 @@ static int vgic_its_restore_collection_table(struct vgic_its *its)
         return ret;
 }
-/**
+/*
  * vgic_its_save_tables_v0 - Save the ITS tables into guest ARM
  * according to v0 ABI
  */
@@ -2571,7 +2575,7 @@ static int vgic_its_save_tables_v0(struct vgic_its *its)
         return vgic_its_save_collection_table(its);
 }
-/**
+/*
  * vgic_its_restore_tables_v0 - Restore the ITS tables from guest RAM
  * to internal data structs according to V0 ABI
  *
...
@@ -370,7 +370,7 @@ static void map_all_vpes(struct kvm *kvm)
                                dist->its_vm.vpes[i]->irq));
 }
-/**
+/*
  * vgic_v3_save_pending_tables - Save the pending tables into guest RAM
  * kvm lock and all vcpu lock must be held
  */
...
@@ -36,6 +36,11 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
  * we have to disable IRQs before taking this lock and everything lower
  * than it.
  *
+ * The config_lock has additional ordering requirements:
+ * kvm->slots_lock
+ *   kvm->srcu
+ *     kvm->arch.config_lock
+ *
  * If you need to take multiple locks, always take the upper lock first,
  * then the lower ones, e.g. first take the its_lock, then the irq_lock.
  * If you are already holding a lock and need to take a higher one, you
@@ -313,7 +318,7 @@ static bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owne
  * with all locks dropped.
  */
 bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
-                           unsigned long flags)
+                           unsigned long flags) __releases(&irq->irq_lock)
 {
         struct kvm_vcpu *vcpu;
...
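On the __releases() annotation added above: under sparse (__CHECKER__) it expands to a context-tracking attribute and to nothing otherwise, which is how the "unexpected unlock" warning is silenced without affecting regular builds. A self-contained sketch of the pattern, modelled on include/linux/compiler_types.h; the lock here is a stand-in, not the kernel's spinlock:

    #ifdef __CHECKER__
    # define __releases(x) __attribute__((context(x, 1, 0)))
    #else
    # define __releases(x) /* nothing for regular compilers */
    #endif

    struct irq { int irq_lock; };

    static void unlock(struct irq *irq) { irq->irq_lock = 0; }

    /* caller enters with irq->irq_lock held; sparse now knows the
     * function exits without it, so the unlock is no longer "unexpected" */
    void queue_irq_unlock(struct irq *irq) __releases(&irq->irq_lock)
    {
            /* ... queue work ... */
            unlock(irq);
    }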
@@ -186,7 +186,7 @@ bool vgic_get_phys_line_level(struct vgic_irq *irq);
 void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending);
 void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active);
 bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
-                           unsigned long flags);
+                           unsigned long flags) __releases(&irq->irq_lock);
 void vgic_kick_vcpus(struct kvm *kvm);
 void vgic_irq_handle_resampling(struct vgic_irq *irq,
                                 bool lr_deactivated, bool lr_pending);
@@ -346,4 +346,11 @@ void vgic_v4_configure_vsgis(struct kvm *kvm);
 void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val);
 int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq);
+
+static inline bool kvm_has_gicv3(struct kvm *kvm)
+{
+        return (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) &&
+                irqchip_in_kernel(kvm) &&
+                kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3);
+}
 #endif
@@ -32,13 +32,13 @@ static struct feature_id_reg feat_id_regs[] = {
         {
                 ARM64_SYS_REG(3, 0, 10, 2, 2),  /* PIRE0_EL1 */
                 ARM64_SYS_REG(3, 0, 0, 7, 3),   /* ID_AA64MMFR3_EL1 */
-                4,
+                8,
                 1
         },
         {
                 ARM64_SYS_REG(3, 0, 10, 2, 3),  /* PIR_EL1 */
                 ARM64_SYS_REG(3, 0, 0, 7, 3),   /* ID_AA64MMFR3_EL1 */
-                4,
+                8,
                 1
         }
 };
...
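The selftest fix above moves the feature probe from shift 4 to shift 8: ID_AA64MMFR3_EL1.S1PIE lives in bits [11:8] (bits [7:4] are SCTLRX), so PIR_EL1/PIRE0_EL1 should only be expected when S1PIE >= 1. A standalone illustration of the field extraction; field layout per the Arm ARM, not taken from the selftest itself:

    #include <stdint.h>
    #include <stdio.h>

    static int feat_present(uint64_t id_aa64mmfr3, unsigned shift, unsigned min)
    {
            return ((id_aa64mmfr3 >> shift) & 0xf) >= min;
    }

    int main(void)
    {
            uint64_t mmfr3 = 0x100; /* S1PIE (bits [11:8]) = 1, SCTLRX = 0 */

            printf("S1PIE via shift 8: %d\n", feat_present(mmfr3, 8, 1)); /* 1 */
            printf("wrong shift 4:     %d\n", feat_present(mmfr3, 4, 1)); /* 0 */
            return 0;
    }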