Commit 7fd55a02 authored by Paolo Bonzini

Merge tag 'kvmarm-5.17' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/arm64 updates for Linux 5.17

- Simplification of the 'vcpu first run' by integrating it into
  KVM's 'pid change' flow

- Refactoring of the FP and SVE state tracking, also leading to
  a simpler state and less shared data between EL1 and EL2 in
  the nVHE case

- Tidy up the header file usage for the nvhe hyp object

- New HYP unsharing mechanism, finally allowing pages to be
  unmapped from the Stage-1 EL2 page-tables

- Various pKVM cleanups around refcounting and sharing

- A couple of vgic fixes for bugs that would trigger once
  the vcpu xarray rework is merged, but not sooner

- Add minimal support for ARMv8.7's PMU extension

- Rework kvm_pgtable initialisation ahead of the NV work

- New selftest for IRQ injection

- Teach selftests about the lack of default IPA space and
  page sizes

- Expand sysreg selftest to deal with Pointer Authentication

- The usual bunch of cleanups and doc updates
parents 5e4e84f1 1c53a1ae
......@@ -63,6 +63,7 @@ enum __kvm_host_smccc_func {
/* Hypercalls available after pKVM finalisation */
__KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp,
__KVM_HOST_SMCCC_FUNC___pkvm_host_unshare_hyp,
__KVM_HOST_SMCCC_FUNC___kvm_adjust_pc,
__KVM_HOST_SMCCC_FUNC___kvm_vcpu_run,
__KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context,
......
......@@ -388,7 +388,7 @@ static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
} else {
u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
sctlr |= (1 << 25);
sctlr |= SCTLR_ELx_EE;
vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
}
}
......
......@@ -26,7 +26,6 @@
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/thread_info.h>
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
......@@ -298,9 +297,6 @@ struct kvm_vcpu_arch {
/* Exception Information */
struct kvm_vcpu_fault_info fault;
/* State of various workarounds, see kvm_asm.h for bit assignment */
u64 workaround_flags;
/* Miscellaneous vcpu state flags */
u64 flags;
......@@ -321,8 +317,8 @@ struct kvm_vcpu_arch {
struct kvm_guest_debug_arch vcpu_debug_state;
struct kvm_guest_debug_arch external_debug_state;
struct thread_info *host_thread_info; /* hyp VA */
struct user_fpsimd_state *host_fpsimd_state; /* hyp VA */
struct task_struct *parent_task;
struct {
/* {Break,watch}point registers */
......@@ -367,9 +363,6 @@ struct kvm_vcpu_arch {
int target;
DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
/* Detect first run of a vcpu */
bool has_run_once;
/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
u64 vsesr_el2;
......@@ -411,20 +404,17 @@ struct kvm_vcpu_arch {
#define KVM_ARM64_DEBUG_DIRTY (1 << 0)
#define KVM_ARM64_FP_ENABLED (1 << 1) /* guest FP regs loaded */
#define KVM_ARM64_FP_HOST (1 << 2) /* host FP regs loaded */
#define KVM_ARM64_HOST_SVE_IN_USE (1 << 3) /* backup for host TIF_SVE */
#define KVM_ARM64_HOST_SVE_ENABLED (1 << 4) /* SVE enabled for EL0 */
#define KVM_ARM64_GUEST_HAS_SVE (1 << 5) /* SVE exposed to guest */
#define KVM_ARM64_VCPU_SVE_FINALIZED (1 << 6) /* SVE config completed */
#define KVM_ARM64_GUEST_HAS_PTRAUTH (1 << 7) /* PTRAUTH exposed to guest */
#define KVM_ARM64_PENDING_EXCEPTION (1 << 8) /* Exception pending */
/*
* Overlaps with KVM_ARM64_EXCEPT_MASK on purpose so that it can't be
* set together with an exception...
*/
#define KVM_ARM64_INCREMENT_PC (1 << 9) /* Increment PC */
#define KVM_ARM64_EXCEPT_MASK (7 << 9) /* Target EL/MODE */
#define KVM_ARM64_DEBUG_STATE_SAVE_SPE (1 << 12) /* Save SPE context if active */
#define KVM_ARM64_DEBUG_STATE_SAVE_TRBE (1 << 13) /* Save TRBE context if active */
#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
KVM_GUESTDBG_USE_SW_BP | \
KVM_GUESTDBG_USE_HW | \
KVM_GUESTDBG_SINGLESTEP)
/*
* When KVM_ARM64_PENDING_EXCEPTION is set, KVM_ARM64_EXCEPT_MASK can
* take the following values:
......@@ -442,11 +432,14 @@ struct kvm_vcpu_arch {
#define KVM_ARM64_EXCEPT_AA64_EL1 (0 << 11)
#define KVM_ARM64_EXCEPT_AA64_EL2 (1 << 11)
/*
* Overlaps with KVM_ARM64_EXCEPT_MASK on purpose so that it can't be
* set together with an exception...
*/
#define KVM_ARM64_INCREMENT_PC (1 << 9) /* Increment PC */
#define KVM_ARM64_DEBUG_STATE_SAVE_SPE (1 << 12) /* Save SPE context if active */
#define KVM_ARM64_DEBUG_STATE_SAVE_TRBE (1 << 13) /* Save TRBE context if active */
#define KVM_ARM64_FP_FOREIGN_FPSTATE (1 << 14)
#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
KVM_GUESTDBG_USE_SW_BP | \
KVM_GUESTDBG_USE_HW | \
KVM_GUESTDBG_SINGLESTEP)
#define vcpu_has_sve(vcpu) (system_supports_sve() && \
((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))
......@@ -606,6 +599,8 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);
#define vcpu_has_run_once(vcpu) !!rcu_access_pointer((vcpu)->pid)
#ifndef __KVM_NVHE_HYPERVISOR__
#define kvm_call_hyp_nvhe(f, ...) \
({ \
......@@ -736,8 +731,10 @@ long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
/* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);
void kvm_vcpu_unshare_task_fp(struct kvm_vcpu *vcpu);
static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
{
......@@ -748,12 +745,7 @@ static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu);
#ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */
static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
return kvm_arch_vcpu_run_map_fp(vcpu);
}
#ifdef CONFIG_KVM
void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
void kvm_clr_pmu_events(u32 clr);
......
......@@ -90,7 +90,6 @@ void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu);
void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
void __sve_save_state(void *sve_pffr, u32 *fpsr);
void __sve_restore_state(void *sve_pffr, u32 *fpsr);
#ifndef __KVM_NVHE_HYPERVISOR__
......
......@@ -150,6 +150,8 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
#include <asm/kvm_pgtable.h>
#include <asm/stage2_pgtable.h>
int kvm_share_hyp(void *from, void *to);
void kvm_unshare_hyp(void *from, void *to);
int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot);
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
void __iomem **kaddr,
......
......@@ -251,6 +251,27 @@ void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt);
int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
enum kvm_pgtable_prot prot);
/**
* kvm_pgtable_hyp_unmap() - Remove a mapping from a hypervisor stage-1 page-table.
* @pgt: Page-table structure initialised by kvm_pgtable_hyp_init().
* @addr: Virtual address from which to remove the mapping.
* @size: Size of the mapping.
*
* The offset of @addr within a page is ignored and @size is rounded-up to
* the next page boundary.
*
* TLB invalidation is performed for each page-table entry cleared during the
* unmapping operation and the reference count for the page-table page
* containing the cleared entry is decremented, with unreferenced pages being
* freed. The unmapping operation will stop early if it encounters either an
* invalid page-table entry or a valid block mapping which maps beyond the range
* being unmapped.
*
* Return: Number of bytes unmapped, which may be 0.
*/
u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);
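(Not part of the diff.) A minimal usage sketch of the new unmap helper, assuming the nVHE hyp context where pkvm_pgtable and pkvm_pgd_lock already exist (both appear elsewhere in this series); the pkvm_remove_mapping() wrapper name and its error policy are purely illustrative:

/* Illustrative only: tear down a range previously installed with
 * kvm_pgtable_hyp_map(); assumes page-aligned @addr and @size and takes
 * the same lock as the hyp map path.
 */
static int pkvm_remove_mapping(u64 addr, u64 size)
{
	u64 unmapped;

	hyp_spin_lock(&pkvm_pgd_lock);
	unmapped = kvm_pgtable_hyp_unmap(&pkvm_pgtable, addr, size);
	hyp_spin_unlock(&pkvm_pgd_lock);

	/* The walk stops early on an invalid entry or an oversized block mapping */
	return unmapped == size ? 0 : -EINVAL;
}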
/**
* kvm_get_vtcr() - Helper to construct VTCR_EL2
* @mmfr0: Sanitized value of SYS_ID_AA64MMFR0_EL1 register.
......@@ -270,8 +291,7 @@ u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift);
/**
* __kvm_pgtable_stage2_init() - Initialise a guest stage-2 page-table.
* @pgt: Uninitialised page-table structure to initialise.
* @arch: Arch-specific KVM structure representing the guest virtual
* machine.
* @mmu: S2 MMU context for this S2 translation
* @mm_ops: Memory management callbacks.
* @flags: Stage-2 configuration flags.
* @force_pte_cb: Function that returns true if page level mappings must
......@@ -279,13 +299,13 @@ u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift);
*
* Return: 0 on success, negative error code on failure.
*/
int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_arch *arch,
int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
struct kvm_pgtable_mm_ops *mm_ops,
enum kvm_pgtable_stage2_flags flags,
kvm_pgtable_force_pte_cb_t force_pte_cb);
#define kvm_pgtable_stage2_init(pgt, arch, mm_ops) \
__kvm_pgtable_stage2_init(pgt, arch, mm_ops, 0, NULL)
#define kvm_pgtable_stage2_init(pgt, mmu, mm_ops) \
__kvm_pgtable_stage2_init(pgt, mmu, mm_ops, 0, NULL)
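(Not part of the diff.) A quick sketch of the new calling convention, mirroring the kvm_init_stage2_mmu() hunk further down in this series; example_init_stage2() is hypothetical and error handling is trimmed:

static int example_init_stage2(struct kvm *kvm, struct kvm_s2_mmu *mmu,
			       struct kvm_pgtable *pgt,
			       struct kvm_pgtable_mm_ops *mm_ops)
{
	/* The page-table code now reaches VTCR through mmu->arch->vtcr */
	mmu->arch = &kvm->arch;
	return kvm_pgtable_stage2_init(pgt, mmu, mm_ops);
}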
/**
* kvm_pgtable_stage2_destroy() - Destroy an unused guest stage-2 page-table.
......
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020 - Google LLC
* Author: Quentin Perret <qperret@google.com>
*/
#ifndef __ARM64_KVM_PKVM_H__
#define __ARM64_KVM_PKVM_H__
#include <linux/memblock.h>
#include <asm/kvm_pgtable.h>
#define HYP_MEMBLOCK_REGIONS 128
extern struct memblock_region kvm_nvhe_sym(hyp_memory)[];
extern unsigned int kvm_nvhe_sym(hyp_memblock_nr);
static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages)
{
unsigned long total = 0, i;
/* Provision the worst case scenario */
for (i = 0; i < KVM_PGTABLE_MAX_LEVELS; i++) {
nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE);
total += nr_pages;
}
return total;
}
static inline unsigned long __hyp_pgtable_total_pages(void)
{
unsigned long res = 0, i;
/* Cover all of memory with page-granularity */
for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
struct memblock_region *reg = &kvm_nvhe_sym(hyp_memory)[i];
res += __hyp_pgtable_max_pages(reg->size >> PAGE_SHIFT);
}
return res;
}
static inline unsigned long hyp_s1_pgtable_pages(void)
{
unsigned long res;
res = __hyp_pgtable_total_pages();
/* Allow 1 GiB for private mappings */
res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);
return res;
}
static inline unsigned long host_s2_pgtable_pages(void)
{
unsigned long res;
/*
* Include an extra 16 pages to safely upper-bound the worst case of
* concatenated pgds.
*/
res = __hyp_pgtable_total_pages() + 16;
/* Allow 1 GiB for MMIO mappings */
res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);
return res;
}
#endif /* __ARM64_KVM_PKVM_H__ */
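(Not part of the diff.) To make the worst-case provisioning above concrete, an illustrative calculation assuming 4 KiB pages (so PTRS_PER_PTE = 512) and KVM_PGTABLE_MAX_LEVELS = 4, for 1 GiB of memory:

  nr_pages = SZ_1G >> PAGE_SHIFT          = 262144
  level 0:  DIV_ROUND_UP(262144, 512)     =    512
  level 1:  DIV_ROUND_UP(512, 512)        =      1
  level 2:  DIV_ROUND_UP(1, 512)          =      1
  level 3:  DIV_ROUND_UP(1, 512)          =      1
  __hyp_pgtable_max_pages(262144)         =    515

so each GiB of memory mapped at page granularity costs at most 515 pages (roughly 2 MiB) of hyp stage-1 page-tables.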
......@@ -15,6 +15,7 @@
#ifndef __ASSEMBLY__
#include <linux/refcount.h>
#include <asm/cpufeature.h>
typedef struct {
atomic64_t id;
......
......@@ -937,6 +937,7 @@
#define ID_AA64DFR0_PMUVER_8_1 0x4
#define ID_AA64DFR0_PMUVER_8_4 0x5
#define ID_AA64DFR0_PMUVER_8_5 0x6
#define ID_AA64DFR0_PMUVER_8_7 0x7
#define ID_AA64DFR0_PMUVER_IMP_DEF 0xf
#define ID_AA64DFR0_PMSVER_8_2 0x1
......
......@@ -111,7 +111,6 @@ int main(void)
#ifdef CONFIG_KVM
DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt));
DEFINE(VCPU_FAULT_DISR, offsetof(struct kvm_vcpu, arch.fault.disr_el1));
DEFINE(VCPU_WORKAROUND_FLAGS, offsetof(struct kvm_vcpu, arch.workaround_flags));
DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2));
DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_cpu_context, regs));
DEFINE(CPU_RGSR_EL1, offsetof(struct kvm_cpu_context, sys_regs[RGSR_EL1]));
......
......@@ -78,7 +78,11 @@
* indicate whether or not the userland FPSIMD state of the current task is
* present in the registers. The flag is set unless the FPSIMD registers of this
* CPU currently contain the most recent userland FPSIMD state of the current
* task.
* task. If the task is behaving as a VMM, then this will be managed by
* KVM which will clear it to indicate that the vcpu FPSIMD state is currently
* loaded on the CPU, allowing the state to be saved if a FPSIMD-aware
* softirq kicks in. Upon vcpu_put(), KVM will save the vcpu FP state and
* flag the register state as invalid.
*
* In order to allow softirq handlers to use FPSIMD, kernel_neon_begin() may
* save the task's FPSIMD context back to task_struct from softirq context.
......
# SPDX-License-Identifier: GPL-2.0-only
hyp_constants.h
......@@ -13,7 +13,7 @@ obj-$(CONFIG_KVM) += hyp/
kvm-y += arm.o mmu.o mmio.o psci.o perf.o hypercalls.o pvtime.o \
inject_fault.o va_layout.o handle_exit.o \
guest.o debug.o reset.o sys_regs.o \
vgic-sys-reg-v3.o fpsimd.o pmu.o \
vgic-sys-reg-v3.o fpsimd.o pmu.o pkvm.o \
arch_timer.o trng.o\
vgic/vgic.o vgic/vgic-init.o \
vgic/vgic-irqfd.o vgic/vgic-v2.o \
......@@ -23,3 +23,19 @@ kvm-y += arm.o mmu.o mmio.o psci.o perf.o hypercalls.o pvtime.o \
vgic/vgic-its.o vgic/vgic-debug.o
kvm-$(CONFIG_HW_PERF_EVENTS) += pmu-emul.o
always-y := hyp_constants.h hyp-constants.s
define rule_gen_hyp_constants
$(call filechk,offsets,__HYP_CONSTANTS_H__)
endef
CFLAGS_hyp-constants.o = -I $(srctree)/$(src)/hyp/include
$(obj)/hyp-constants.s: $(src)/hyp/hyp-constants.c FORCE
$(call if_changed_dep,cc_s_c)
$(obj)/hyp_constants.h: $(obj)/hyp-constants.s FORCE
$(call if_changed_rule,gen_hyp_constants)
obj-kvm := $(addprefix $(obj)/, $(kvm-y))
$(obj-kvm): $(obj)/hyp_constants.h
......@@ -146,7 +146,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
if (ret)
return ret;
ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);
ret = kvm_share_hyp(kvm, kvm + 1);
if (ret)
goto out_free_stage2_pgd;
......@@ -180,6 +180,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kvm_vgic_destroy(kvm);
kvm_destroy_vcpus(kvm);
kvm_unshare_hyp(kvm, kvm + 1);
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
......@@ -334,7 +336,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
if (err)
return err;
return create_hyp_mappings(vcpu, vcpu + 1, PAGE_HYP);
return kvm_share_hyp(vcpu, vcpu + 1);
}
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
......@@ -343,7 +345,7 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
if (vcpu->arch.has_run_once && unlikely(!irqchip_in_kernel(vcpu->kvm)))
if (vcpu_has_run_once(vcpu) && unlikely(!irqchip_in_kernel(vcpu->kvm)))
static_branch_dec(&userspace_irqchip_in_use);
kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
......@@ -561,18 +563,33 @@ static void update_vmid(struct kvm_vmid *vmid)
spin_unlock(&kvm_vmid_lock);
}
static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
{
return vcpu->arch.target >= 0;
}
/*
* Handle both the initialisation that is being done when the vcpu is
* run for the first time, as well as the updates that must be
* performed each time we get a new thread dealing with this vcpu.
*/
int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = vcpu->kvm;
int ret = 0;
int ret;
if (likely(vcpu->arch.has_run_once))
return 0;
if (!kvm_vcpu_initialized(vcpu))
return -ENOEXEC;
if (!kvm_arm_vcpu_is_finalized(vcpu))
return -EPERM;
vcpu->arch.has_run_once = true;
ret = kvm_arch_vcpu_run_map_fp(vcpu);
if (ret)
return ret;
if (likely(vcpu_has_run_once(vcpu)))
return 0;
kvm_arm_vcpu_init_debug(vcpu);
......@@ -584,12 +601,6 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
ret = kvm_vgic_map_resources(kvm);
if (ret)
return ret;
} else {
/*
* Tell the rest of the code that there are userspace irqchip
* VMs in the wild.
*/
static_branch_inc(&userspace_irqchip_in_use);
}
ret = kvm_timer_enable(vcpu);
......@@ -597,6 +608,16 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
return ret;
ret = kvm_arm_pmu_v3_enable(vcpu);
if (ret)
return ret;
if (!irqchip_in_kernel(kvm)) {
/*
* Tell the rest of the code that there are userspace irqchip
* VMs in the wild.
*/
static_branch_inc(&userspace_irqchip_in_use);
}
/*
* Initialize traps for protected VMs.
......@@ -689,11 +710,6 @@ void kvm_vcpu_wfi(struct kvm_vcpu *vcpu)
preempt_enable();
}
static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
{
return vcpu->arch.target >= 0;
}
static void check_vcpu_requests(struct kvm_vcpu *vcpu)
{
if (kvm_request_pending(vcpu)) {
......@@ -789,13 +805,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
struct kvm_run *run = vcpu->run;
int ret;
if (unlikely(!kvm_vcpu_initialized(vcpu)))
return -ENOEXEC;
ret = kvm_vcpu_first_run_init(vcpu);
if (ret)
return ret;
if (run->exit_reason == KVM_EXIT_MMIO) {
ret = kvm_handle_mmio_return(vcpu);
if (ret)
......@@ -859,6 +868,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
}
kvm_arm_setup_debug(vcpu);
kvm_arch_vcpu_ctxflush_fp(vcpu);
/**************************************************************
* Enter the guest
......@@ -1133,7 +1143,7 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
* need to invalidate the I-cache though, as FWB does *not*
* imply CTR_EL0.DIC.
*/
if (vcpu->arch.has_run_once) {
if (vcpu_has_run_once(vcpu)) {
if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
stage2_unmap_vm(vcpu->kvm);
else
......
......@@ -7,7 +7,6 @@
*/
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/kvm_host.h>
#include <asm/fpsimd.h>
#include <asm/kvm_asm.h>
......@@ -15,6 +14,19 @@
#include <asm/kvm_mmu.h>
#include <asm/sysreg.h>
void kvm_vcpu_unshare_task_fp(struct kvm_vcpu *vcpu)
{
struct task_struct *p = vcpu->arch.parent_task;
struct user_fpsimd_state *fpsimd;
if (!is_protected_kvm_enabled() || !p)
return;
fpsimd = &p->thread.uw.fpsimd_state;
kvm_unshare_hyp(fpsimd, fpsimd + 1);
put_task_struct(p);
}
/*
* Called on entry to KVM_RUN unless this vcpu previously ran at least
* once and the most recent prior KVM_RUN for this vcpu was called from
......@@ -28,36 +40,29 @@ int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu)
{
int ret;
struct thread_info *ti = &current->thread_info;
struct user_fpsimd_state *fpsimd = &current->thread.uw.fpsimd_state;
/*
* Make sure the host task thread flags and fpsimd state are
* visible to hyp:
*/
ret = create_hyp_mappings(ti, ti + 1, PAGE_HYP);
if (ret)
goto error;
kvm_vcpu_unshare_task_fp(vcpu);
ret = create_hyp_mappings(fpsimd, fpsimd + 1, PAGE_HYP);
/* Make sure the host task fpsimd state is visible to hyp: */
ret = kvm_share_hyp(fpsimd, fpsimd + 1);
if (ret)
goto error;
if (vcpu->arch.sve_state) {
void *sve_end;
return ret;
sve_end = vcpu->arch.sve_state + vcpu_sve_state_size(vcpu);
vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd);
ret = create_hyp_mappings(vcpu->arch.sve_state, sve_end,
PAGE_HYP);
if (ret)
goto error;
/*
* We need to keep current's task_struct pinned until its data has been
* unshared with the hypervisor to make sure it is not re-used by the
* kernel and donated to someone else while already shared -- see
* kvm_vcpu_unshare_task_fp() for the matching put_task_struct().
*/
if (is_protected_kvm_enabled()) {
get_task_struct(current);
vcpu->arch.parent_task = current;
}
vcpu->arch.host_thread_info = kern_hyp_va(ti);
vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd);
error:
return ret;
return 0;
}
/*
......@@ -66,26 +71,27 @@ int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu)
*
* Here, we just set the correct metadata to indicate that the FPSIMD
* state in the cpu regs (if any) belongs to current on the host.
*
* TIF_SVE is backed up here, since it may get clobbered with guest state.
* This flag is restored by kvm_arch_vcpu_put_fp(vcpu).
*/
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
{
BUG_ON(!current->mm);
BUG_ON(test_thread_flag(TIF_SVE));
vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
KVM_ARM64_HOST_SVE_IN_USE |
KVM_ARM64_HOST_SVE_ENABLED);
vcpu->arch.flags &= ~KVM_ARM64_FP_ENABLED;
vcpu->arch.flags |= KVM_ARM64_FP_HOST;
if (test_thread_flag(TIF_SVE))
vcpu->arch.flags |= KVM_ARM64_HOST_SVE_IN_USE;
if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
vcpu->arch.flags |= KVM_ARM64_HOST_SVE_ENABLED;
}
void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu)
{
if (test_thread_flag(TIF_FOREIGN_FPSTATE))
vcpu->arch.flags |= KVM_ARM64_FP_FOREIGN_FPSTATE;
else
vcpu->arch.flags &= ~KVM_ARM64_FP_FOREIGN_FPSTATE;
}
/*
* If the guest FPSIMD state was loaded, update the host's context
* tracking data to mark the CPU FPSIMD regs as dirty and belonging to the vcpu
......@@ -115,13 +121,11 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
{
unsigned long flags;
bool host_has_sve = system_supports_sve();
bool guest_has_sve = vcpu_has_sve(vcpu);
local_irq_save(flags);
if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
if (guest_has_sve) {
if (vcpu_has_sve(vcpu)) {
__vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);
/* Restore the VL that was saved when bound to the CPU */
......@@ -131,7 +135,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
}
fpsimd_save_and_flush_cpu_state();
} else if (has_vhe() && host_has_sve) {
} else if (has_vhe() && system_supports_sve()) {
/*
* The FPSIMD/SVE state in the CPU has not been touched, and we
* have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been
......@@ -145,8 +149,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);
}
update_thread_flag(TIF_SVE,
vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE);
update_thread_flag(TIF_SVE, 0);
local_irq_restore(flags);
}
......@@ -10,4 +10,4 @@ subdir-ccflags-y := -I$(incdir) \
-DDISABLE_BRANCH_PROFILING \
$(DISABLE_STACKLEAK_PLUGIN)
obj-$(CONFIG_KVM) += vhe/ nvhe/ pgtable.o reserved_mem.o
obj-$(CONFIG_KVM) += vhe/ nvhe/ pgtable.o
......@@ -25,9 +25,3 @@ SYM_FUNC_START(__sve_restore_state)
sve_load 0, x1, x2, 3
ret
SYM_FUNC_END(__sve_restore_state)
SYM_FUNC_START(__sve_save_state)
mov x2, #1
sve_save 0, x1, x2, 3
ret
SYM_FUNC_END(__sve_save_state)
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kbuild.h>
#include <nvhe/memory.h>
int main(void)
{
DEFINE(STRUCT_HYP_PAGE_SIZE, sizeof(struct hyp_page));
return 0;
}
......@@ -29,7 +29,6 @@
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
struct kvm_exception_table_entry {
int insn, fixup;
......@@ -49,7 +48,7 @@ static inline bool update_fp_enabled(struct kvm_vcpu *vcpu)
* trap the accesses.
*/
if (!system_supports_fpsimd() ||
vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
vcpu->arch.flags & KVM_ARM64_FP_FOREIGN_FPSTATE)
vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
KVM_ARM64_FP_HOST);
......@@ -143,16 +142,6 @@ static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
return __get_fault_info(vcpu->arch.fault.esr_el2, &vcpu->arch.fault);
}
static inline void __hyp_sve_save_host(struct kvm_vcpu *vcpu)
{
struct thread_struct *thread;
thread = container_of(vcpu->arch.host_fpsimd_state, struct thread_struct,
uw.fpsimd_state);
__sve_save_state(sve_pffr(thread), &vcpu->arch.host_fpsimd_state->fpsr);
}
static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
{
sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
......@@ -169,21 +158,14 @@ static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
*/
static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
{
bool sve_guest, sve_host;
bool sve_guest;
u8 esr_ec;
u64 reg;
if (!system_supports_fpsimd())
return false;
if (system_supports_sve()) {
sve_guest = vcpu_has_sve(vcpu);
sve_host = vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE;
} else {
sve_guest = false;
sve_host = false;
}
sve_guest = vcpu_has_sve(vcpu);
esr_ec = kvm_vcpu_trap_get_class(vcpu);
/* Don't handle SVE traps for non-SVE vcpus here: */
......@@ -207,11 +189,7 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
isb();
if (vcpu->arch.flags & KVM_ARM64_FP_HOST) {
if (sve_host)
__hyp_sve_save_host(vcpu);
else
__fpsimd_save_state(vcpu->arch.host_fpsimd_state);
__fpsimd_save_state(vcpu->arch.host_fpsimd_state);
vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
}
......
......@@ -24,6 +24,11 @@ enum pkvm_page_state {
PKVM_PAGE_OWNED = 0ULL,
PKVM_PAGE_SHARED_OWNED = KVM_PGTABLE_PROT_SW0,
PKVM_PAGE_SHARED_BORROWED = KVM_PGTABLE_PROT_SW1,
__PKVM_PAGE_RESERVED = KVM_PGTABLE_PROT_SW0 |
KVM_PGTABLE_PROT_SW1,
/* Meta-states which aren't encoded directly in the PTE's SW bits */
PKVM_NOPAGE,
};
#define PKVM_PAGE_STATE_PROT_MASK (KVM_PGTABLE_PROT_SW0 | KVM_PGTABLE_PROT_SW1)
......@@ -50,6 +55,7 @@ extern const u8 pkvm_hyp_id;
int __pkvm_prot_finalize(void);
int __pkvm_host_share_hyp(u64 pfn);
int __pkvm_host_unshare_hyp(u64 pfn);
bool addr_is_memory(phys_addr_t phys);
int host_stage2_idmap_locked(phys_addr_t addr, u64 size, enum kvm_pgtable_prot prot);
......
......@@ -10,13 +10,8 @@
#include <nvhe/memory.h>
#include <nvhe/spinlock.h>
#define HYP_MEMBLOCK_REGIONS 128
extern struct memblock_region kvm_nvhe_sym(hyp_memory)[];
extern unsigned int kvm_nvhe_sym(hyp_memblock_nr);
extern struct kvm_pgtable pkvm_pgtable;
extern hyp_spinlock_t pkvm_pgd_lock;
extern struct hyp_pool hpool;
extern u64 __io_map_base;
int hyp_create_idmap(u32 hyp_va_bits);
int hyp_map_vectors(void);
......@@ -39,58 +34,4 @@ static inline void hyp_vmemmap_range(phys_addr_t phys, unsigned long size,
*end = ALIGN(*end, PAGE_SIZE);
}
static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages)
{
unsigned long total = 0, i;
/* Provision the worst case scenario */
for (i = 0; i < KVM_PGTABLE_MAX_LEVELS; i++) {
nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE);
total += nr_pages;
}
return total;
}
static inline unsigned long __hyp_pgtable_total_pages(void)
{
unsigned long res = 0, i;
/* Cover all of memory with page-granularity */
for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
struct memblock_region *reg = &kvm_nvhe_sym(hyp_memory)[i];
res += __hyp_pgtable_max_pages(reg->size >> PAGE_SHIFT);
}
return res;
}
static inline unsigned long hyp_s1_pgtable_pages(void)
{
unsigned long res;
res = __hyp_pgtable_total_pages();
/* Allow 1 GiB for private mappings */
res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);
return res;
}
static inline unsigned long host_s2_pgtable_pages(void)
{
unsigned long res;
/*
* Include an extra 16 pages to safely upper-bound the worst case of
* concatenated pgds.
*/
res = __hyp_pgtable_total_pages() + 16;
/* Allow 1 GiB for MMIO mappings */
res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);
return res;
}
#endif /* __KVM_HYP_MM_H */
......@@ -43,6 +43,9 @@ void *hyp_early_alloc_page(void *arg)
return hyp_early_alloc_contig(1);
}
static void hyp_early_alloc_get_page(void *addr) { }
static void hyp_early_alloc_put_page(void *addr) { }
void hyp_early_alloc_init(void *virt, unsigned long size)
{
base = cur = (unsigned long)virt;
......@@ -51,4 +54,6 @@ void hyp_early_alloc_init(void *virt, unsigned long size)
hyp_early_alloc_mm_ops.zalloc_page = hyp_early_alloc_page;
hyp_early_alloc_mm_ops.phys_to_virt = hyp_phys_to_virt;
hyp_early_alloc_mm_ops.virt_to_phys = hyp_virt_to_phys;
hyp_early_alloc_mm_ops.get_page = hyp_early_alloc_get_page;
hyp_early_alloc_mm_ops.put_page = hyp_early_alloc_put_page;
}
......@@ -147,6 +147,13 @@ static void handle___pkvm_host_share_hyp(struct kvm_cpu_context *host_ctxt)
cpu_reg(host_ctxt, 1) = __pkvm_host_share_hyp(pfn);
}
static void handle___pkvm_host_unshare_hyp(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(u64, pfn, host_ctxt, 1);
cpu_reg(host_ctxt, 1) = __pkvm_host_unshare_hyp(pfn);
}
static void handle___pkvm_create_private_mapping(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(phys_addr_t, phys, host_ctxt, 1);
......@@ -184,6 +191,7 @@ static const hcall_t host_hcall[] = {
HANDLE_FUNC(__pkvm_prot_finalize),
HANDLE_FUNC(__pkvm_host_share_hyp),
HANDLE_FUNC(__pkvm_host_unshare_hyp),
HANDLE_FUNC(__kvm_adjust_pc),
HANDLE_FUNC(__kvm_vcpu_run),
HANDLE_FUNC(__kvm_flush_vm_context),
......
......@@ -8,6 +8,7 @@
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>
#include <asm/spectre.h>
#include <nvhe/early_alloc.h>
......@@ -18,11 +19,12 @@
struct kvm_pgtable pkvm_pgtable;
hyp_spinlock_t pkvm_pgd_lock;
u64 __io_map_base;
struct memblock_region hyp_memory[HYP_MEMBLOCK_REGIONS];
unsigned int hyp_memblock_nr;
static u64 __io_map_base;
static int __pkvm_create_mappings(unsigned long start, unsigned long size,
unsigned long phys, enum kvm_pgtable_prot prot)
{
......
......@@ -241,7 +241,7 @@ int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages,
int i;
hyp_spin_lock_init(&pool->lock);
pool->max_order = min(MAX_ORDER, get_order(nr_pages << PAGE_SHIFT));
pool->max_order = min(MAX_ORDER, get_order((nr_pages + 1) << PAGE_SHIFT));
for (i = 0; i < pool->max_order; i++)
INIT_LIST_HEAD(&pool->free_area[i]);
pool->range_start = phys;
......
......@@ -8,6 +8,7 @@
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>
#include <nvhe/early_alloc.h>
#include <nvhe/fixed_config.h>
......@@ -17,7 +18,6 @@
#include <nvhe/mm.h>
#include <nvhe/trap_handler.h>
struct hyp_pool hpool;
unsigned long hyp_nr_cpus;
#define hyp_percpu_size ((unsigned long)__per_cpu_end - \
......@@ -27,6 +27,7 @@ static void *vmemmap_base;
static void *hyp_pgt_base;
static void *host_s2_pgt_base;
static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
static struct hyp_pool hpool;
static int divide_memory_pool(void *virt, unsigned long size)
{
......@@ -165,6 +166,7 @@ static int finalize_host_mappings_walker(u64 addr, u64 end, u32 level,
enum kvm_pgtable_walk_flags flag,
void * const arg)
{
struct kvm_pgtable_mm_ops *mm_ops = arg;
enum kvm_pgtable_prot prot;
enum pkvm_page_state state;
kvm_pte_t pte = *ptep;
......@@ -173,6 +175,15 @@ static int finalize_host_mappings_walker(u64 addr, u64 end, u32 level,
if (!kvm_pte_valid(pte))
return 0;
/*
* Fix-up the refcount for the page-table pages as the early allocator
* was unable to access the hyp_vmemmap and so the buddy allocator has
* initialised the refcount to '1'.
*/
mm_ops->get_page(ptep);
if (flag != KVM_PGTABLE_WALK_LEAF)
return 0;
if (level != (KVM_PGTABLE_MAX_LEVELS - 1))
return -EINVAL;
......@@ -205,7 +216,8 @@ static int finalize_host_mappings(void)
{
struct kvm_pgtable_walker walker = {
.cb = finalize_host_mappings_walker,
.flags = KVM_PGTABLE_WALK_LEAF,
.flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
.arg = pkvm_pgtable.mm_ops,
};
int i, ret;
......@@ -240,19 +252,20 @@ void __noreturn __pkvm_init_finalise(void)
if (ret)
goto out;
ret = finalize_host_mappings();
if (ret)
goto out;
pkvm_pgtable_mm_ops = (struct kvm_pgtable_mm_ops) {
.zalloc_page = hyp_zalloc_hyp_page,
.phys_to_virt = hyp_phys_to_virt,
.virt_to_phys = hyp_virt_to_phys,
.get_page = hpool_get_page,
.put_page = hpool_put_page,
.page_count = hyp_page_count,
};
pkvm_pgtable.mm_ops = &pkvm_pgtable_mm_ops;
ret = finalize_host_mappings();
if (ret)
goto out;
out:
/*
* We tail-called to here from handle___pkvm_init() and will not return,
......
......@@ -25,7 +25,6 @@
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <nvhe/fixed_config.h>
#include <nvhe/mem_protect.h>
......
......@@ -383,21 +383,6 @@ enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte)
return prot;
}
static bool hyp_pte_needs_update(kvm_pte_t old, kvm_pte_t new)
{
/*
* Tolerate KVM recreating the exact same mapping, or changing software
* bits if the existing mapping was valid.
*/
if (old == new)
return false;
if (!kvm_pte_valid(old))
return true;
return !WARN_ON((old ^ new) & ~KVM_PTE_LEAF_ATTR_HI_SW);
}
static bool hyp_map_walker_try_leaf(u64 addr, u64 end, u32 level,
kvm_pte_t *ptep, struct hyp_map_data *data)
{
......@@ -407,11 +392,16 @@ static bool hyp_map_walker_try_leaf(u64 addr, u64 end, u32 level,
if (!kvm_block_mapping_supported(addr, end, phys, level))
return false;
data->phys += granule;
new = kvm_init_valid_leaf_pte(phys, data->attr, level);
if (hyp_pte_needs_update(old, new))
smp_store_release(ptep, new);
if (old == new)
return true;
if (!kvm_pte_valid(old))
data->mm_ops->get_page(ptep);
else if (WARN_ON((old ^ new) & ~KVM_PTE_LEAF_ATTR_HI_SW))
return false;
data->phys += granule;
smp_store_release(ptep, new);
return true;
}
......@@ -433,6 +423,7 @@ static int hyp_map_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
return -ENOMEM;
kvm_set_table_pte(ptep, childp, mm_ops);
mm_ops->get_page(ptep);
return 0;
}
......@@ -460,6 +451,69 @@ int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
return ret;
}
struct hyp_unmap_data {
u64 unmapped;
struct kvm_pgtable_mm_ops *mm_ops;
};
static int hyp_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
enum kvm_pgtable_walk_flags flag, void * const arg)
{
kvm_pte_t pte = *ptep, *childp = NULL;
u64 granule = kvm_granule_size(level);
struct hyp_unmap_data *data = arg;
struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;
if (!kvm_pte_valid(pte))
return -EINVAL;
if (kvm_pte_table(pte, level)) {
childp = kvm_pte_follow(pte, mm_ops);
if (mm_ops->page_count(childp) != 1)
return 0;
kvm_clear_pte(ptep);
dsb(ishst);
__tlbi_level(vae2is, __TLBI_VADDR(addr, 0), level);
} else {
if (end - addr < granule)
return -EINVAL;
kvm_clear_pte(ptep);
dsb(ishst);
__tlbi_level(vale2is, __TLBI_VADDR(addr, 0), level);
data->unmapped += granule;
}
dsb(ish);
isb();
mm_ops->put_page(ptep);
if (childp)
mm_ops->put_page(childp);
return 0;
}
u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
struct hyp_unmap_data unmap_data = {
.mm_ops = pgt->mm_ops,
};
struct kvm_pgtable_walker walker = {
.cb = hyp_unmap_walker,
.arg = &unmap_data,
.flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
};
if (!pgt->mm_ops->page_count)
return 0;
kvm_pgtable_walk(pgt, addr, size, &walker);
return unmap_data.unmapped;
}
int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
struct kvm_pgtable_mm_ops *mm_ops)
{
......@@ -482,8 +536,16 @@ static int hyp_free_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
enum kvm_pgtable_walk_flags flag, void * const arg)
{
struct kvm_pgtable_mm_ops *mm_ops = arg;
kvm_pte_t pte = *ptep;
if (!kvm_pte_valid(pte))
return 0;
mm_ops->put_page(ptep);
if (kvm_pte_table(pte, level))
mm_ops->put_page(kvm_pte_follow(pte, mm_ops));
mm_ops->put_page((void *)kvm_pte_follow(*ptep, mm_ops));
return 0;
}
......@@ -491,7 +553,7 @@ void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt)
{
struct kvm_pgtable_walker walker = {
.cb = hyp_free_walker,
.flags = KVM_PGTABLE_WALK_TABLE_POST,
.flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
.arg = pgt->mm_ops,
};
......@@ -1116,13 +1178,13 @@ int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size)
}
int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_arch *arch,
int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
struct kvm_pgtable_mm_ops *mm_ops,
enum kvm_pgtable_stage2_flags flags,
kvm_pgtable_force_pte_cb_t force_pte_cb)
{
size_t pgd_sz;
u64 vtcr = arch->vtcr;
u64 vtcr = mmu->arch->vtcr;
u32 ia_bits = VTCR_EL2_IPA(vtcr);
u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
u32 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;
......@@ -1135,7 +1197,7 @@ int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_arch *arch,
pgt->ia_bits = ia_bits;
pgt->start_level = start_level;
pgt->mm_ops = mm_ops;
pgt->mmu = &arch->mmu;
pgt->mmu = mmu;
pgt->flags = flags;
pgt->force_pte_cb = force_pte_cb;
......
......@@ -24,7 +24,6 @@
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
/* VHE specific context */
DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
......
......@@ -239,6 +239,9 @@ void free_hyp_pgds(void)
static bool kvm_host_owns_hyp_mappings(void)
{
if (is_kernel_in_hyp_mode())
return false;
if (static_branch_likely(&kvm_protected_mode_initialized))
return false;
......@@ -281,14 +284,117 @@ static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
}
}
static int pkvm_share_hyp(phys_addr_t start, phys_addr_t end)
struct hyp_shared_pfn {
u64 pfn;
int count;
struct rb_node node;
};
static DEFINE_MUTEX(hyp_shared_pfns_lock);
static struct rb_root hyp_shared_pfns = RB_ROOT;
static struct hyp_shared_pfn *find_shared_pfn(u64 pfn, struct rb_node ***node,
struct rb_node **parent)
{
phys_addr_t addr;
struct hyp_shared_pfn *this;
*node = &hyp_shared_pfns.rb_node;
*parent = NULL;
while (**node) {
this = container_of(**node, struct hyp_shared_pfn, node);
*parent = **node;
if (this->pfn < pfn)
*node = &((**node)->rb_left);
else if (this->pfn > pfn)
*node = &((**node)->rb_right);
else
return this;
}
return NULL;
}
static int share_pfn_hyp(u64 pfn)
{
struct rb_node **node, *parent;
struct hyp_shared_pfn *this;
int ret = 0;
mutex_lock(&hyp_shared_pfns_lock);
this = find_shared_pfn(pfn, &node, &parent);
if (this) {
this->count++;
goto unlock;
}
this = kzalloc(sizeof(*this), GFP_KERNEL);
if (!this) {
ret = -ENOMEM;
goto unlock;
}
this->pfn = pfn;
this->count = 1;
rb_link_node(&this->node, parent, node);
rb_insert_color(&this->node, &hyp_shared_pfns);
ret = kvm_call_hyp_nvhe(__pkvm_host_share_hyp, pfn, 1);
unlock:
mutex_unlock(&hyp_shared_pfns_lock);
return ret;
}
static int unshare_pfn_hyp(u64 pfn)
{
struct rb_node **node, *parent;
struct hyp_shared_pfn *this;
int ret = 0;
mutex_lock(&hyp_shared_pfns_lock);
this = find_shared_pfn(pfn, &node, &parent);
if (WARN_ON(!this)) {
ret = -ENOENT;
goto unlock;
}
this->count--;
if (this->count)
goto unlock;
rb_erase(&this->node, &hyp_shared_pfns);
kfree(this);
ret = kvm_call_hyp_nvhe(__pkvm_host_unshare_hyp, pfn, 1);
unlock:
mutex_unlock(&hyp_shared_pfns_lock);
return ret;
}
int kvm_share_hyp(void *from, void *to)
{
phys_addr_t start, end, cur;
u64 pfn;
int ret;
for (addr = ALIGN_DOWN(start, PAGE_SIZE); addr < end; addr += PAGE_SIZE) {
ret = kvm_call_hyp_nvhe(__pkvm_host_share_hyp,
__phys_to_pfn(addr));
if (is_kernel_in_hyp_mode())
return 0;
/*
* The share hcall maps things in the 'fixed-offset' region of the hyp
* VA space, so we can only share physically contiguous data-structures
* for now.
*/
if (is_vmalloc_or_module_addr(from) || is_vmalloc_or_module_addr(to))
return -EINVAL;
if (kvm_host_owns_hyp_mappings())
return create_hyp_mappings(from, to, PAGE_HYP);
start = ALIGN_DOWN(__pa(from), PAGE_SIZE);
end = PAGE_ALIGN(__pa(to));
for (cur = start; cur < end; cur += PAGE_SIZE) {
pfn = __phys_to_pfn(cur);
ret = share_pfn_hyp(pfn);
if (ret)
return ret;
}
......@@ -296,6 +402,22 @@ static int pkvm_share_hyp(phys_addr_t start, phys_addr_t end)
return 0;
}
void kvm_unshare_hyp(void *from, void *to)
{
phys_addr_t start, end, cur;
u64 pfn;
if (is_kernel_in_hyp_mode() || kvm_host_owns_hyp_mappings() || !from)
return;
start = ALIGN_DOWN(__pa(from), PAGE_SIZE);
end = PAGE_ALIGN(__pa(to));
for (cur = start; cur < end; cur += PAGE_SIZE) {
pfn = __phys_to_pfn(cur);
WARN_ON(unshare_pfn_hyp(pfn));
}
}
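(Not part of the diff.) A sketch of how the refcounting above behaves for callers; kvm_share_hyp()/kvm_unshare_hyp() are the real APIs introduced here, while example_share_pair() is hypothetical:

static int example_share_pair(void *obj_a, size_t sz_a, void *obj_b, size_t sz_b)
{
	int ret;

	/* Issues __pkvm_host_share_hyp once per pfn that is not yet shared */
	ret = kvm_share_hyp(obj_a, obj_a + sz_a);
	if (ret)
		return ret;

	/* If obj_b lands in an already-shared page, only the refcount is bumped */
	ret = kvm_share_hyp(obj_b, obj_b + sz_b);
	if (ret)
		kvm_unshare_hyp(obj_a, obj_a + sz_a);	/* hcall only when count hits 0 */

	return ret;
}

The rb-tree keyed by pfn is what makes this safe when two shared objects happen to overlap within the same page.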
/**
* create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
* @from: The virtual kernel start address of the range
......@@ -316,12 +438,8 @@ int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
if (is_kernel_in_hyp_mode())
return 0;
if (!kvm_host_owns_hyp_mappings()) {
if (WARN_ON(prot != PAGE_HYP))
return -EPERM;
return pkvm_share_hyp(kvm_kaddr_to_phys(from),
kvm_kaddr_to_phys(to));
}
if (!kvm_host_owns_hyp_mappings())
return -EPERM;
start = start & PAGE_MASK;
end = PAGE_ALIGN(end);
......@@ -407,6 +525,9 @@ int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
unsigned long addr;
int ret;
if (is_protected_kvm_enabled())
return -EPERM;
*kaddr = ioremap(phys_addr, size);
if (!*kaddr)
return -ENOMEM;
......@@ -516,7 +637,8 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
if (!pgt)
return -ENOMEM;
err = kvm_pgtable_stage2_init(pgt, &kvm->arch, &kvm_s2_mm_ops);
mmu->arch = &kvm->arch;
err = kvm_pgtable_stage2_init(pgt, mmu, &kvm_s2_mm_ops);
if (err)
goto out_free_pgtable;
......@@ -529,7 +651,6 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
for_each_possible_cpu(cpu)
*per_cpu_ptr(mmu->last_vcpu_ran, cpu) = -1;
mmu->arch = &kvm->arch;
mmu->pgt = pgt;
mmu->pgd_phys = __pa(pgt->pgd);
WRITE_ONCE(mmu->vmid.vmid_gen, 0);
......@@ -650,6 +771,9 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
KVM_PGTABLE_PROT_R |
(writable ? KVM_PGTABLE_PROT_W : 0);
if (is_protected_kvm_enabled())
return -EPERM;
size += offset_in_page(guest_ipa);
guest_ipa &= PAGE_MASK;
......
......@@ -8,10 +8,9 @@
#include <linux/memblock.h>
#include <linux/sort.h>
#include <asm/kvm_host.h>
#include <asm/kvm_pkvm.h>
#include <nvhe/memory.h>
#include <nvhe/mm.h>
#include "hyp_constants.h"
static struct memblock_region *hyp_memory = kvm_nvhe_sym(hyp_memory);
static unsigned int *hyp_memblock_nr_ptr = &kvm_nvhe_sym(hyp_memblock_nr);
......@@ -82,7 +81,8 @@ void __init kvm_hyp_reserve(void)
do {
prev = nr_pages;
nr_pages = hyp_mem_pages + prev;
nr_pages = DIV_ROUND_UP(nr_pages * sizeof(struct hyp_page), PAGE_SIZE);
nr_pages = DIV_ROUND_UP(nr_pages * STRUCT_HYP_PAGE_SIZE,
PAGE_SIZE);
nr_pages += __hyp_pgtable_max_pages(nr_pages);
} while (nr_pages != prev);
hyp_mem_pages += nr_pages;
......
......@@ -28,6 +28,7 @@ static u32 kvm_pmu_event_mask(struct kvm *kvm)
case ID_AA64DFR0_PMUVER_8_1:
case ID_AA64DFR0_PMUVER_8_4:
case ID_AA64DFR0_PMUVER_8_5:
case ID_AA64DFR0_PMUVER_8_7:
return GENMASK(15, 0);
default: /* Shouldn't be here, just for sanity */
WARN_ONCE(1, "Unknown PMU version %d\n", kvm->arch.pmuver);
......
......@@ -109,7 +109,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
/*
* Make sure the reset request is observed if the change to
* power_state is observed.
* power_off is observed.
*/
smp_wmb();
......
......@@ -94,22 +94,31 @@ static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
{
void *buf;
unsigned int vl;
size_t reg_sz;
int ret;
vl = vcpu->arch.sve_max_vl;
/*
* Responsibility for these properties is shared between
* kvm_arm_init_arch_resources(), kvm_vcpu_enable_sve() and
* kvm_arm_init_sve(), kvm_vcpu_enable_sve() and
* set_sve_vls(). Double-check here just to be sure:
*/
if (WARN_ON(!sve_vl_valid(vl) || vl > sve_max_virtualisable_vl() ||
vl > SVE_VL_ARCH_MAX))
return -EIO;
buf = kzalloc(SVE_SIG_REGS_SIZE(sve_vq_from_vl(vl)), GFP_KERNEL_ACCOUNT);
reg_sz = vcpu_sve_state_size(vcpu);
buf = kzalloc(reg_sz, GFP_KERNEL_ACCOUNT);
if (!buf)
return -ENOMEM;
ret = kvm_share_hyp(buf, buf + reg_sz);
if (ret) {
kfree(buf);
return ret;
}
vcpu->arch.sve_state = buf;
vcpu->arch.flags |= KVM_ARM64_VCPU_SVE_FINALIZED;
return 0;
......@@ -141,7 +150,13 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
kfree(vcpu->arch.sve_state);
void *sve_state = vcpu->arch.sve_state;
kvm_vcpu_unshare_task_fp(vcpu);
kvm_unshare_hyp(vcpu, vcpu + 1);
if (sve_state)
kvm_unshare_hyp(sve_state, sve_state + vcpu_sve_state_size(vcpu));
kfree(sve_state);
}
static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
......@@ -193,10 +208,9 @@ static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu)
* kvm_reset_vcpu - sets core registers and sys_regs to reset value
* @vcpu: The VCPU pointer
*
* This function finds the right table above and sets the registers on
* the virtual CPU struct to their architecturally defined reset
* values, except for registers whose reset is deferred until
* kvm_arm_vcpu_finalize().
* This function sets the registers on the virtual CPU struct to their
* architecturally defined reset values, except for registers whose reset is
* deferred until kvm_arm_vcpu_finalize().
*
* Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT
* ioctl or as part of handling a request issued by another VCPU in the PSCI
......
......@@ -92,7 +92,7 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
return ret;
kvm_for_each_vcpu(i, vcpu, kvm) {
if (vcpu->arch.has_run_once)
if (vcpu_has_run_once(vcpu))
goto out_unlock;
}
ret = 0;
......
......@@ -764,10 +764,12 @@ static int vgic_register_all_redist_iodevs(struct kvm *kvm)
}
if (ret) {
/* The current c failed, so we start with the previous one. */
/* The current c failed, so iterate over the previous ones. */
int i;
mutex_lock(&kvm->slots_lock);
for (c--; c >= 0; c--) {
vcpu = kvm_get_vcpu(kvm, c);
for (i = 0; i < c; i++) {
vcpu = kvm_get_vcpu(kvm, i);
vgic_unregister_redist_iodev(vcpu);
}
mutex_unlock(&kvm->slots_lock);
......
......@@ -1050,7 +1050,7 @@ static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
return 0;
}
struct kvm_io_device_ops kvm_io_gic_ops = {
const struct kvm_io_device_ops kvm_io_gic_ops = {
.read = dispatch_mmio_read,
.write = dispatch_mmio_write,
};
......
......@@ -34,7 +34,7 @@ struct vgic_register_region {
};
};
extern struct kvm_io_device_ops kvm_io_gic_ops;
extern const struct kvm_io_device_ops kvm_io_gic_ops;
#define VGIC_ACCESS_8bit 1
#define VGIC_ACCESS_32bit 2
......
......@@ -293,12 +293,12 @@ int vgic_v2_map_resources(struct kvm *kvm)
if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
kvm_err("Need to set vgic cpu and dist addresses first\n");
kvm_debug("Need to set vgic cpu and dist addresses first\n");
return -ENXIO;
}
if (!vgic_v2_check_base(dist->vgic_dist_base, dist->vgic_cpu_base)) {
kvm_err("VGIC CPU and dist frames overlap\n");
kvm_debug("VGIC CPU and dist frames overlap\n");
return -EINVAL;
}
......@@ -345,6 +345,11 @@ int vgic_v2_probe(const struct gic_kvm_info *info)
int ret;
u32 vtr;
if (is_protected_kvm_enabled()) {
kvm_err("GICv2 not supported in protected mode\n");
return -ENXIO;
}
if (!info->vctrl.start) {
kvm_err("GICH not present in the firmware table\n");
return -ENXIO;
......
......@@ -554,12 +554,12 @@ int vgic_v3_map_resources(struct kvm *kvm)
}
if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base)) {
kvm_err("Need to set vgic distributor addresses first\n");
kvm_debug("Need to set vgic distributor addresses first\n");
return -ENXIO;
}
if (!vgic_v3_check_base(kvm)) {
kvm_err("VGIC redist and dist frames overlap\n");
kvm_debug("VGIC redist and dist frames overlap\n");
return -EINVAL;
}
......@@ -651,7 +651,7 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
} else if (!PAGE_ALIGNED(info->vcpu.start)) {
pr_warn("GICV physical address 0x%llx not page aligned\n",
(unsigned long long)info->vcpu.start);
} else {
} else if (kvm_get_mode() != KVM_MODE_PROTECTED) {
kvm_vgic_global_state.vcpu_base = info->vcpu.start;
kvm_vgic_global_state.can_emulate_gicv2 = true;
ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
......
......@@ -5,9 +5,11 @@
#ifndef __KVM_ARM_VGIC_H
#define __KVM_ARM_VGIC_H
#include <linux/kernel.h>
#include <linux/bits.h>
#include <linux/kvm.h>
#include <linux/irqreturn.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/static_key.h>
#include <linux/types.h>
......
......@@ -1322,7 +1322,6 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
bool kvm_is_reserved_pfn(kvm_pfn_t pfn);
bool kvm_is_zone_device_pfn(kvm_pfn_t pfn);
bool kvm_is_transparent_hugepage(kvm_pfn_t pfn);
struct kvm_irq_ack_notifier {
struct hlist_node link;
......
......@@ -4,6 +4,7 @@
/aarch64/get-reg-list
/aarch64/psci_cpu_on_test
/aarch64/vgic_init
/aarch64/vgic_irq
/s390x/memop
/s390x/resets
/s390x/sync_regs_test
......
......@@ -96,6 +96,7 @@ TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions
TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list
TEST_GEN_PROGS_aarch64 += aarch64/psci_cpu_on_test
TEST_GEN_PROGS_aarch64 += aarch64/vgic_init
TEST_GEN_PROGS_aarch64 += aarch64/vgic_irq
TEST_GEN_PROGS_aarch64 += demand_paging_test
TEST_GEN_PROGS_aarch64 += dirty_log_test
TEST_GEN_PROGS_aarch64 += dirty_log_perf_test
......
......@@ -382,7 +382,7 @@ static struct kvm_vm *test_vm_create(void)
ucall_init(vm, NULL);
test_init_timer_irq(vm);
vgic_v3_setup(vm, nr_vcpus, GICD_BASE_GPA, GICR_BASE_GPA);
vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
/* Make all the test's cmdline args visible to the guest */
sync_global_to_guest(vm, test_args);
......
......@@ -1014,6 +1014,22 @@ static __u64 sve_rejects_set[] = {
KVM_REG_ARM64_SVE_VLS,
};
static __u64 pauth_addr_regs[] = {
ARM64_SYS_REG(3, 0, 2, 1, 0), /* APIAKEYLO_EL1 */
ARM64_SYS_REG(3, 0, 2, 1, 1), /* APIAKEYHI_EL1 */
ARM64_SYS_REG(3, 0, 2, 1, 2), /* APIBKEYLO_EL1 */
ARM64_SYS_REG(3, 0, 2, 1, 3), /* APIBKEYHI_EL1 */
ARM64_SYS_REG(3, 0, 2, 2, 0), /* APDAKEYLO_EL1 */
ARM64_SYS_REG(3, 0, 2, 2, 1), /* APDAKEYHI_EL1 */
ARM64_SYS_REG(3, 0, 2, 2, 2), /* APDBKEYLO_EL1 */
ARM64_SYS_REG(3, 0, 2, 2, 3) /* APDBKEYHI_EL1 */
};
static __u64 pauth_generic_regs[] = {
ARM64_SYS_REG(3, 0, 2, 3, 0), /* APGAKEYLO_EL1 */
ARM64_SYS_REG(3, 0, 2, 3, 1), /* APGAKEYHI_EL1 */
};
#define BASE_SUBLIST \
{ "base", .regs = base_regs, .regs_n = ARRAY_SIZE(base_regs), }
#define VREGS_SUBLIST \
......@@ -1025,6 +1041,21 @@ static __u64 sve_rejects_set[] = {
{ "sve", .capability = KVM_CAP_ARM_SVE, .feature = KVM_ARM_VCPU_SVE, .finalize = true, \
.regs = sve_regs, .regs_n = ARRAY_SIZE(sve_regs), \
.rejects_set = sve_rejects_set, .rejects_set_n = ARRAY_SIZE(sve_rejects_set), }
#define PAUTH_SUBLIST \
{ \
.name = "pauth_address", \
.capability = KVM_CAP_ARM_PTRAUTH_ADDRESS, \
.feature = KVM_ARM_VCPU_PTRAUTH_ADDRESS, \
.regs = pauth_addr_regs, \
.regs_n = ARRAY_SIZE(pauth_addr_regs), \
}, \
{ \
.name = "pauth_generic", \
.capability = KVM_CAP_ARM_PTRAUTH_GENERIC, \
.feature = KVM_ARM_VCPU_PTRAUTH_GENERIC, \
.regs = pauth_generic_regs, \
.regs_n = ARRAY_SIZE(pauth_generic_regs), \
}
static struct vcpu_config vregs_config = {
.sublists = {
......@@ -1056,11 +1087,30 @@ static struct vcpu_config sve_pmu_config = {
{0},
},
};
static struct vcpu_config pauth_config = {
.sublists = {
BASE_SUBLIST,
VREGS_SUBLIST,
PAUTH_SUBLIST,
{0},
},
};
static struct vcpu_config pauth_pmu_config = {
.sublists = {
BASE_SUBLIST,
VREGS_SUBLIST,
PAUTH_SUBLIST,
PMU_SUBLIST,
{0},
},
};
static struct vcpu_config *vcpu_configs[] = {
&vregs_config,
&vregs_pmu_config,
&sve_config,
&sve_pmu_config,
&pauth_config,
&pauth_pmu_config,
};
static int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);
......@@ -11,11 +11,37 @@ enum gic_type {
GIC_TYPE_MAX,
};
#define MIN_SGI 0
#define MIN_PPI 16
#define MIN_SPI 32
#define MAX_SPI 1019
#define IAR_SPURIOUS 1023
#define INTID_IS_SGI(intid) (0 <= (intid) && (intid) < MIN_PPI)
#define INTID_IS_PPI(intid) (MIN_PPI <= (intid) && (intid) < MIN_SPI)
#define INTID_IS_SPI(intid) (MIN_SPI <= (intid) && (intid) <= MAX_SPI)
void gic_init(enum gic_type type, unsigned int nr_cpus,
void *dist_base, void *redist_base);
void gic_irq_enable(unsigned int intid);
void gic_irq_disable(unsigned int intid);
unsigned int gic_get_and_ack_irq(void);
void gic_set_eoi(unsigned int intid);
void gic_set_dir(unsigned int intid);
/*
* Sets the EOI mode. When split is false, a write to EOIR both drops the
* priority and deactivates the interrupt. When split is true, EOIR only drops
* the priority and a separate write to DIR is needed to deactivate it.
*/
void gic_set_eoi_split(bool split);
void gic_set_priority_mask(uint64_t mask);
void gic_set_priority(uint32_t intid, uint32_t prio);
void gic_irq_set_active(unsigned int intid);
void gic_irq_clear_active(unsigned int intid);
bool gic_irq_get_active(unsigned int intid);
void gic_irq_set_pending(unsigned int intid);
void gic_irq_clear_pending(unsigned int intid);
bool gic_irq_get_pending(unsigned int intid);
void gic_irq_set_config(unsigned int intid, bool is_edge);
#endif /* SELFTEST_KVM_GIC_H */
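(Not part of the diff.) A sketch of the guest-side flow these new helpers enable in the vgic_irq selftest, assuming gic_init() and exception routing have already been set up by the test harness; example_guest_irq_flow() is hypothetical:

static void example_guest_irq_flow(unsigned int intid)
{
	unsigned int acked;

	gic_set_eoi_split(true);	/* EOIR will only drop priority */
	gic_set_priority_mask(0xf0);	/* allow interrupts with priority value below 0xf0 */
	gic_irq_enable(intid);

	/* ... the interrupt fires and the handler runs ... */
	acked = gic_get_and_ack_irq();
	if (acked != IAR_SPURIOUS) {
		gic_set_eoi(acked);	/* priority drop */
		gic_set_dir(acked);	/* explicit deactivation in split mode */
	}
}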
......@@ -16,8 +16,12 @@
#define GICD_IGROUPR 0x0080
#define GICD_ISENABLER 0x0100
#define GICD_ICENABLER 0x0180
#define GICD_ISPENDR 0x0200
#define GICD_ICPENDR 0x0280
#define GICD_ICACTIVER 0x0380
#define GICD_ISACTIVER 0x0300
#define GICD_IPRIORITYR 0x0400
#define GICD_ICFGR 0x0C00
/*
* The assumption is that the guest runs in a non-secure mode.
......@@ -49,16 +53,24 @@
#define GICR_IGROUPR0 GICD_IGROUPR
#define GICR_ISENABLER0 GICD_ISENABLER
#define GICR_ICENABLER0 GICD_ICENABLER
#define GICR_ISPENDR0 GICD_ISPENDR
#define GICR_ISACTIVER0 GICD_ISACTIVER
#define GICR_ICACTIVER0 GICD_ICACTIVER
#define GICR_ICENABLER GICD_ICENABLER
#define GICR_ICACTIVER GICD_ICACTIVER
#define GICR_IPRIORITYR0 GICD_IPRIORITYR
/* CPU interface registers */
#define SYS_ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0)
#define SYS_ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0)
#define SYS_ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1)
#define SYS_ICC_DIR_EL1 sys_reg(3, 0, 12, 11, 1)
#define SYS_ICC_CTLR_EL1 sys_reg(3, 0, 12, 12, 4)
#define SYS_ICC_SRE_EL1 sys_reg(3, 0, 12, 12, 5)
#define SYS_ICC_GRPEN1_EL1 sys_reg(3, 0, 12, 12, 7)
#define SYS_ICV_AP1R0_EL1 sys_reg(3, 0, 12, 9, 0)
#define ICC_PMR_DEF_PRIO 0xf0
#define ICC_SRE_EL1_SRE (1U << 0)
......
......@@ -113,6 +113,9 @@ enum {
#define ESR_EC_WP_CURRENT 0x35
#define ESR_EC_BRK_INS 0x3c
void aarch64_get_supported_page_sizes(uint32_t ipa,
bool *ps4k, bool *ps16k, bool *ps64k);
void vm_init_descriptor_tables(struct kvm_vm *vm);
void vcpu_init_descriptor_tables(struct kvm_vm *vm, uint32_t vcpuid);
......
......@@ -14,7 +14,21 @@
((uint64_t)(flags) << 12) | \
index)
int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus,
int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs,
uint64_t gicd_base_gpa, uint64_t gicr_base_gpa);
#endif /* SELFTEST_KVM_VGIC_H */
#define VGIC_MAX_RESERVED 1023
void kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level);
int _kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level);
void kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level);
int _kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level);
/* The vcpu arg only applies to private interrupts. */
void kvm_irq_write_ispendr(int gic_fd, uint32_t intid, uint32_t vcpu);
void kvm_irq_write_isactiver(int gic_fd, uint32_t intid, uint32_t vcpu);
#define KVM_IRQCHIP_NUM_PINS (1020 - 32)
#endif // SELFTEST_KVM_VGIC_H
......@@ -42,18 +42,26 @@ enum vm_guest_mode {
VM_MODE_P52V48_4K,
VM_MODE_P52V48_64K,
VM_MODE_P48V48_4K,
VM_MODE_P48V48_16K,
VM_MODE_P48V48_64K,
VM_MODE_P40V48_4K,
VM_MODE_P40V48_16K,
VM_MODE_P40V48_64K,
VM_MODE_PXXV48_4K, /* For 48bits VA but ANY bits PA */
VM_MODE_P47V64_4K,
VM_MODE_P44V64_4K,
VM_MODE_P36V48_4K,
VM_MODE_P36V48_16K,
VM_MODE_P36V48_64K,
VM_MODE_P36V47_16K,
NUM_VM_MODES,
};
#if defined(__aarch64__)
#define VM_MODE_DEFAULT VM_MODE_P40V48_4K
extern enum vm_guest_mode vm_mode_default;
#define VM_MODE_DEFAULT vm_mode_default
#define MIN_PAGE_SHIFT 12U
#define ptes_per_page(page_size) ((page_size) / 8)
......@@ -240,6 +248,8 @@ int _kvm_device_access(int dev_fd, uint32_t group, uint64_t attr,
void *val, bool write);
int kvm_device_access(int dev_fd, uint32_t group, uint64_t attr,
void *val, bool write);
void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
int _vcpu_has_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
uint64_t attr);
......@@ -250,6 +260,14 @@ int _vcpu_access_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
int vcpu_access_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
uint64_t attr, void *val, bool write);
#define KVM_MAX_IRQ_ROUTES 4096
struct kvm_irq_routing *kvm_gsi_routing_create(void);
void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
uint32_t gsi, uint32_t pin);
int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
const char *exit_reason_str(unsigned int exit_reason);
void virt_pgd_alloc(struct kvm_vm *vm);
......
......@@ -93,3 +93,69 @@ void gic_set_eoi(unsigned int intid)
GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_write_eoir(intid);
}
void gic_set_dir(unsigned int intid)
{
GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_write_dir(intid);
}
void gic_set_eoi_split(bool split)
{
GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_set_eoi_split(split);
}
void gic_set_priority_mask(uint64_t pmr)
{
GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_set_priority_mask(pmr);
}
void gic_set_priority(unsigned int intid, unsigned int prio)
{
GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_set_priority(intid, prio);
}
void gic_irq_set_active(unsigned int intid)
{
GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_irq_set_active(intid);
}
void gic_irq_clear_active(unsigned int intid)
{
GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_irq_clear_active(intid);
}
bool gic_irq_get_active(unsigned int intid)
{
GUEST_ASSERT(gic_common_ops);
return gic_common_ops->gic_irq_get_active(intid);
}
void gic_irq_set_pending(unsigned int intid)
{
GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_irq_set_pending(intid);
}
void gic_irq_clear_pending(unsigned int intid)
{
GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_irq_clear_pending(intid);
}
bool gic_irq_get_pending(unsigned int intid)
{
GUEST_ASSERT(gic_common_ops);
return gic_common_ops->gic_irq_get_pending(intid);
}
void gic_irq_set_config(unsigned int intid, bool is_edge)
{
GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_irq_set_config(intid, is_edge);
}
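A hedged guest-side sketch of how the new wrappers compose with split EOI, where ICC_EOIR1_EL1 only drops the running priority and ICC_DIR_EL1 performs the deactivation; it assumes gic_init() has already run, that the pre-existing gic_irq_enable() and gic_get_and_ack_irq() helpers from gic.h are available, and the SPI number is arbitrary:

static void guest_split_eoi_example(void)
{
	const uint32_t spi = 42;	/* hypothetical SPI */
	unsigned int intid;

	gic_set_eoi_split(true);
	gic_set_priority_mask(0xff);	/* don't mask any priority */
	gic_set_priority(spi, 0);
	gic_irq_set_config(spi, false);	/* level-sensitive */
	gic_irq_enable(spi);

	gic_irq_set_pending(spi);	/* latch it via GICD_ISPENDR */

	intid = gic_get_and_ack_irq();	/* assumed helper from gic.h */
	GUEST_ASSERT(intid == spi);
	gic_set_eoi(intid);		/* priority drop only */
	gic_set_dir(intid);		/* deactivation */
}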
......@@ -14,6 +14,17 @@ struct gic_common_ops {
void (*gic_irq_disable)(unsigned int intid);
uint64_t (*gic_read_iar)(void);
void (*gic_write_eoir)(uint32_t irq);
void (*gic_write_dir)(uint32_t irq);
void (*gic_set_eoi_split)(bool split);
void (*gic_set_priority_mask)(uint64_t mask);
void (*gic_set_priority)(uint32_t intid, uint32_t prio);
void (*gic_irq_set_active)(uint32_t intid);
void (*gic_irq_clear_active)(uint32_t intid);
bool (*gic_irq_get_active)(uint32_t intid);
void (*gic_irq_set_pending)(uint32_t intid);
void (*gic_irq_clear_pending)(uint32_t intid);
bool (*gic_irq_get_pending)(uint32_t intid);
void (*gic_irq_set_config)(uint32_t intid, bool is_edge);
};
extern const struct gic_common_ops gicv3_ops;
......
......@@ -19,7 +19,8 @@ struct gicv3_data {
unsigned int nr_spis;
};
#define sgi_base_from_redist(redist_base) (redist_base + SZ_64K)
#define DIST_BIT (1U << 31)
enum gicv3_intid_range {
SGI_RANGE,
......@@ -50,6 +51,14 @@ static void gicv3_gicr_wait_for_rwp(void *redist_base)
}
}
static void gicv3_wait_for_rwp(uint32_t cpu_or_dist)
{
if (cpu_or_dist & DIST_BIT)
gicv3_gicd_wait_for_rwp();
else
gicv3_gicr_wait_for_rwp(gicv3_data.redist_base[cpu_or_dist]);
}
static enum gicv3_intid_range get_intid_range(unsigned int intid)
{
switch (intid) {
......@@ -81,39 +90,175 @@ static void gicv3_write_eoir(uint32_t irq)
isb();
}
static void
gicv3_config_irq(unsigned int intid, unsigned int offset)
static void gicv3_write_dir(uint32_t irq)
{
write_sysreg_s(irq, SYS_ICC_DIR_EL1);
isb();
}
static void gicv3_set_priority_mask(uint64_t mask)
{
write_sysreg_s(mask, SYS_ICC_PMR_EL1);
}
static void gicv3_set_eoi_split(bool split)
{
uint32_t val;
/* All other fields are read-only, so no need to read CTLR first. In
* fact, the kernel does the same.
*/
val = split ? (1U << 1) : 0;
write_sysreg_s(val, SYS_ICC_CTLR_EL1);
isb();
}
uint32_t gicv3_reg_readl(uint32_t cpu_or_dist, uint64_t offset)
{
void *base = cpu_or_dist & DIST_BIT ? gicv3_data.dist_base
: sgi_base_from_redist(gicv3_data.redist_base[cpu_or_dist]);
return readl(base + offset);
}
void gicv3_reg_writel(uint32_t cpu_or_dist, uint64_t offset, uint32_t reg_val)
{
void *base = cpu_or_dist & DIST_BIT ? gicv3_data.dist_base
: sgi_base_from_redist(gicv3_data.redist_base[cpu_or_dist]);
writel(reg_val, base + offset);
}
uint32_t gicv3_getl_fields(uint32_t cpu_or_dist, uint64_t offset, uint32_t mask)
{
return gicv3_reg_readl(cpu_or_dist, offset) & mask;
}
void gicv3_setl_fields(uint32_t cpu_or_dist, uint64_t offset,
uint32_t mask, uint32_t reg_val)
{
uint32_t tmp = gicv3_reg_readl(cpu_or_dist, offset) & ~mask;
tmp |= (reg_val & mask);
gicv3_reg_writel(cpu_or_dist, offset, tmp);
}
/*
* We use a single offset for the distributor and redistributor maps as they
* have the same value in both. The only exceptions are registers that only
* exist in one map and not the other, like GICR_WAKER, which has no
* distributor equivalent. Such registers are conveniently marked as reserved
* in the map that doesn't implement them; GICR_WAKER's offset of 0x0014, for
* instance, is marked "Reserved" in the Distributor map.
*/
static void gicv3_access_reg(uint32_t intid, uint64_t offset,
uint32_t reg_bits, uint32_t bits_per_field,
bool write, uint32_t *val)
{
uint32_t cpu = guest_get_vcpuid();
uint32_t mask = 1 << (intid % 32);
enum gicv3_intid_range intid_range = get_intid_range(intid);
void *reg;
/* We care about 'cpu' only for SGIs or PPIs */
if (intid_range == SGI_RANGE || intid_range == PPI_RANGE) {
GUEST_ASSERT(cpu < gicv3_data.nr_cpus);
reg = sgi_base_from_redist(gicv3_data.redist_base[cpu]) +
offset;
writel(mask, reg);
gicv3_gicr_wait_for_rwp(gicv3_data.redist_base[cpu]);
} else if (intid_range == SPI_RANGE) {
reg = gicv3_data.dist_base + offset + (intid / 32) * 4;
writel(mask, reg);
gicv3_gicd_wait_for_rwp();
} else {
GUEST_ASSERT(0);
}
uint32_t fields_per_reg, index, mask, shift;
uint32_t cpu_or_dist;
GUEST_ASSERT(bits_per_field <= reg_bits);
GUEST_ASSERT(*val < (1U << bits_per_field));
/* Some registers like IROUTER are 64 bits long. They are currently not
* supported by either readl or writel, so just assert here until they are.
*/
GUEST_ASSERT(reg_bits == 32);
fields_per_reg = reg_bits / bits_per_field;
index = intid % fields_per_reg;
shift = index * bits_per_field;
mask = ((1U << bits_per_field) - 1) << shift;
/* Set offset to the actual register holding intid's config. */
offset += (intid / fields_per_reg) * (reg_bits / 8);
cpu_or_dist = (intid_range == SPI_RANGE) ? DIST_BIT : cpu;
if (write)
gicv3_setl_fields(cpu_or_dist, offset, mask, *val << shift);
*val = gicv3_getl_fields(cpu_or_dist, offset, mask) >> shift;
}
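A worked example of the field arithmetic above (illustrative numbers only):

/*
 * gicv3_set_priority(42, 0xa0) resolves to
 * gicv3_access_reg(42, GICD_IPRIORITYR, 32, 8, true, &val) with:
 *
 *	fields_per_reg	= 32 / 8        = 4
 *	index		= 42 % 4        = 2
 *	shift		= 2 * 8         = 16
 *	mask		= 0xff << 16    = 0x00ff0000
 *	offset		+= (42 / 4) * 4 = GICD_IPRIORITYR + 40
 *
 * i.e. bits [23:16] of GICD_IPRIORITYR10, accessed through the distributor
 * (DIST_BIT) because SPI 42 is in SPI_RANGE.
 */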
static void gicv3_write_reg(uint32_t intid, uint64_t offset,
uint32_t reg_bits, uint32_t bits_per_field, uint32_t val)
{
gicv3_access_reg(intid, offset, reg_bits,
bits_per_field, true, &val);
}
static uint32_t gicv3_read_reg(uint32_t intid, uint64_t offset,
uint32_t reg_bits, uint32_t bits_per_field)
{
uint32_t val;
gicv3_access_reg(intid, offset, reg_bits,
bits_per_field, false, &val);
return val;
}
static void gicv3_set_priority(uint32_t intid, uint32_t prio)
{
gicv3_write_reg(intid, GICD_IPRIORITYR, 32, 8, prio);
}
/* Sets the intid to be level-sensitive or edge-triggered. */
static void gicv3_irq_set_config(uint32_t intid, bool is_edge)
{
uint32_t val;
/* N/A for private interrupts. */
GUEST_ASSERT(get_intid_range(intid) == SPI_RANGE);
val = is_edge ? 2 : 0;
gicv3_write_reg(intid, GICD_ICFGR, 32, 2, val);
}
static void gicv3_irq_enable(uint32_t intid)
{
bool is_spi = get_intid_range(intid) == SPI_RANGE;
uint32_t cpu = guest_get_vcpuid();
gicv3_write_reg(intid, GICD_ISENABLER, 32, 1, 1);
gicv3_wait_for_rwp(is_spi ? DIST_BIT : cpu);
}
static void gicv3_irq_disable(uint32_t intid)
{
bool is_spi = get_intid_range(intid) == SPI_RANGE;
uint32_t cpu = guest_get_vcpuid();
gicv3_write_reg(intid, GICD_ICENABLER, 32, 1, 1);
gicv3_wait_for_rwp(is_spi ? DIST_BIT : cpu);
}
static void gicv3_irq_set_active(uint32_t intid)
{
gicv3_write_reg(intid, GICD_ISACTIVER, 32, 1, 1);
}
static void gicv3_irq_clear_active(uint32_t intid)
{
gicv3_write_reg(intid, GICD_ICACTIVER, 32, 1, 1);
}
static bool gicv3_irq_get_active(uint32_t intid)
{
return gicv3_read_reg(intid, GICD_ISACTIVER, 32, 1);
}
static void gicv3_irq_set_pending(uint32_t intid)
{
gicv3_write_reg(intid, GICD_ISPENDR, 32, 1, 1);
}
static void gicv3_irq_enable(unsigned int intid)
static void gicv3_irq_clear_pending(uint32_t intid)
{
gicv3_config_irq(intid, GICD_ISENABLER);
gicv3_write_reg(intid, GICD_ICPENDR, 32, 1, 1);
}
static void gicv3_irq_disable(unsigned int intid)
static bool gicv3_irq_get_pending(uint32_t intid)
{
gicv3_config_irq(intid, GICD_ICENABLER);
return gicv3_read_reg(intid, GICD_ISPENDR, 32, 1);
}
static void gicv3_enable_redist(void *redist_base)
......@@ -237,4 +382,15 @@ const struct gic_common_ops gicv3_ops = {
.gic_irq_disable = gicv3_irq_disable,
.gic_read_iar = gicv3_read_iar,
.gic_write_eoir = gicv3_write_eoir,
.gic_write_dir = gicv3_write_dir,
.gic_set_priority_mask = gicv3_set_priority_mask,
.gic_set_eoi_split = gicv3_set_eoi_split,
.gic_set_priority = gicv3_set_priority,
.gic_irq_set_active = gicv3_irq_set_active,
.gic_irq_clear_active = gicv3_irq_clear_active,
.gic_irq_get_active = gicv3_irq_get_active,
.gic_irq_set_pending = gicv3_irq_set_pending,
.gic_irq_clear_pending = gicv3_irq_clear_pending,
.gic_irq_get_pending = gicv3_irq_get_pending,
.gic_irq_set_config = gicv3_irq_set_config,
};
......@@ -8,6 +8,7 @@
#include <linux/compiler.h>
#include <assert.h>
#include "guest_modes.h"
#include "kvm_util.h"
#include "../kvm_util_internal.h"
#include "processor.h"
......@@ -237,6 +238,7 @@ void aarch64_vcpu_setup(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_init
get_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), &sctlr_el1);
get_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TCR_EL1), &tcr_el1);
/* Configure base granule size */
switch (vm->mode) {
case VM_MODE_P52V48_4K:
TEST_FAIL("AArch64 does not support 4K sized pages "
......@@ -245,25 +247,47 @@ void aarch64_vcpu_setup(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_init
TEST_FAIL("AArch64 does not support 4K sized pages "
"with ANY-bit physical address ranges");
case VM_MODE_P52V48_64K:
case VM_MODE_P48V48_64K:
case VM_MODE_P40V48_64K:
case VM_MODE_P36V48_64K:
tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
tcr_el1 |= 6ul << 32; /* IPS = 52 bits */
break;
case VM_MODE_P48V48_16K:
case VM_MODE_P40V48_16K:
case VM_MODE_P36V48_16K:
case VM_MODE_P36V47_16K:
tcr_el1 |= 2ul << 14; /* TG0 = 16KB */
break;
case VM_MODE_P48V48_4K:
case VM_MODE_P40V48_4K:
case VM_MODE_P36V48_4K:
tcr_el1 |= 0ul << 14; /* TG0 = 4KB */
tcr_el1 |= 5ul << 32; /* IPS = 48 bits */
break;
default:
TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
}
/* Configure output size */
switch (vm->mode) {
case VM_MODE_P52V48_64K:
tcr_el1 |= 6ul << 32; /* IPS = 52 bits */
break;
case VM_MODE_P48V48_4K:
case VM_MODE_P48V48_16K:
case VM_MODE_P48V48_64K:
tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
tcr_el1 |= 5ul << 32; /* IPS = 48 bits */
break;
case VM_MODE_P40V48_4K:
tcr_el1 |= 0ul << 14; /* TG0 = 4KB */
tcr_el1 |= 2ul << 32; /* IPS = 40 bits */
break;
case VM_MODE_P40V48_16K:
case VM_MODE_P40V48_64K:
tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
tcr_el1 |= 2ul << 32; /* IPS = 40 bits */
break;
case VM_MODE_P36V48_4K:
case VM_MODE_P36V48_16K:
case VM_MODE_P36V48_64K:
case VM_MODE_P36V47_16K:
tcr_el1 |= 1ul << 32; /* IPS = 36 bits */
break;
default:
TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
}
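For reference, a hedged summary of the architectural encodings used by the two switches above:

/*
 * TCR_EL1.TG0 (bits [15:14]): 0b00 = 4KB, 0b10 = 16KB, 0b01 = 64KB granule.
 * TCR_EL1.IPS (bits [34:32]): 0b001 = 36, 0b010 = 40, 0b101 = 48 and
 * 0b110 = 52 bits of intermediate physical address size.
 */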
......@@ -432,3 +456,47 @@ uint32_t guest_get_vcpuid(void)
{
return read_sysreg(tpidr_el1);
}
void aarch64_get_supported_page_sizes(uint32_t ipa,
bool *ps4k, bool *ps16k, bool *ps64k)
{
struct kvm_vcpu_init preferred_init;
int kvm_fd, vm_fd, vcpu_fd, err;
uint64_t val;
struct kvm_one_reg reg = {
.id = KVM_ARM64_SYS_REG(SYS_ID_AA64MMFR0_EL1),
.addr = (uint64_t)&val,
};
kvm_fd = open_kvm_dev_path_or_exit();
vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, ipa);
TEST_ASSERT(vm_fd >= 0, "Can't create VM");
vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
TEST_ASSERT(vcpu_fd >= 0, "Can't create vcpu");
err = ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &preferred_init);
TEST_ASSERT(err == 0, "Can't get target");
err = ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &preferred_init);
TEST_ASSERT(err == 0, "Can't get init vcpu");
err = ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
TEST_ASSERT(err == 0, "Can't get MMFR0");
*ps4k = ((val >> 28) & 0xf) != 0xf;
*ps64k = ((val >> 24) & 0xf) == 0;
*ps16k = ((val >> 20) & 0xf) != 0;
close(vcpu_fd);
close(vm_fd);
close(kvm_fd);
}
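The three comparisons follow the ID register's field semantics; a hedged summary for readers of this helper:

/*
 * ID_AA64MMFR0_EL1: TGran4 [31:28] reads 0xf when 4K granules are absent,
 * TGran64 [27:24] reads 0x0 when 64K granules are present, and TGran16
 * [23:20] reads 0x0 when 16K granules are absent, hence 4K and 16K test
 * against the "absent" value while 64K tests for the "present" value.
 */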
/*
* arm64 doesn't have a true default mode, so start by computing the
* available IPA space and page sizes early.
*/
void __attribute__((constructor)) init_guest_modes(void)
{
guest_modes_append_default();
}
......@@ -5,11 +5,14 @@
#include <linux/kvm.h>
#include <linux/sizes.h>
#include <asm/kvm_para.h>
#include <asm/kvm.h>
#include "kvm_util.h"
#include "../kvm_util_internal.h"
#include "vgic.h"
#include "gic.h"
#include "gic_v3.h"
/*
* vGIC-v3 default host setup
......@@ -28,7 +31,7 @@
* redistributor regions of the guest. Since it depends on the number of
* vCPUs for the VM, it must be called after all the vCPUs have been created.
*/
int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus,
int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs,
uint64_t gicd_base_gpa, uint64_t gicr_base_gpa)
{
int gic_fd;
......@@ -50,6 +53,13 @@ int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus,
/* Distributor setup */
gic_fd = kvm_create_device(vm, KVM_DEV_TYPE_ARM_VGIC_V3, false);
kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS,
0, &nr_irqs, true);
kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true);
kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_DIST, &gicd_base_gpa, true);
nr_gic_pages = vm_calc_num_guest_pages(vm->mode, KVM_VGIC_V3_DIST_SIZE);
......@@ -68,3 +78,94 @@ int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus,
return gic_fd;
}
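A minimal host-side sketch of the updated call; GICD_BASE_GPA and GICR_BASE_GPA stand for hypothetical test-defined guest physical addresses, and 32 + 64 is an arbitrary multiple-of-32 interrupt count:

	/* Illustrative: 64 SPIs on top of the 32 private interrupts. */
	int gic_fd = vgic_v3_setup(vm, nr_vcpus, 32 + 64,
				   GICD_BASE_GPA, GICR_BASE_GPA);

	TEST_ASSERT(gic_fd >= 0, "Failed to create the vgic-v3 device");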
/* Should only work for level-sensitive interrupts. */
int _kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level)
{
uint64_t attr = 32 * (intid / 32);
uint64_t index = intid % 32;
uint64_t val;
int ret;
ret = _kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO,
attr, &val, false);
if (ret != 0)
return ret;
val |= 1U << index;
ret = _kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO,
attr, &val, true);
return ret;
}
void kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level)
{
int ret = _kvm_irq_set_level_info(gic_fd, intid, level);
TEST_ASSERT(ret == 0, "KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO failed, "
"rc: %i errno: %i", ret, errno);
}
int _kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level)
{
uint32_t irq = intid & KVM_ARM_IRQ_NUM_MASK;
TEST_ASSERT(!INTID_IS_SGI(intid), "KVM_IRQ_LINE's interface itself "
"doesn't allow injecting SGIs. There's no mask for it.");
if (INTID_IS_PPI(intid))
irq |= KVM_ARM_IRQ_TYPE_PPI << KVM_ARM_IRQ_TYPE_SHIFT;
else
irq |= KVM_ARM_IRQ_TYPE_SPI << KVM_ARM_IRQ_TYPE_SHIFT;
return _kvm_irq_line(vm, irq, level);
}
void kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level)
{
int ret = _kvm_arm_irq_line(vm, intid, level);
TEST_ASSERT(ret == 0, "KVM_IRQ_LINE failed, rc: %i errno: %i",
ret, errno);
}
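For context, a hedged sketch of the 'irq' encoding that KVM_IRQ_LINE expects on arm64; the shifts and masks come from the uapi header, while the helper itself is illustrative and not part of the library:

/* Layout: type [31:24], vcpu index [23:16] (private intids only), num [15:0]. */
static inline uint32_t arm_irq_line_encode(uint32_t type, uint32_t vcpu,
					   uint32_t num)
{
	return (type << KVM_ARM_IRQ_TYPE_SHIFT) |
	       (vcpu << KVM_ARM_IRQ_VCPU_SHIFT) |
	       (num & KVM_ARM_IRQ_NUM_MASK);
}

/*
 * e.g. a PPI (intid 27) targeted at vcpu 1:
 *	irq = arm_irq_line_encode(KVM_ARM_IRQ_TYPE_PPI, 1, 27);
 *	_kvm_irq_line(vm, irq, 1);
 */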
static void vgic_poke_irq(int gic_fd, uint32_t intid,
uint32_t vcpu, uint64_t reg_off)
{
uint64_t reg = intid / 32;
uint64_t index = intid % 32;
uint64_t attr = reg_off + reg * 4;
uint64_t val;
bool intid_is_private = INTID_IS_SGI(intid) || INTID_IS_PPI(intid);
/* Check that the addr part of the attr is within 32 bits. */
assert(attr <= KVM_DEV_ARM_VGIC_OFFSET_MASK);
uint32_t group = intid_is_private ? KVM_DEV_ARM_VGIC_GRP_REDIST_REGS
: KVM_DEV_ARM_VGIC_GRP_DIST_REGS;
if (intid_is_private) {
/* TODO: only vcpu 0 implemented for now. */
assert(vcpu == 0);
attr += SZ_64K;
}
/* All calls will succeed, even with invalid intids, as long as the
* addr part of the attr is within 32 bits (checked above). An invalid
* intid will just make the reads/writes land above the intended
* register space (e.g., in ICPENDR instead of ISPENDR).
*/
kvm_device_access(gic_fd, group, attr, &val, false);
val |= 1ULL << index;
kvm_device_access(gic_fd, group, attr, &val, true);
}
void kvm_irq_write_ispendr(int gic_fd, uint32_t intid, uint32_t vcpu)
{
vgic_poke_irq(gic_fd, intid, vcpu, GICD_ISPENDR);
}
void kvm_irq_write_isactiver(int gic_fd, uint32_t intid, uint32_t vcpu)
{
vgic_poke_irq(gic_fd, intid, vcpu, GICD_ISACTIVER);
}
......@@ -4,22 +4,59 @@
*/
#include "guest_modes.h"
#ifdef __aarch64__
#include "processor.h"
enum vm_guest_mode vm_mode_default;
#endif
struct guest_mode guest_modes[NUM_VM_MODES];
void guest_modes_append_default(void)
{
#ifndef __aarch64__
guest_mode_append(VM_MODE_DEFAULT, true, true);
#ifdef __aarch64__
guest_mode_append(VM_MODE_P40V48_64K, true, true);
#else
{
unsigned int limit = kvm_check_cap(KVM_CAP_ARM_VM_IPA_SIZE);
bool ps4k, ps16k, ps64k;
int i;
aarch64_get_supported_page_sizes(limit, &ps4k, &ps16k, &ps64k);
vm_mode_default = NUM_VM_MODES;
if (limit >= 52)
guest_mode_append(VM_MODE_P52V48_64K, true, true);
guest_mode_append(VM_MODE_P52V48_64K, ps64k, ps64k);
if (limit >= 48) {
guest_mode_append(VM_MODE_P48V48_4K, true, true);
guest_mode_append(VM_MODE_P48V48_64K, true, true);
guest_mode_append(VM_MODE_P48V48_4K, ps4k, ps4k);
guest_mode_append(VM_MODE_P48V48_16K, ps16k, ps16k);
guest_mode_append(VM_MODE_P48V48_64K, ps64k, ps64k);
}
if (limit >= 40) {
guest_mode_append(VM_MODE_P40V48_4K, ps4k, ps4k);
guest_mode_append(VM_MODE_P40V48_16K, ps16k, ps16k);
guest_mode_append(VM_MODE_P40V48_64K, ps64k, ps64k);
if (ps4k)
vm_mode_default = VM_MODE_P40V48_4K;
}
if (limit >= 36) {
guest_mode_append(VM_MODE_P36V48_4K, ps4k, ps4k);
guest_mode_append(VM_MODE_P36V48_16K, ps16k, ps16k);
guest_mode_append(VM_MODE_P36V48_64K, ps64k, ps64k);
guest_mode_append(VM_MODE_P36V47_16K, ps16k, ps16k);
}
/*
* Pick the first supported IPA size if the default
* isn't available.
*/
for (i = 0; vm_mode_default == NUM_VM_MODES && i < NUM_VM_MODES; i++) {
if (guest_modes[i].supported && guest_modes[i].enabled)
vm_mode_default = i;
}
TEST_ASSERT(vm_mode_default != NUM_VM_MODES,
"No supported mode!");
}
#endif
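As a hedged usage sketch, a test typically consumes the table built above by iterating the usable modes, e.g.:

	/* Illustrative: run a per-mode test body over every usable mode. */
	int m;

	for (m = 0; m < NUM_VM_MODES; m++) {
		if (!guest_modes[m].supported || !guest_modes[m].enabled)
			continue;
		pr_info("Testing guest mode: %s\n", vm_guest_mode_string(m));
		/* ... vm_create(m, ...), run the test, then clean up ... */
	}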
#ifdef __s390x__
......
......@@ -166,12 +166,18 @@ const char *vm_guest_mode_string(uint32_t i)
[VM_MODE_P52V48_4K] = "PA-bits:52, VA-bits:48, 4K pages",
[VM_MODE_P52V48_64K] = "PA-bits:52, VA-bits:48, 64K pages",
[VM_MODE_P48V48_4K] = "PA-bits:48, VA-bits:48, 4K pages",
[VM_MODE_P48V48_16K] = "PA-bits:48, VA-bits:48, 16K pages",
[VM_MODE_P48V48_64K] = "PA-bits:48, VA-bits:48, 64K pages",
[VM_MODE_P40V48_4K] = "PA-bits:40, VA-bits:48, 4K pages",
[VM_MODE_P40V48_16K] = "PA-bits:40, VA-bits:48, 16K pages",
[VM_MODE_P40V48_64K] = "PA-bits:40, VA-bits:48, 64K pages",
[VM_MODE_PXXV48_4K] = "PA-bits:ANY, VA-bits:48, 4K pages",
[VM_MODE_P47V64_4K] = "PA-bits:47, VA-bits:64, 4K pages",
[VM_MODE_P44V64_4K] = "PA-bits:44, VA-bits:64, 4K pages",
[VM_MODE_P36V48_4K] = "PA-bits:36, VA-bits:48, 4K pages",
[VM_MODE_P36V48_16K] = "PA-bits:36, VA-bits:48, 16K pages",
[VM_MODE_P36V48_64K] = "PA-bits:36, VA-bits:48, 64K pages",
[VM_MODE_P36V47_16K] = "PA-bits:36, VA-bits:47, 16K pages",
};
_Static_assert(sizeof(strings)/sizeof(char *) == NUM_VM_MODES,
"Missing new mode strings?");
......@@ -185,12 +191,18 @@ const struct vm_guest_mode_params vm_guest_mode_params[] = {
[VM_MODE_P52V48_4K] = { 52, 48, 0x1000, 12 },
[VM_MODE_P52V48_64K] = { 52, 48, 0x10000, 16 },
[VM_MODE_P48V48_4K] = { 48, 48, 0x1000, 12 },
[VM_MODE_P48V48_16K] = { 48, 48, 0x4000, 14 },
[VM_MODE_P48V48_64K] = { 48, 48, 0x10000, 16 },
[VM_MODE_P40V48_4K] = { 40, 48, 0x1000, 12 },
[VM_MODE_P40V48_16K] = { 40, 48, 0x4000, 14 },
[VM_MODE_P40V48_64K] = { 40, 48, 0x10000, 16 },
[VM_MODE_PXXV48_4K] = { 0, 0, 0x1000, 12 },
[VM_MODE_P47V64_4K] = { 47, 64, 0x1000, 12 },
[VM_MODE_P44V64_4K] = { 44, 64, 0x1000, 12 },
[VM_MODE_P36V48_4K] = { 36, 48, 0x1000, 12 },
[VM_MODE_P36V48_16K] = { 36, 48, 0x4000, 14 },
[VM_MODE_P36V48_64K] = { 36, 48, 0x10000, 16 },
[VM_MODE_P36V47_16K] = { 36, 47, 0x4000, 14 },
};
_Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES,
"Missing new mode params?");
......@@ -252,9 +264,19 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
vm->pgtable_levels = 3;
break;
case VM_MODE_P40V48_4K:
case VM_MODE_P36V48_4K:
vm->pgtable_levels = 4;
break;
case VM_MODE_P40V48_64K:
case VM_MODE_P36V48_64K:
vm->pgtable_levels = 3;
break;
case VM_MODE_P48V48_16K:
case VM_MODE_P40V48_16K:
case VM_MODE_P36V48_16K:
vm->pgtable_levels = 4;
break;
case VM_MODE_P36V47_16K:
vm->pgtable_levels = 3;
break;
case VM_MODE_PXXV48_4K:
......@@ -2086,6 +2108,78 @@ int vcpu_access_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
return ret;
}
/*
* IRQ related functions.
*/
int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level)
{
struct kvm_irq_level irq_level = {
.irq = irq,
.level = level,
};
return _vm_ioctl(vm, KVM_IRQ_LINE, &irq_level);
}
void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level)
{
int ret = _kvm_irq_line(vm, irq, level);
TEST_ASSERT(ret >= 0, "KVM_IRQ_LINE failed, rc: %i errno: %i", ret, errno);
}
struct kvm_irq_routing *kvm_gsi_routing_create(void)
{
struct kvm_irq_routing *routing;
size_t size;
size = sizeof(struct kvm_irq_routing);
/* Allocate space for the max number of entries: this wastes about 196 KB. */
size += KVM_MAX_IRQ_ROUTES * sizeof(struct kvm_irq_routing_entry);
routing = calloc(1, size);
assert(routing);
return routing;
}
void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
uint32_t gsi, uint32_t pin)
{
int i;
assert(routing);
assert(routing->nr < KVM_MAX_IRQ_ROUTES);
i = routing->nr;
routing->entries[i].gsi = gsi;
routing->entries[i].type = KVM_IRQ_ROUTING_IRQCHIP;
routing->entries[i].flags = 0;
routing->entries[i].u.irqchip.irqchip = 0;
routing->entries[i].u.irqchip.pin = pin;
routing->nr++;
}
int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing)
{
int ret;
assert(routing);
ret = ioctl(vm_get_fd(vm), KVM_SET_GSI_ROUTING, routing);
free(routing);
return ret;
}
void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing)
{
int ret;
ret = _kvm_gsi_routing_write(vm, routing);
TEST_ASSERT(ret == 0, "KVM_SET_GSI_ROUTING failed, rc: %i errno: %i",
ret, errno);
}
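A minimal usage sketch for the routing helpers above, assuming a VM with an in-kernel irqchip; the GSI and pin numbers are arbitrary:

	/* Illustrative: route GSIs 0..2 to irqchip pins 32..34, then commit. */
	struct kvm_irq_routing *routing = kvm_gsi_routing_create();
	uint32_t i;

	for (i = 0; i < 3; i++)
		kvm_gsi_routing_irqchip_add(routing, i, 32 + i);

	/* kvm_gsi_routing_write() frees 'routing' on our behalf. */
	kvm_gsi_routing_write(vm, routing);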
/*
* VM Dump
*
......