Commit 541ad015 authored by Marc Zyngier

arm: Remove 32bit KVM host support

That's it. Remove all references to KVM itself, and document
that although it is no more, the ABI between SVC and HYP still
exists.
Signed-off-by: Marc Zyngier <maz@kernel.org>
Acked-by: Olof Johansson <olof@lixom.net>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Will Deacon <will@kernel.org>
Acked-by: Vladimir Murzin <vladimir.murzin@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Acked-by: Christoffer Dall <christoffer.dall@arm.com>
parent bb7c62bc
@@ -11,6 +11,11 @@ hypervisor when running as a guest (under Xen, KVM or any other
hypervisor), or any hypervisor-specific interaction when the kernel is
used as a host.
Note: KVM/arm has been removed from the kernel. The API described
here is still valid though, as it allows the kernel to kexec when
booted at HYP. It can also be used by a hypervisor other than KVM
if necessary.
On arm and arm64 (without VHE), the kernel doesn't run in hypervisor
mode, but still needs to interact with it, allowing a built-in
hypervisor to be either installed or torn down.
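For instance, code running in SVC that wants to install its own HYP
vectors can do so through this interface; a minimal sketch, assuming the
is_hyp_mode_available() and __hyp_set_vectors() helpers declared in
asm/virt.h:

	#include <asm/virt.h>

	static void install_hyp_vectors(unsigned long phys_vectors)
	{
		if (!is_hyp_mode_available())
			return;
		/* HVC into the stub: r0 = HVC_SET_VECTORS, r1 = vectors */
		__hyp_set_vectors(phys_vectors);
	}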
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*/
#ifndef __ARM_KVM_ARM_H__
#define __ARM_KVM_ARM_H__
#include <linux/const.h>
#include <linux/types.h>
/* Hyp Configuration Register (HCR) bits */
#define HCR_TGE (1 << 27)
#define HCR_TVM (1 << 26)
#define HCR_TTLB (1 << 25)
#define HCR_TPU (1 << 24)
#define HCR_TPC (1 << 23)
#define HCR_TSW (1 << 22)
#define HCR_TAC (1 << 21)
#define HCR_TIDCP (1 << 20)
#define HCR_TSC (1 << 19)
#define HCR_TID3 (1 << 18)
#define HCR_TID2 (1 << 17)
#define HCR_TID1 (1 << 16)
#define HCR_TID0 (1 << 15)
#define HCR_TWE (1 << 14)
#define HCR_TWI (1 << 13)
#define HCR_DC (1 << 12)
#define HCR_BSU (3 << 10)
#define HCR_BSU_IS (1 << 10)
#define HCR_FB (1 << 9)
#define HCR_VA (1 << 8)
#define HCR_VI (1 << 7)
#define HCR_VF (1 << 6)
#define HCR_AMO (1 << 5)
#define HCR_IMO (1 << 4)
#define HCR_FMO (1 << 3)
#define HCR_PTW (1 << 2)
#define HCR_SWIO (1 << 1)
#define HCR_VM 1
/*
* The bits we set in HCR:
* TAC: Trap ACTLR
* TSC: Trap SMC
* TVM: Trap VM ops (until MMU and caches are on)
* TSW: Trap cache operations by set/way
* TWI: Trap WFI
* TWE: Trap WFE
* TIDCP: Trap L2CTLR/L2ECTLR
* BSU_IS: Upgrade barriers to the inner shareable domain
* FB: Force broadcast of all maintenance operations
* AMO: Override CPSR.A and enable signaling with VA
* IMO: Override CPSR.I and enable signaling with VI
* FMO: Override CPSR.F and enable signaling with VF
* SWIO: Turn set/way invalidates into set/way clean+invalidate
*/
#define HCR_GUEST_MASK (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
HCR_TVM | HCR_TWE | HCR_SWIO | HCR_TIDCP)
/* System Control Register (SCTLR) bits */
#define SCTLR_TE (1 << 30)
#define SCTLR_EE (1 << 25)
#define SCTLR_V (1 << 13)
/* Hyp System Control Register (HSCTLR) bits */
#define HSCTLR_TE (1 << 30)
#define HSCTLR_EE (1 << 25)
#define HSCTLR_FI (1 << 21)
#define HSCTLR_WXN (1 << 19)
#define HSCTLR_I (1 << 12)
#define HSCTLR_C (1 << 2)
#define HSCTLR_A (1 << 1)
#define HSCTLR_M 1
#define HSCTLR_MASK (HSCTLR_M | HSCTLR_A | HSCTLR_C | HSCTLR_I | \
HSCTLR_WXN | HSCTLR_FI | HSCTLR_EE | HSCTLR_TE)
/* TTBCR and HTCR Registers bits */
#define TTBCR_EAE (1 << 31)
#define TTBCR_IMP (1 << 30)
#define TTBCR_SH1 (3 << 28)
#define TTBCR_ORGN1 (3 << 26)
#define TTBCR_IRGN1 (3 << 24)
#define TTBCR_EPD1 (1 << 23)
#define TTBCR_A1 (1 << 22)
#define TTBCR_T1SZ (7 << 16)
#define TTBCR_SH0 (3 << 12)
#define TTBCR_ORGN0 (3 << 10)
#define TTBCR_IRGN0 (3 << 8)
#define TTBCR_EPD0 (1 << 7)
#define TTBCR_T0SZ (7 << 0)
#define HTCR_MASK (TTBCR_T0SZ | TTBCR_IRGN0 | TTBCR_ORGN0 | TTBCR_SH0)
/* Hyp System Trap Register */
#define HSTR_T(x) (1 << x)
#define HSTR_TTEE (1 << 16)
#define HSTR_TJDBX (1 << 17)
/* Hyp Coprocessor Trap Register */
#define HCPTR_TCP(x) (1 << x)
#define HCPTR_TCP_MASK (0x3fff)
#define HCPTR_TASE (1 << 15)
#define HCPTR_TTA (1 << 20)
#define HCPTR_TCPAC (1 << 31)
/* Hyp Debug Configuration Register bits */
#define HDCR_TDRA (1 << 11)
#define HDCR_TDOSA (1 << 10)
#define HDCR_TDA (1 << 9)
#define HDCR_TDE (1 << 8)
#define HDCR_HPME (1 << 7)
#define HDCR_TPM (1 << 6)
#define HDCR_TPMCR (1 << 5)
#define HDCR_HPMN_MASK (0x1F)
/*
* The architecture supports 40-bit IPA as input to the 2nd stage translations
* and PTRS_PER_S2_PGD becomes 1024, because each entry covers 1GB of address
* space.
*/
#define KVM_PHYS_SHIFT (40)
#define PTRS_PER_S2_PGD (_AC(1, ULL) << (KVM_PHYS_SHIFT - 30))
/* Virtualization Translation Control Register (VTCR) bits */
#define VTCR_SH0 (3 << 12)
#define VTCR_ORGN0 (3 << 10)
#define VTCR_IRGN0 (3 << 8)
#define VTCR_SL0 (3 << 6)
#define VTCR_S (1 << 4)
#define VTCR_T0SZ (0xf)
#define VTCR_MASK (VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0 | VTCR_SL0 | \
VTCR_S | VTCR_T0SZ)
#define VTCR_HTCR_SH (VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0)
#define VTCR_SL_L2 (0 << 6) /* Starting-level: 2 */
#define VTCR_SL_L1 (1 << 6) /* Starting-level: 1 */
#define KVM_VTCR_SL0 VTCR_SL_L1
/* stage-2 input address range defined as 2^(32-T0SZ) */
#define KVM_T0SZ (32 - KVM_PHYS_SHIFT)
#define KVM_VTCR_T0SZ (KVM_T0SZ & VTCR_T0SZ)
#define KVM_VTCR_S ((KVM_VTCR_T0SZ << 1) & VTCR_S)
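/*
* Worked out for KVM_PHYS_SHIFT == 40: KVM_T0SZ = 32 - 40 = -8, which
* encodes as 0x8 in the 4-bit VTCR.T0SZ field; KVM_VTCR_S then copies
* the sign (bit 3 of the encoded value, shifted into bit 4), so VTCR.S
* ends up set, as the architecture requires for a negative T0SZ.
*/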
/* Virtualization Translation Table Base Register (VTTBR) bits */
#if KVM_VTCR_SL0 == VTCR_SL_L2 /* see ARM DDI 0406C: B4-1720 */
#define VTTBR_X (14 - KVM_T0SZ)
#else
#define VTTBR_X (5 - KVM_T0SZ)
#endif
#define VTTBR_CNP_BIT _AC(1, UL)
#define VTTBR_BADDR_MASK (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_X)
#define VTTBR_VMID_SHIFT _AC(48, ULL)
#define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
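/*
* With KVM_VTCR_SL0 == VTCR_SL_L1 and KVM_T0SZ == -8, VTTBR_X works out
* to 5 - (-8) = 13: the concatenated first-level table is 1024 entries
* of 8 bytes (8KB, see PTRS_PER_S2_PGD) and must be naturally aligned,
* so VTTBR.BADDR occupies bits [39:13].
*/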
/* Hyp Syndrome Register (HSR) bits */
#define HSR_EC_SHIFT (26)
#define HSR_EC (_AC(0x3f, UL) << HSR_EC_SHIFT)
#define HSR_IL (_AC(1, UL) << 25)
#define HSR_ISS (HSR_IL - 1)
#define HSR_ISV_SHIFT (24)
#define HSR_ISV (_AC(1, UL) << HSR_ISV_SHIFT)
#define HSR_SRT_SHIFT (16)
#define HSR_SRT_MASK (0xf << HSR_SRT_SHIFT)
#define HSR_CM (1 << 8)
#define HSR_FSC (0x3f)
#define HSR_FSC_TYPE (0x3c)
#define HSR_SSE (1 << 21)
#define HSR_WNR (1 << 6)
#define HSR_CV_SHIFT (24)
#define HSR_CV (_AC(1, UL) << HSR_CV_SHIFT)
#define HSR_COND_SHIFT (20)
#define HSR_COND (_AC(0xf, UL) << HSR_COND_SHIFT)
#define FSC_FAULT (0x04)
#define FSC_ACCESS (0x08)
#define FSC_PERM (0x0c)
#define FSC_SEA (0x10)
#define FSC_SEA_TTW0 (0x14)
#define FSC_SEA_TTW1 (0x15)
#define FSC_SEA_TTW2 (0x16)
#define FSC_SEA_TTW3 (0x17)
#define FSC_SECC (0x18)
#define FSC_SECC_TTW0 (0x1c)
#define FSC_SECC_TTW1 (0x1d)
#define FSC_SECC_TTW2 (0x1e)
#define FSC_SECC_TTW3 (0x1f)
/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
#define HPFAR_MASK (~0xf)
#define HSR_EC_UNKNOWN (0x00)
#define HSR_EC_WFI (0x01)
#define HSR_EC_CP15_32 (0x03)
#define HSR_EC_CP15_64 (0x04)
#define HSR_EC_CP14_MR (0x05)
#define HSR_EC_CP14_LS (0x06)
#define HSR_EC_CP_0_13 (0x07)
#define HSR_EC_CP10_ID (0x08)
#define HSR_EC_JAZELLE (0x09)
#define HSR_EC_BXJ (0x0A)
#define HSR_EC_CP14_64 (0x0C)
#define HSR_EC_SVC_HYP (0x11)
#define HSR_EC_HVC (0x12)
#define HSR_EC_SMC (0x13)
#define HSR_EC_IABT (0x20)
#define HSR_EC_IABT_HYP (0x21)
#define HSR_EC_DABT (0x24)
#define HSR_EC_DABT_HYP (0x25)
#define HSR_EC_MAX (0x3f)
#define HSR_WFI_IS_WFE (_AC(1, UL) << 0)
#define HSR_HVC_IMM_MASK ((_AC(1, UL) << 16) - 1)
#define HSR_DABT_S1PTW (_AC(1, UL) << 7)
#define HSR_DABT_CM (_AC(1, UL) << 8)
#define kvm_arm_exception_type \
{0, "RESET" }, \
{1, "UNDEFINED" }, \
{2, "SOFTWARE" }, \
{3, "PREF_ABORT" }, \
{4, "DATA_ABORT" }, \
{5, "IRQ" }, \
{6, "FIQ" }, \
{7, "HVC" }
#define HSRECN(x) { HSR_EC_##x, #x }
#define kvm_arm_exception_class \
HSRECN(UNKNOWN), HSRECN(WFI), HSRECN(CP15_32), HSRECN(CP15_64), \
HSRECN(CP14_MR), HSRECN(CP14_LS), HSRECN(CP_0_13), HSRECN(CP10_ID), \
HSRECN(JAZELLE), HSRECN(BXJ), HSRECN(CP14_64), HSRECN(SVC_HYP), \
HSRECN(HVC), HSRECN(SMC), HSRECN(IABT), HSRECN(IABT_HYP), \
HSRECN(DABT), HSRECN(DABT_HYP)
#endif /* __ARM_KVM_ARM_H__ */
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*/
#ifndef __ARM_KVM_ASM_H__
#define __ARM_KVM_ASM_H__
#include <asm/virt.h>
#define ARM_EXIT_WITH_ABORT_BIT 31
#define ARM_EXCEPTION_CODE(x) ((x) & ~(1U << ARM_EXIT_WITH_ABORT_BIT))
#define ARM_EXCEPTION_IS_TRAP(x) \
(ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_PREF_ABORT || \
ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_DATA_ABORT || \
ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_HVC)
#define ARM_ABORT_PENDING(x) !!((x) & (1U << ARM_EXIT_WITH_ABORT_BIT))
#define ARM_EXCEPTION_RESET 0
#define ARM_EXCEPTION_UNDEFINED 1
#define ARM_EXCEPTION_SOFTWARE 2
#define ARM_EXCEPTION_PREF_ABORT 3
#define ARM_EXCEPTION_DATA_ABORT 4
#define ARM_EXCEPTION_IRQ 5
#define ARM_EXCEPTION_FIQ 6
#define ARM_EXCEPTION_HVC 7
#define ARM_EXCEPTION_HYP_GONE HVC_STUB_ERR
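/*
* For example, a return value of
* ((1U << ARM_EXIT_WITH_ABORT_BIT) | ARM_EXCEPTION_HVC) decodes as
* ARM_EXCEPTION_CODE() == ARM_EXCEPTION_HVC with ARM_ABORT_PENDING()
* true: an HVC trap taken while an asynchronous abort is pending.
*/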
/*
* The rr_lo_hi macro swaps a pair of registers depending on
* current endianness. It is used in conjunction with ldrd and strd
* instructions that load/store a 64-bit value from/to memory to/from
* a pair of registers which are used with the mrrc and mcrr instructions.
* If used with the ldrd/strd instructions, the a1 parameter is the first
* source/destination register and the a2 parameter is the second
* source/destination register. Note that the ldrd/strd instructions
* already swap the bytes within the words correctly according to the
* endianness setting, but the order of the registers needs to be effectively
* swapped when used with the mrrc/mcrr instructions.
*/
#ifdef CONFIG_CPU_ENDIAN_BE8
#define rr_lo_hi(a1, a2) a2, a1
#else
#define rr_lo_hi(a1, a2) a1, a2
#endif
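/*
* For illustration, a caller that loaded a 64-bit value into r2/r3 with
* ldrd and wants to write it to a 64-bit CP15 register such as VTTBR
* (p15, opc1 6, c2) would do something like:
*
*	ldrd	r2, r3, [r0]
*	mcrr	p15, 6, rr_lo_hi(r2, r3), c2	@ "r2, r3" on LE, "r3, r2" on BE8
*/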
#define kvm_ksym_ref(kva) (kva)
#ifndef __ASSEMBLY__
struct kvm;
struct kvm_vcpu;
extern char __kvm_hyp_init[];
extern char __kvm_hyp_init_end[];
extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
extern void __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high);
/* no VHE on 32-bit :( */
static inline int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) { BUG(); return 0; }
extern int __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu);
extern void __init_stage2_translation(void);
extern u64 __vgic_v3_get_ich_vtr_el2(void);
extern u64 __vgic_v3_read_vmcr(void);
extern void __vgic_v3_write_vmcr(u32 vmcr);
extern void __vgic_v3_init_lrs(void);
#endif
#endif /* __ARM_KVM_ASM_H__ */
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 Rusty Russell IBM Corporation
*/
#ifndef __ARM_KVM_COPROC_H__
#define __ARM_KVM_COPROC_H__
#include <linux/kvm_host.h>
void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
struct kvm_coproc_target_table {
unsigned target;
const struct coproc_reg *table;
size_t num;
};
void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table);
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
unsigned long kvm_arm_num_guest_msrs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_msrindices(struct kvm_vcpu *vcpu, u64 __user *uindices);
void kvm_coproc_table_init(void);
struct kvm_one_reg;
int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
#endif /* __ARM_KVM_COPROC_H__ */
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*/
#ifndef __ARM_KVM_EMULATE_H__
#define __ARM_KVM_EMULATE_H__
#include <linux/kvm_host.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/cputype.h>
/* arm64 compatibility macros */
#define PSR_AA32_MODE_FIQ FIQ_MODE
#define PSR_AA32_MODE_SVC SVC_MODE
#define PSR_AA32_MODE_ABT ABT_MODE
#define PSR_AA32_MODE_UND UND_MODE
#define PSR_AA32_T_BIT PSR_T_BIT
#define PSR_AA32_F_BIT PSR_F_BIT
#define PSR_AA32_I_BIT PSR_I_BIT
#define PSR_AA32_A_BIT PSR_A_BIT
#define PSR_AA32_E_BIT PSR_E_BIT
#define PSR_AA32_IT_MASK PSR_IT_MASK
#define PSR_AA32_GE_MASK 0x000f0000
#define PSR_AA32_DIT_BIT 0x00200000
#define PSR_AA32_PAN_BIT 0x00400000
#define PSR_AA32_SSBS_BIT 0x00800000
#define PSR_AA32_Q_BIT PSR_Q_BIT
#define PSR_AA32_V_BIT PSR_V_BIT
#define PSR_AA32_C_BIT PSR_C_BIT
#define PSR_AA32_Z_BIT PSR_Z_BIT
#define PSR_AA32_N_BIT PSR_N_BIT
unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
static inline unsigned long *vcpu_reg32(struct kvm_vcpu *vcpu, u8 reg_num)
{
return vcpu_reg(vcpu, reg_num);
}
unsigned long *__vcpu_spsr(struct kvm_vcpu *vcpu);
static inline unsigned long vpcu_read_spsr(struct kvm_vcpu *vcpu)
{
return *__vcpu_spsr(vcpu);
}
static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
{
*__vcpu_spsr(vcpu) = v;
}
static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
{
return spsr;
}
static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu,
u8 reg_num)
{
return *vcpu_reg(vcpu, reg_num);
}
static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
unsigned long val)
{
*vcpu_reg(vcpu, reg_num) = val;
}
bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
void kvm_inject_undef32(struct kvm_vcpu *vcpu);
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
static inline void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
kvm_inject_undef32(vcpu);
}
static inline void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
kvm_inject_dabt32(vcpu, addr);
}
static inline void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
kvm_inject_pabt32(vcpu, addr);
}
static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
return kvm_condition_valid32(vcpu);
}
static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
kvm_skip_instr32(vcpu, is_wide_instr);
}
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
vcpu->arch.hcr = HCR_GUEST_MASK;
}
static inline unsigned long *vcpu_hcr(const struct kvm_vcpu *vcpu)
{
return (unsigned long *)&vcpu->arch.hcr;
}
static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
{
vcpu->arch.hcr &= ~HCR_TWE;
}
static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
{
vcpu->arch.hcr |= HCR_TWE;
}
static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
return true;
}
static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
{
return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_pc;
}
static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
return (unsigned long *)&vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr;
}
static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
*vcpu_cpsr(vcpu) |= PSR_T_BIT;
}
static inline bool mode_has_spsr(struct kvm_vcpu *vcpu)
{
unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK;
return (cpsr_mode > USR_MODE && cpsr_mode < SYSTEM_MODE);
}
static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu)
{
unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK;
return cpsr_mode > USR_MODE;
}
static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
{
return vcpu->arch.fault.hsr;
}
static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
u32 hsr = kvm_vcpu_get_hsr(vcpu);
if (hsr & HSR_CV)
return (hsr & HSR_COND) >> HSR_COND_SHIFT;
return -1;
}
static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
{
return vcpu->arch.fault.hxfar;
}
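/*
* HPFAR holds bits [39:12] of the faulting IPA in bits [31:4], so
* masking out the low nibble and shifting left by 8 reconstructs the
* IPA to 4KB granularity; the offset within the page comes from
* HDFAR/HIFAR.
*/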
static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu)
{
return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8;
}
static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & HSR_ISV;
}
static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & (HSR_CM | HSR_WNR | HSR_FSC);
}
static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & HSR_WNR;
}
static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & HSR_SSE;
}
static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
{
return false;
}
static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
{
return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
}
static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
}
static inline bool kvm_vcpu_dabt_is_cm(struct kvm_vcpu *vcpu)
{
return !!(kvm_vcpu_get_hsr(vcpu) & HSR_DABT_CM);
}
/* Get Access Size from a data abort */
static inline unsigned int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
{
switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) {
case 0:
return 1;
case 1:
return 2;
case 2:
return 4;
default:
kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
return 4;
}
}
/* This one is not specific to Data Abort */
static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & HSR_IL;
}
static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT;
}
static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT;
}
static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & HSR_FSC;
}
static inline u8 kvm_vcpu_trap_get_fault_type(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE;
}
static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
{
switch (kvm_vcpu_trap_get_fault(vcpu)) {
case FSC_SEA:
case FSC_SEA_TTW0:
case FSC_SEA_TTW1:
case FSC_SEA_TTW2:
case FSC_SEA_TTW3:
case FSC_SECC:
case FSC_SECC_TTW0:
case FSC_SECC_TTW1:
case FSC_SECC_TTW2:
case FSC_SECC_TTW3:
return true;
default:
return false;
}
}
static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
if (kvm_vcpu_trap_is_iabt(vcpu))
return false;
return kvm_vcpu_dabt_iswrite(vcpu);
}
static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
}
static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
return vcpu_cp15(vcpu, c0_MPIDR) & MPIDR_HWID_BITMASK;
}
static inline bool kvm_arm_get_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu)
{
return false;
}
static inline void kvm_arm_set_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu,
bool flag)
{
}
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
*vcpu_cpsr(vcpu) |= PSR_E_BIT;
}
static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
return !!(*vcpu_cpsr(vcpu) & PSR_E_BIT);
}
static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
unsigned long data,
unsigned int len)
{
if (kvm_vcpu_is_be(vcpu)) {
switch (len) {
case 1:
return data & 0xff;
case 2:
return be16_to_cpu(data & 0xffff);
default:
return be32_to_cpu(data);
}
} else {
switch (len) {
case 1:
return data & 0xff;
case 2:
return le16_to_cpu(data & 0xffff);
default:
return le32_to_cpu(data);
}
}
}
static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
unsigned long data,
unsigned int len)
{
if (kvm_vcpu_is_be(vcpu)) {
switch (len) {
case 1:
return data & 0xff;
case 2:
return cpu_to_be16(data & 0xffff);
default:
return cpu_to_be32(data);
}
} else {
switch (len) {
case 1:
return data & 0xff;
case 2:
return cpu_to_le16(data & 0xffff);
default:
return cpu_to_le32(data);
}
}
}
static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu) {}
#endif /* __ARM_KVM_EMULATE_H__ */
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2015 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*/
#ifndef __ARM_KVM_HYP_H__
#define __ARM_KVM_HYP_H__
#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/cp15.h>
#include <asm/kvm_arm.h>
#include <asm/vfp.h>
#define __hyp_text __section(.hyp.text) notrace
#define __ACCESS_VFP(CRn) \
"mrc", "mcr", __stringify(p10, 7, %0, CRn, cr0, 0), u32
#define write_special(v, r) \
asm volatile("msr " __stringify(r) ", %0" : : "r" (v))
#define read_special(r) ({ \
u32 __val; \
asm volatile("mrs %0, " __stringify(r) : "=r" (__val)); \
__val; \
})
#define TTBR0 __ACCESS_CP15_64(0, c2)
#define TTBR1 __ACCESS_CP15_64(1, c2)
#define VTTBR __ACCESS_CP15_64(6, c2)
#define PAR __ACCESS_CP15_64(0, c7)
#define CNTP_CVAL __ACCESS_CP15_64(2, c14)
#define CNTV_CVAL __ACCESS_CP15_64(3, c14)
#define CNTVOFF __ACCESS_CP15_64(4, c14)
#define MIDR __ACCESS_CP15(c0, 0, c0, 0)
#define CSSELR __ACCESS_CP15(c0, 2, c0, 0)
#define VPIDR __ACCESS_CP15(c0, 4, c0, 0)
#define VMPIDR __ACCESS_CP15(c0, 4, c0, 5)
#define SCTLR __ACCESS_CP15(c1, 0, c0, 0)
#define CPACR __ACCESS_CP15(c1, 0, c0, 2)
#define HCR __ACCESS_CP15(c1, 4, c1, 0)
#define HDCR __ACCESS_CP15(c1, 4, c1, 1)
#define HCPTR __ACCESS_CP15(c1, 4, c1, 2)
#define HSTR __ACCESS_CP15(c1, 4, c1, 3)
#define TTBCR __ACCESS_CP15(c2, 0, c0, 2)
#define HTCR __ACCESS_CP15(c2, 4, c0, 2)
#define VTCR __ACCESS_CP15(c2, 4, c1, 2)
#define DACR __ACCESS_CP15(c3, 0, c0, 0)
#define DFSR __ACCESS_CP15(c5, 0, c0, 0)
#define IFSR __ACCESS_CP15(c5, 0, c0, 1)
#define ADFSR __ACCESS_CP15(c5, 0, c1, 0)
#define AIFSR __ACCESS_CP15(c5, 0, c1, 1)
#define HSR __ACCESS_CP15(c5, 4, c2, 0)
#define DFAR __ACCESS_CP15(c6, 0, c0, 0)
#define IFAR __ACCESS_CP15(c6, 0, c0, 2)
#define HDFAR __ACCESS_CP15(c6, 4, c0, 0)
#define HIFAR __ACCESS_CP15(c6, 4, c0, 2)
#define HPFAR __ACCESS_CP15(c6, 4, c0, 4)
#define ICIALLUIS __ACCESS_CP15(c7, 0, c1, 0)
#define BPIALLIS __ACCESS_CP15(c7, 0, c1, 6)
#define ICIMVAU __ACCESS_CP15(c7, 0, c5, 1)
#define ATS1CPR __ACCESS_CP15(c7, 0, c8, 0)
#define TLBIALLIS __ACCESS_CP15(c8, 0, c3, 0)
#define TLBIALL __ACCESS_CP15(c8, 0, c7, 0)
#define TLBIALLNSNHIS __ACCESS_CP15(c8, 4, c3, 4)
#define PRRR __ACCESS_CP15(c10, 0, c2, 0)
#define NMRR __ACCESS_CP15(c10, 0, c2, 1)
#define AMAIR0 __ACCESS_CP15(c10, 0, c3, 0)
#define AMAIR1 __ACCESS_CP15(c10, 0, c3, 1)
#define VBAR __ACCESS_CP15(c12, 0, c0, 0)
#define CID __ACCESS_CP15(c13, 0, c0, 1)
#define TID_URW __ACCESS_CP15(c13, 0, c0, 2)
#define TID_URO __ACCESS_CP15(c13, 0, c0, 3)
#define TID_PRIV __ACCESS_CP15(c13, 0, c0, 4)
#define HTPIDR __ACCESS_CP15(c13, 4, c0, 2)
#define CNTKCTL __ACCESS_CP15(c14, 0, c1, 0)
#define CNTP_CTL __ACCESS_CP15(c14, 0, c2, 1)
#define CNTV_CTL __ACCESS_CP15(c14, 0, c3, 1)
#define CNTHCTL __ACCESS_CP15(c14, 4, c1, 0)
#define VFP_FPEXC __ACCESS_VFP(FPEXC)
/* AArch64 compatibility macros, only for the timer so far */
#define read_sysreg_el0(r) read_sysreg(r##_EL0)
#define write_sysreg_el0(v, r) write_sysreg(v, r##_EL0)
#define SYS_CNTP_CTL_EL0 CNTP_CTL
#define SYS_CNTP_CVAL_EL0 CNTP_CVAL
#define SYS_CNTV_CTL_EL0 CNTV_CTL
#define SYS_CNTV_CVAL_EL0 CNTV_CVAL
#define cntvoff_el2 CNTVOFF
#define cnthctl_el2 CNTHCTL
void __timer_enable_traps(struct kvm_vcpu *vcpu);
void __timer_disable_traps(struct kvm_vcpu *vcpu);
void __vgic_v2_save_state(struct kvm_vcpu *vcpu);
void __vgic_v2_restore_state(struct kvm_vcpu *vcpu);
void __sysreg_save_state(struct kvm_cpu_context *ctxt);
void __sysreg_restore_state(struct kvm_cpu_context *ctxt);
void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
void __vgic_v3_activate_traps(struct kvm_vcpu *vcpu);
void __vgic_v3_deactivate_traps(struct kvm_vcpu *vcpu);
void __vgic_v3_save_aprs(struct kvm_vcpu *vcpu);
void __vgic_v3_restore_aprs(struct kvm_vcpu *vcpu);
asmlinkage void __vfp_save_state(struct vfp_hard_struct *vfp);
asmlinkage void __vfp_restore_state(struct vfp_hard_struct *vfp);
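/*
* cp10/cp11 are the VFP/Advanced SIMD coprocessors: if neither HCPTR
* trap bit is set, the guest currently owns the FP/SIMD registers.
*/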
static inline bool __vfp_enabled(void)
{
return !(read_sysreg(HCPTR) & (HCPTR_TCP(11) | HCPTR_TCP(10)));
}
void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt);
void __hyp_text __banked_restore_state(struct kvm_cpu_context *ctxt);
asmlinkage int __guest_enter(struct kvm_vcpu *vcpu,
struct kvm_cpu_context *host);
asmlinkage int __hyp_do_panic(const char *, int, u32);
#endif /* __ARM_KVM_HYP_H__ */
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2018 - Arm Ltd */
#ifndef __ARM_KVM_RAS_H__
#define __ARM_KVM_RAS_H__
#include <linux/types.h>
static inline int kvm_handle_guest_sea(phys_addr_t addr, unsigned int esr)
{
return -1;
}
#endif /* __ARM_KVM_RAS_H__ */
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2016 - ARM Ltd
*
* stage2 page table helpers
*/
#ifndef __ARM_S2_PGTABLE_H_
#define __ARM_S2_PGTABLE_H_
/*
* kvm_mmu_cache_min_pages() is the number of pages required
* to install a stage-2 translation. We pre-allocate the entry
* level table at VM creation. Since we have a 3 level page-table,
* we need only two pages to add a new mapping.
*/
#define kvm_mmu_cache_min_pages(kvm) 2
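/*
* Worst case when installing a new pte: the first level table already
* exists (it is preallocated at VM creation), so at most one second
* level (pmd) table page and one third level (pte) table page need to
* be allocated.
*/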
#define stage2_pgd_none(kvm, pgd) pgd_none(pgd)
#define stage2_pgd_clear(kvm, pgd) pgd_clear(pgd)
#define stage2_pgd_present(kvm, pgd) pgd_present(pgd)
#define stage2_pgd_populate(kvm, pgd, pud) pgd_populate(NULL, pgd, pud)
#define stage2_pud_offset(kvm, pgd, address) pud_offset(pgd, address)
#define stage2_pud_free(kvm, pud) do { } while (0)
#define stage2_pud_none(kvm, pud) pud_none(pud)
#define stage2_pud_clear(kvm, pud) pud_clear(pud)
#define stage2_pud_present(kvm, pud) pud_present(pud)
#define stage2_pud_populate(kvm, pud, pmd) pud_populate(NULL, pud, pmd)
#define stage2_pmd_offset(kvm, pud, address) pmd_offset(pud, address)
#define stage2_pmd_free(kvm, pmd) free_page((unsigned long)pmd)
#define stage2_pud_huge(kvm, pud) pud_huge(pud)
/* Open coded p*d_addr_end that can deal with 64bit addresses */
static inline phys_addr_t
stage2_pgd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
{
phys_addr_t boundary = (addr + PGDIR_SIZE) & PGDIR_MASK;
return (boundary - 1 < end - 1) ? boundary : end;
}
#define stage2_pud_addr_end(kvm, addr, end) (end)
static inline phys_addr_t
stage2_pmd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
{
phys_addr_t boundary = (addr + PMD_SIZE) & PMD_MASK;
return (boundary - 1 < end - 1) ? boundary : end;
}
#define stage2_pgd_index(kvm, addr) pgd_index(addr)
#define stage2_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
#define stage2_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
#define stage2_pud_table_empty(kvm, pudp) false
static inline bool kvm_stage2_has_pud(struct kvm *kvm)
{
return false;
}
#define S2_PMD_MASK PMD_MASK
#define S2_PMD_SIZE PMD_SIZE
#define S2_PUD_MASK PUD_MASK
#define S2_PUD_SIZE PUD_SIZE
static inline bool kvm_stage2_has_pmd(struct kvm *kvm)
{
return true;
}
#endif /* __ARM_S2_PGTABLE_H_ */
@@ -67,11 +67,6 @@ static inline bool is_kernel_in_hyp_mode(void)
return false;
}
static inline bool has_vhe(void)
{
return false;
}
/* The section containing the hypervisor idmap text */
extern char __hyp_idmap_text_start[];
extern char __hyp_idmap_text_end[];
@@ -11,9 +11,6 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#ifdef CONFIG_KVM_ARM_HOST
#include <linux/kvm_host.h>
#endif
#include <asm/cacheflush.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
@@ -167,14 +164,6 @@ int main(void)
DEFINE(CACHE_WRITEBACK_ORDER, __CACHE_WRITEBACK_ORDER);
DEFINE(CACHE_WRITEBACK_GRANULE, __CACHE_WRITEBACK_GRANULE);
BLANK();
#ifdef CONFIG_KVM_ARM_HOST
DEFINE(VCPU_GUEST_CTXT, offsetof(struct kvm_vcpu, arch.ctxt));
DEFINE(VCPU_HOST_CTXT, offsetof(struct kvm_vcpu, arch.host_cpu_context));
DEFINE(CPU_CTXT_VFP, offsetof(struct kvm_cpu_context, vfp));
DEFINE(CPU_CTXT_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs));
DEFINE(GP_REGS_USR, offsetof(struct kvm_regs, usr_regs));
#endif
BLANK();
#ifdef CONFIG_VDSO
DEFINE(VDSO_DATA_SIZE, sizeof(union vdso_data_store));
#endif
# SPDX-License-Identifier: GPL-2.0
#
# KVM configuration
#
source "virt/kvm/Kconfig"
source "virt/lib/Kconfig"
menuconfig VIRTUALIZATION
bool "Virtualization"
---help---
Say Y here to get to see options for using your Linux host to run
other operating systems inside virtual machines (guests).
This option alone does not add any kernel code.
If you say N, all options in this submenu will be skipped and
disabled.
if VIRTUALIZATION
config KVM
bool "Kernel-based Virtual Machine (KVM) support"
depends on MMU && OF
select PREEMPT_NOTIFIERS
select ARM_GIC
select ARM_GIC_V3
select ARM_GIC_V3_ITS
select HAVE_KVM_CPU_RELAX_INTERCEPT
select HAVE_KVM_ARCH_TLB_FLUSH_ALL
select KVM_MMIO
select KVM_ARM_HOST
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
select SRCU
select MMU_NOTIFIER
select KVM_VFIO
select HAVE_KVM_EVENTFD
select HAVE_KVM_IRQFD
select HAVE_KVM_IRQCHIP
select HAVE_KVM_IRQ_ROUTING
select HAVE_KVM_MSI
select IRQ_BYPASS_MANAGER
select HAVE_KVM_IRQ_BYPASS
depends on ARM_VIRT_EXT && ARM_LPAE && ARM_ARCH_TIMER
---help---
Support hosting virtualized guest machines.
This module provides access to the hardware capabilities through
a character device node named /dev/kvm.
If unsure, say N.
config KVM_ARM_HOST
bool
---help---
Provides host support for ARM processors.
source "drivers/vhost/Kconfig"
endif # VIRTUALIZATION
# SPDX-License-Identifier: GPL-2.0
#
# Makefile for Kernel-based Virtual Machine module
#
plus_virt := $(call as-instr,.arch_extension virt,+virt)
ifeq ($(plus_virt),+virt)
plus_virt_def := -DREQUIRES_VIRT=1
endif
KVM := ../../../virt/kvm
ccflags-y += -I $(srctree)/$(src) -I $(srctree)/virt/kvm/arm/vgic
CFLAGS_$(KVM)/arm/arm.o := $(plus_virt_def)
AFLAGS_init.o := -Wa,-march=armv7-a$(plus_virt)
AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt)
kvm-arm-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o $(KVM)/vfio.o
obj-$(CONFIG_KVM_ARM_HOST) += hyp/
obj-y += kvm-arm.o init.o interrupts.o
obj-y += handle_exit.o guest.o emulate.o reset.o
obj-y += coproc.o coproc_a15.o coproc_a7.o vgic-v3-coproc.o
obj-y += $(KVM)/arm/arm.o $(KVM)/arm/mmu.o $(KVM)/arm/mmio.o
obj-y += $(KVM)/arm/psci.o $(KVM)/arm/perf.o $(KVM)/arm/hypercalls.o
obj-y += $(KVM)/arm/aarch32.o
obj-y += $(KVM)/arm/vgic/vgic.o
obj-y += $(KVM)/arm/vgic/vgic-init.o
obj-y += $(KVM)/arm/vgic/vgic-irqfd.o
obj-y += $(KVM)/arm/vgic/vgic-v2.o
obj-y += $(KVM)/arm/vgic/vgic-v3.o
obj-y += $(KVM)/arm/vgic/vgic-v4.o
obj-y += $(KVM)/arm/vgic/vgic-mmio.o
obj-y += $(KVM)/arm/vgic/vgic-mmio-v2.o
obj-y += $(KVM)/arm/vgic/vgic-mmio-v3.o
obj-y += $(KVM)/arm/vgic/vgic-kvm-device.o
obj-y += $(KVM)/arm/vgic/vgic-its.o
obj-y += $(KVM)/arm/vgic/vgic-debug.o
obj-y += $(KVM)/irqchip.o
obj-y += $(KVM)/arm/arch_timer.o
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Authors: Christoffer Dall <c.dall@virtualopensystems.com>
*/
#ifndef __ARM_KVM_COPROC_LOCAL_H__
#define __ARM_KVM_COPROC_LOCAL_H__
struct coproc_params {
unsigned long CRn;
unsigned long CRm;
unsigned long Op1;
unsigned long Op2;
unsigned long Rt1;
unsigned long Rt2;
bool is_64bit;
bool is_write;
};
struct coproc_reg {
/* MRC/MCR/MRRC/MCRR instruction which accesses it. */
unsigned long CRn;
unsigned long CRm;
unsigned long Op1;
unsigned long Op2;
bool is_64bit;
/* Trapped access from guest, if non-NULL. */
bool (*access)(struct kvm_vcpu *,
const struct coproc_params *,
const struct coproc_reg *);
/* Initialization for vcpu. */
void (*reset)(struct kvm_vcpu *, const struct coproc_reg *);
/* Index into vcpu_cp15(vcpu, ...), or 0 if we don't need to save it. */
unsigned long reg;
/* Value (usually reset value) */
u64 val;
};
static inline void print_cp_instr(const struct coproc_params *p)
{
/* Look, we even formatted it for you to paste into the table! */
if (p->is_64bit) {
kvm_pr_unimpl(" { CRm64(%2lu), Op1(%2lu), is64, func_%s },\n",
p->CRn, p->Op1, p->is_write ? "write" : "read");
} else {
kvm_pr_unimpl(" { CRn(%2lu), CRm(%2lu), Op1(%2lu), Op2(%2lu), is32,"
" func_%s },\n",
p->CRn, p->CRm, p->Op1, p->Op2,
p->is_write ? "write" : "read");
}
}
static inline bool ignore_write(struct kvm_vcpu *vcpu,
const struct coproc_params *p)
{
return true;
}
static inline bool read_zero(struct kvm_vcpu *vcpu,
const struct coproc_params *p)
{
*vcpu_reg(vcpu, p->Rt1) = 0;
return true;
}
/* Reset functions */
static inline void reset_unknown(struct kvm_vcpu *vcpu,
const struct coproc_reg *r)
{
BUG_ON(!r->reg);
BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.ctxt.cp15));
vcpu_cp15(vcpu, r->reg) = 0xdecafbad;
}
static inline void reset_val(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
BUG_ON(!r->reg);
BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.ctxt.cp15));
vcpu_cp15(vcpu, r->reg) = r->val;
}
static inline void reset_unknown64(struct kvm_vcpu *vcpu,
const struct coproc_reg *r)
{
BUG_ON(!r->reg);
BUG_ON(r->reg + 1 >= ARRAY_SIZE(vcpu->arch.ctxt.cp15));
vcpu_cp15(vcpu, r->reg) = 0xdecafbad;
vcpu_cp15(vcpu, r->reg+1) = 0xd0c0ffee;
}
static inline int cmp_reg(const struct coproc_reg *i1,
const struct coproc_reg *i2)
{
BUG_ON(i1 == i2);
if (!i1)
return 1;
else if (!i2)
return -1;
if (i1->CRn != i2->CRn)
return i1->CRn - i2->CRn;
if (i1->CRm != i2->CRm)
return i1->CRm - i2->CRm;
if (i1->Op1 != i2->Op1)
return i1->Op1 - i2->Op1;
if (i1->Op2 != i2->Op2)
return i1->Op2 - i2->Op2;
return i2->is_64bit - i1->is_64bit;
}
#define CRn(_x) .CRn = _x
#define CRm(_x) .CRm = _x
#define CRm64(_x) .CRn = _x, .CRm = 0
#define Op1(_x) .Op1 = _x
#define Op2(_x) .Op2 = _x
#define is64 .is_64bit = true
#define is32 .is_64bit = false
bool access_vm_reg(struct kvm_vcpu *vcpu,
const struct coproc_params *p,
const struct coproc_reg *r);
#endif /* __ARM_KVM_COPROC_LOCAL_H__ */
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Authors: Rusty Russell <rusty@rustcorp.au>
* Christoffer Dall <c.dall@virtualopensystems.com>
*/
#include <linux/kvm_host.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <linux/init.h>
#include "coproc.h"
/*
* A15-specific CP15 registers.
* CRn denotes the primary register number, but is copied to the CRm in the
* user space API for 64-bit register access in line with the terminology used
* in the ARM ARM.
* Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit
* registers preceding 32-bit ones.
*/
static const struct coproc_reg a15_regs[] = {
/* SCTLR: swapped by interrupt.S. */
{ CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
access_vm_reg, reset_val, c1_SCTLR, 0x00C50078 },
};
static struct kvm_coproc_target_table a15_target_table = {
.target = KVM_ARM_TARGET_CORTEX_A15,
.table = a15_regs,
.num = ARRAY_SIZE(a15_regs),
};
static int __init coproc_a15_init(void)
{
kvm_register_target_coproc_table(&a15_target_table);
return 0;
}
late_initcall(coproc_a15_init);
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Copyright (C) 2013 - ARM Ltd
*
* Authors: Rusty Russell <rusty@rustcorp.au>
* Christoffer Dall <c.dall@virtualopensystems.com>
* Jonathan Austin <jonathan.austin@arm.com>
*/
#include <linux/kvm_host.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <linux/init.h>
#include "coproc.h"
/*
* Cortex-A7 specific CP15 registers.
* CRn denotes the primary register number, but is copied to the CRm in the
* user space API for 64-bit register access in line with the terminology used
* in the ARM ARM.
* Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit
* registers preceding 32-bit ones.
*/
static const struct coproc_reg a7_regs[] = {
/* SCTLR: swapped by interrupt.S. */
{ CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
access_vm_reg, reset_val, c1_SCTLR, 0x00C50878 },
};
static struct kvm_coproc_target_table a7_target_table = {
.target = KVM_ARM_TARGET_CORTEX_A7,
.table = a7_regs,
.num = ARRAY_SIZE(a7_regs),
};
static int __init coproc_a7_init(void)
{
kvm_register_target_coproc_table(&a7_target_table);
return 0;
}
late_initcall(coproc_a7_init);
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*/
#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>
#include <asm/opcodes.h>
#include <trace/events/kvm.h>
#include "trace.h"
#define VCPU_NR_MODES 6
#define VCPU_REG_OFFSET_USR 0
#define VCPU_REG_OFFSET_FIQ 1
#define VCPU_REG_OFFSET_IRQ 2
#define VCPU_REG_OFFSET_SVC 3
#define VCPU_REG_OFFSET_ABT 4
#define VCPU_REG_OFFSET_UND 5
#define REG_OFFSET(_reg) \
(offsetof(struct kvm_regs, _reg) / sizeof(u32))
#define USR_REG_OFFSET(_num) REG_OFFSET(usr_regs.uregs[_num])
static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][15] = {
/* USR/SYS Registers */
[VCPU_REG_OFFSET_USR] = {
USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
USR_REG_OFFSET(12), USR_REG_OFFSET(13), USR_REG_OFFSET(14),
},
/* FIQ Registers */
[VCPU_REG_OFFSET_FIQ] = {
USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
USR_REG_OFFSET(6), USR_REG_OFFSET(7),
REG_OFFSET(fiq_regs[0]), /* r8 */
REG_OFFSET(fiq_regs[1]), /* r9 */
REG_OFFSET(fiq_regs[2]), /* r10 */
REG_OFFSET(fiq_regs[3]), /* r11 */
REG_OFFSET(fiq_regs[4]), /* r12 */
REG_OFFSET(fiq_regs[5]), /* r13 */
REG_OFFSET(fiq_regs[6]), /* r14 */
},
/* IRQ Registers */
[VCPU_REG_OFFSET_IRQ] = {
USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
USR_REG_OFFSET(12),
REG_OFFSET(irq_regs[0]), /* r13 */
REG_OFFSET(irq_regs[1]), /* r14 */
},
/* SVC Registers */
[VCPU_REG_OFFSET_SVC] = {
USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
USR_REG_OFFSET(12),
REG_OFFSET(svc_regs[0]), /* r13 */
REG_OFFSET(svc_regs[1]), /* r14 */
},
/* ABT Registers */
[VCPU_REG_OFFSET_ABT] = {
USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
USR_REG_OFFSET(12),
REG_OFFSET(abt_regs[0]), /* r13 */
REG_OFFSET(abt_regs[1]), /* r14 */
},
/* UND Registers */
[VCPU_REG_OFFSET_UND] = {
USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
USR_REG_OFFSET(12),
REG_OFFSET(und_regs[0]), /* r13 */
REG_OFFSET(und_regs[1]), /* r14 */
},
};
/*
* Return a pointer to the register identified by reg_num, as banked for
* the current mode of the virtual CPU.
*/
unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
{
unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs;
unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;
switch (mode) {
case USR_MODE...SVC_MODE:
mode &= ~MODE32_BIT; /* 0 ... 3 */
break;
case ABT_MODE:
mode = VCPU_REG_OFFSET_ABT;
break;
case UND_MODE:
mode = VCPU_REG_OFFSET_UND;
break;
case SYSTEM_MODE:
mode = VCPU_REG_OFFSET_USR;
break;
default:
BUG();
}
return reg_array + vcpu_reg_offsets[mode][reg_num];
}
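/*
* For example, vcpu_reg(vcpu, 13) with the vcpu in FIQ mode resolves to
* vcpu_reg_offsets[VCPU_REG_OFFSET_FIQ][13], i.e. REG_OFFSET(fiq_regs[5]),
* and so returns a pointer to the banked SP_fiq slot rather than to
* usr_regs.uregs[13]. The USR..SVC cases rely on those modes encoding as
* 0x10..0x13, so clearing MODE32_BIT yields offsets 0..3 directly.
*/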
/*
* Return the SPSR for the current mode of the virtual CPU.
*/
unsigned long *__vcpu_spsr(struct kvm_vcpu *vcpu)
{
unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;
switch (mode) {
case SVC_MODE:
return &vcpu->arch.ctxt.gp_regs.KVM_ARM_SVC_spsr;
case ABT_MODE:
return &vcpu->arch.ctxt.gp_regs.KVM_ARM_ABT_spsr;
case UND_MODE:
return &vcpu->arch.ctxt.gp_regs.KVM_ARM_UND_spsr;
case IRQ_MODE:
return &vcpu->arch.ctxt.gp_regs.KVM_ARM_IRQ_spsr;
case FIQ_MODE:
return &vcpu->arch.ctxt.gp_regs.KVM_ARM_FIQ_spsr;
default:
BUG();
}
}
/******************************************************************************
* Inject exceptions into the guest
*/
/**
* kvm_inject_vabt - inject an async abort / SError into the guest
* @vcpu: The VCPU to receive the exception
*
* It is assumed that this code is called from the VCPU thread and that the
* VCPU therefore is not currently executing guest code.
*/
void kvm_inject_vabt(struct kvm_vcpu *vcpu)
{
*vcpu_hcr(vcpu) |= HCR_VA;
}
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*/
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <kvm/arm_psci.h>
#include <asm/cputype.h>
#include <linux/uaccess.h>
#include <asm/kvm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM }
#define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }
struct kvm_stats_debugfs_item debugfs_entries[] = {
VCPU_STAT(halt_successful_poll),
VCPU_STAT(halt_attempted_poll),
VCPU_STAT(halt_poll_invalid),
VCPU_STAT(halt_wakeup),
VCPU_STAT(hvc_exit_stat),
VCPU_STAT(wfe_exit_stat),
VCPU_STAT(wfi_exit_stat),
VCPU_STAT(mmio_exit_user),
VCPU_STAT(mmio_exit_kernel),
VCPU_STAT(exits),
{ NULL }
};
static u64 core_reg_offset_from_id(u64 id)
{
return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}
static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
u32 __user *uaddr = (u32 __user *)(long)reg->addr;
struct kvm_regs *regs = &vcpu->arch.ctxt.gp_regs;
u64 off;
if (KVM_REG_SIZE(reg->id) != 4)
return -ENOENT;
/* Our ID is an index into the kvm_regs struct. */
off = core_reg_offset_from_id(reg->id);
if (off >= sizeof(*regs) / KVM_REG_SIZE(reg->id))
return -ENOENT;
return put_user(((u32 *)regs)[off], uaddr);
}
static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
u32 __user *uaddr = (u32 __user *)(long)reg->addr;
struct kvm_regs *regs = &vcpu->arch.ctxt.gp_regs;
u64 off, val;
if (KVM_REG_SIZE(reg->id) != 4)
return -ENOENT;
/* Our ID is an index into the kvm_regs struct. */
off = core_reg_offset_from_id(reg->id);
if (off >= sizeof(*regs) / KVM_REG_SIZE(reg->id))
return -ENOENT;
if (get_user(val, uaddr) != 0)
return -EFAULT;
if (off == KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr)) {
unsigned long mode = val & MODE_MASK;
switch (mode) {
case USR_MODE:
case FIQ_MODE:
case IRQ_MODE:
case SVC_MODE:
case ABT_MODE:
case UND_MODE:
break;
default:
return -EINVAL;
}
}
((u32 *)regs)[off] = val;
return 0;
}
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
return -EINVAL;
}
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
return -EINVAL;
}
#define NUM_TIMER_REGS 3
static bool is_timer_reg(u64 index)
{
switch (index) {
case KVM_REG_ARM_TIMER_CTL:
case KVM_REG_ARM_TIMER_CNT:
case KVM_REG_ARM_TIMER_CVAL:
return true;
}
return false;
}
static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
return -EFAULT;
uindices++;
if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
return -EFAULT;
uindices++;
if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
return -EFAULT;
return 0;
}
static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
void __user *uaddr = (void __user *)(long)reg->addr;
u64 val;
int ret;
ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
if (ret != 0)
return -EFAULT;
return kvm_arm_timer_set_reg(vcpu, reg->id, val);
}
static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
void __user *uaddr = (void __user *)(long)reg->addr;
u64 val;
val = kvm_arm_timer_get_reg(vcpu, reg->id);
return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
}
static unsigned long num_core_regs(void)
{
return sizeof(struct kvm_regs) / sizeof(u32);
}
/**
* kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
*
* This is for all registers.
*/
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
return num_core_regs() + kvm_arm_num_coproc_regs(vcpu)
+ kvm_arm_get_fw_num_regs(vcpu)
+ NUM_TIMER_REGS;
}
/**
* kvm_arm_copy_reg_indices - get indices of all registers.
*
* We do core registers right here, then we append coproc regs.
*/
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
unsigned int i;
const u64 core_reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE;
int ret;
for (i = 0; i < sizeof(struct kvm_regs)/sizeof(u32); i++) {
if (put_user(core_reg | i, uindices))
return -EFAULT;
uindices++;
}
ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
if (ret)
return ret;
uindices += kvm_arm_get_fw_num_regs(vcpu);
ret = copy_timer_indices(vcpu, uindices);
if (ret)
return ret;
uindices += NUM_TIMER_REGS;
return kvm_arm_copy_coproc_indices(vcpu, uindices);
}
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
/* We currently use nothing arch-specific in upper 32 bits */
if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM >> 32)
return -EINVAL;
/* Register group 16 means we want a core register. */
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
return get_core_reg(vcpu, reg);
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
return kvm_arm_get_fw_reg(vcpu, reg);
if (is_timer_reg(reg->id))
return get_timer_reg(vcpu, reg);
return kvm_arm_coproc_get_reg(vcpu, reg);
}
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
/* We currently use nothing arch-specific in upper 32 bits */
if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM >> 32)
return -EINVAL;
/* Register group 16 means we set a core register. */
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
return set_core_reg(vcpu, reg);
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
return kvm_arm_set_fw_reg(vcpu, reg);
if (is_timer_reg(reg->id))
return set_timer_reg(vcpu, reg);
return kvm_arm_coproc_set_reg(vcpu, reg);
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
return -EINVAL;
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
return -EINVAL;
}
int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events)
{
events->exception.serror_pending = !!(*vcpu_hcr(vcpu) & HCR_VA);
/*
* We never return a pending ext_dabt here because we deliver it to
* the virtual CPU directly when setting the event and it's no longer
* 'pending' at this point.
*/
return 0;
}
int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events)
{
bool serror_pending = events->exception.serror_pending;
bool has_esr = events->exception.serror_has_esr;
bool ext_dabt_pending = events->exception.ext_dabt_pending;
if (serror_pending && has_esr)
return -EINVAL;
else if (serror_pending)
kvm_inject_vabt(vcpu);
if (ext_dabt_pending)
kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
return 0;
}
int __attribute_const__ kvm_target_cpu(void)
{
switch (read_cpuid_part()) {
case ARM_CPU_PART_CORTEX_A7:
return KVM_ARM_TARGET_CORTEX_A7;
case ARM_CPU_PART_CORTEX_A15:
return KVM_ARM_TARGET_CORTEX_A15;
default:
return -EINVAL;
}
}
int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
{
int target = kvm_target_cpu();
if (target < 0)
return -ENODEV;
memset(init, 0, sizeof(*init));
/*
* For now, we don't return any features.
* In future, we might use features to return target
* specific features available for the preferred
* target type.
*/
init->target = (__u32)target;
return 0;
}
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
return -EINVAL;
}
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
return -EINVAL;
}
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
struct kvm_translation *tr)
{
return -EINVAL;
}
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
struct kvm_guest_debug *dbg)
{
return -EINVAL;
}
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr)
{
int ret;
switch (attr->group) {
case KVM_ARM_VCPU_TIMER_CTRL:
ret = kvm_arm_timer_set_attr(vcpu, attr);
break;
default:
ret = -ENXIO;
break;
}
return ret;
}
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr)
{
int ret;
switch (attr->group) {
case KVM_ARM_VCPU_TIMER_CTRL:
ret = kvm_arm_timer_get_attr(vcpu, attr);
break;
default:
ret = -ENXIO;
break;
}
return ret;
}
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr)
{
int ret;
switch (attr->group) {
case KVM_ARM_VCPU_TIMER_CTRL:
ret = kvm_arm_timer_has_attr(vcpu, attr);
break;
default:
ret = -ENXIO;
break;
}
return ret;
}
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*/
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_mmu.h>
#include <kvm/arm_hypercalls.h>
#include <trace/events/kvm.h>
#include "trace.h"
typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
int ret;
trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
kvm_vcpu_hvc_get_imm(vcpu));
vcpu->stat.hvc_exit_stat++;
ret = kvm_hvc_call_handler(vcpu);
if (ret < 0) {
vcpu_set_reg(vcpu, 0, ~0UL);
return 1;
}
return ret;
}
static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
/*
* "If an SMC instruction executed at Non-secure EL1 is
* trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
* Trap exception, not a Secure Monitor Call exception [...]"
*
* We need to advance the PC after the trap, as it would
* otherwise return to the same address...
*/
vcpu_set_reg(vcpu, 0, ~0UL);
kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
return 1;
}
/**
* kvm_handle_wfx - handle a WFI or WFE instruction trapped in guests
* @vcpu: the vcpu pointer
* @run: the kvm_run structure pointer
*
* WFE: Yield the CPU and come back to this vcpu when the scheduler
* decides to.
* WFI: Simply call kvm_vcpu_block(), which will halt execution of
* world-switches and schedule other host processes until there is an
* incoming IRQ or FIQ to the VM.
*/
static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
if (kvm_vcpu_get_hsr(vcpu) & HSR_WFI_IS_WFE) {
trace_kvm_wfx(*vcpu_pc(vcpu), true);
vcpu->stat.wfe_exit_stat++;
kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
} else {
trace_kvm_wfx(*vcpu_pc(vcpu), false);
vcpu->stat.wfi_exit_stat++;
kvm_vcpu_block(vcpu);
kvm_clear_request(KVM_REQ_UNHALT, vcpu);
}
kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
return 1;
}
static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
u32 hsr = kvm_vcpu_get_hsr(vcpu);
kvm_pr_unimpl("Unknown exception class: hsr: %#08x\n",
hsr);
kvm_inject_undefined(vcpu);
return 1;
}
static exit_handle_fn arm_exit_handlers[] = {
[0 ... HSR_EC_MAX] = kvm_handle_unknown_ec,
[HSR_EC_WFI] = kvm_handle_wfx,
[HSR_EC_CP15_32] = kvm_handle_cp15_32,
[HSR_EC_CP15_64] = kvm_handle_cp15_64,
[HSR_EC_CP14_MR] = kvm_handle_cp14_32,
[HSR_EC_CP14_LS] = kvm_handle_cp14_load_store,
[HSR_EC_CP14_64] = kvm_handle_cp14_64,
[HSR_EC_CP_0_13] = kvm_handle_cp_0_13_access,
[HSR_EC_CP10_ID] = kvm_handle_cp10_id,
[HSR_EC_HVC] = handle_hvc,
[HSR_EC_SMC] = handle_smc,
[HSR_EC_IABT] = kvm_handle_guest_abort,
[HSR_EC_DABT] = kvm_handle_guest_abort,
};
static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
{
u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
return arm_exit_handlers[hsr_ec];
}
/*
* Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
* proper exit to userspace.
*/
int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
int exception_index)
{
exit_handle_fn exit_handler;
if (ARM_ABORT_PENDING(exception_index)) {
u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
/*
* HVC/SMC already have an adjusted PC, which we need
* to correct in order to return to the trapping instruction
* after having injected the abort.
*/
if (hsr_ec == HSR_EC_HVC || hsr_ec == HSR_EC_SMC) {
u32 adj = kvm_vcpu_trap_il_is32bit(vcpu) ? 4 : 2;
*vcpu_pc(vcpu) -= adj;
}
kvm_inject_vabt(vcpu);
return 1;
}
exception_index = ARM_EXCEPTION_CODE(exception_index);
switch (exception_index) {
case ARM_EXCEPTION_IRQ:
return 1;
case ARM_EXCEPTION_HVC:
/*
* See ARM ARM B1.14.1: "Hyp traps on instructions
* that fail their condition code check"
*/
if (!kvm_condition_valid(vcpu)) {
kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
return 1;
}
exit_handler = kvm_get_exit_handler(vcpu);
return exit_handler(vcpu, run);
case ARM_EXCEPTION_DATA_ABORT:
kvm_inject_vabt(vcpu);
return 1;
case ARM_EXCEPTION_HYP_GONE:
/*
* HYP has been reset to the hyp-stub. This happens
* when a guest is pre-empted by kvm_reboot()'s
* shutdown call.
*/
run->exit_reason = KVM_EXIT_FAIL_ENTRY;
return 0;
default:
kvm_pr_unimpl("Unsupported exception type: %d",
exception_index);
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return 0;
}
}
# SPDX-License-Identifier: GPL-2.0
#
# Makefile for Kernel-based Virtual Machine module, HYP part
#
ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING
KVM=../../../../virt/kvm
CFLAGS_ARMV7VE :=$(call cc-option, -march=armv7ve)
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/aarch32.o
obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += vfp.o
obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o
CFLAGS_banked-sr.o += $(CFLAGS_ARMV7VE)
obj-$(CONFIG_KVM_ARM_HOST) += entry.o
obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o
obj-$(CONFIG_KVM_ARM_HOST) += switch.o
CFLAGS_switch.o += $(CFLAGS_ARMV7VE)
obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o
# KVM code is run at a different exception level with a different memory map, so
# compiler instrumentation that inserts callbacks or checks into the code may
# cause crashes. Just disable it.
GCOV_PROFILE := n
KASAN_SANITIZE := n
UBSAN_SANITIZE := n
KCOV_INSTRUMENT := n
// SPDX-License-Identifier: GPL-2.0-only
/*
* Original code:
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*
* Mostly rewritten in C by Marc Zyngier <marc.zyngier@arm.com>
*/
#include <asm/kvm_hyp.h>
/*
* gcc before 4.9 doesn't understand -march=armv7ve, so we have to
* trick the assembler.
*/
__asm__(".arch_extension virt");
void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt)
{
ctxt->gp_regs.usr_regs.ARM_sp = read_special(SP_usr);
ctxt->gp_regs.usr_regs.ARM_pc = read_special(ELR_hyp);
ctxt->gp_regs.usr_regs.ARM_cpsr = read_special(SPSR);
ctxt->gp_regs.KVM_ARM_SVC_sp = read_special(SP_svc);
ctxt->gp_regs.KVM_ARM_SVC_lr = read_special(LR_svc);
ctxt->gp_regs.KVM_ARM_SVC_spsr = read_special(SPSR_svc);
ctxt->gp_regs.KVM_ARM_ABT_sp = read_special(SP_abt);
ctxt->gp_regs.KVM_ARM_ABT_lr = read_special(LR_abt);
ctxt->gp_regs.KVM_ARM_ABT_spsr = read_special(SPSR_abt);
ctxt->gp_regs.KVM_ARM_UND_sp = read_special(SP_und);
ctxt->gp_regs.KVM_ARM_UND_lr = read_special(LR_und);
ctxt->gp_regs.KVM_ARM_UND_spsr = read_special(SPSR_und);
ctxt->gp_regs.KVM_ARM_IRQ_sp = read_special(SP_irq);
ctxt->gp_regs.KVM_ARM_IRQ_lr = read_special(LR_irq);
ctxt->gp_regs.KVM_ARM_IRQ_spsr = read_special(SPSR_irq);
ctxt->gp_regs.KVM_ARM_FIQ_r8 = read_special(R8_fiq);
ctxt->gp_regs.KVM_ARM_FIQ_r9 = read_special(R9_fiq);
ctxt->gp_regs.KVM_ARM_FIQ_r10 = read_special(R10_fiq);
ctxt->gp_regs.KVM_ARM_FIQ_fp = read_special(R11_fiq);
ctxt->gp_regs.KVM_ARM_FIQ_ip = read_special(R12_fiq);
ctxt->gp_regs.KVM_ARM_FIQ_sp = read_special(SP_fiq);
ctxt->gp_regs.KVM_ARM_FIQ_lr = read_special(LR_fiq);
ctxt->gp_regs.KVM_ARM_FIQ_spsr = read_special(SPSR_fiq);
}
void __hyp_text __banked_restore_state(struct kvm_cpu_context *ctxt)
{
write_special(ctxt->gp_regs.usr_regs.ARM_sp, SP_usr);
write_special(ctxt->gp_regs.usr_regs.ARM_pc, ELR_hyp);
write_special(ctxt->gp_regs.usr_regs.ARM_cpsr, SPSR_cxsf);
write_special(ctxt->gp_regs.KVM_ARM_SVC_sp, SP_svc);
write_special(ctxt->gp_regs.KVM_ARM_SVC_lr, LR_svc);
write_special(ctxt->gp_regs.KVM_ARM_SVC_spsr, SPSR_svc);
write_special(ctxt->gp_regs.KVM_ARM_ABT_sp, SP_abt);
write_special(ctxt->gp_regs.KVM_ARM_ABT_lr, LR_abt);
write_special(ctxt->gp_regs.KVM_ARM_ABT_spsr, SPSR_abt);
write_special(ctxt->gp_regs.KVM_ARM_UND_sp, SP_und);
write_special(ctxt->gp_regs.KVM_ARM_UND_lr, LR_und);
write_special(ctxt->gp_regs.KVM_ARM_UND_spsr, SPSR_und);
write_special(ctxt->gp_regs.KVM_ARM_IRQ_sp, SP_irq);
write_special(ctxt->gp_regs.KVM_ARM_IRQ_lr, LR_irq);
write_special(ctxt->gp_regs.KVM_ARM_IRQ_spsr, SPSR_irq);
write_special(ctxt->gp_regs.KVM_ARM_FIQ_r8, R8_fiq);
write_special(ctxt->gp_regs.KVM_ARM_FIQ_r9, R9_fiq);
write_special(ctxt->gp_regs.KVM_ARM_FIQ_r10, R10_fiq);
write_special(ctxt->gp_regs.KVM_ARM_FIQ_fp, R11_fiq);
write_special(ctxt->gp_regs.KVM_ARM_FIQ_ip, R12_fiq);
write_special(ctxt->gp_regs.KVM_ARM_FIQ_sp, SP_fiq);
write_special(ctxt->gp_regs.KVM_ARM_FIQ_lr, LR_fiq);
write_special(ctxt->gp_regs.KVM_ARM_FIQ_spsr, SPSR_fiq);
}
// SPDX-License-Identifier: GPL-2.0-only
/*
* Original code:
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*
* Mostly rewritten in C by Marc Zyngier <marc.zyngier@arm.com>
*/
#include <asm/kvm_hyp.h>
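/*
* The 64-bit cp15 registers (TTBR0, TTBR1, PAR) occupy two consecutive
* 32-bit slots in the cp15[] array; return a u64 view of such a pair.
*/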
static u64 *cp15_64(struct kvm_cpu_context *ctxt, int idx)
{
return (u64 *)(ctxt->cp15 + idx);
}
void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
{
ctxt->cp15[c0_CSSELR] = read_sysreg(CSSELR);
ctxt->cp15[c1_SCTLR] = read_sysreg(SCTLR);
ctxt->cp15[c1_CPACR] = read_sysreg(CPACR);
*cp15_64(ctxt, c2_TTBR0) = read_sysreg(TTBR0);
*cp15_64(ctxt, c2_TTBR1) = read_sysreg(TTBR1);
ctxt->cp15[c2_TTBCR] = read_sysreg(TTBCR);
ctxt->cp15[c3_DACR] = read_sysreg(DACR);
ctxt->cp15[c5_DFSR] = read_sysreg(DFSR);
ctxt->cp15[c5_IFSR] = read_sysreg(IFSR);
ctxt->cp15[c5_ADFSR] = read_sysreg(ADFSR);
ctxt->cp15[c5_AIFSR] = read_sysreg(AIFSR);
ctxt->cp15[c6_DFAR] = read_sysreg(DFAR);
ctxt->cp15[c6_IFAR] = read_sysreg(IFAR);
*cp15_64(ctxt, c7_PAR) = read_sysreg(PAR);
ctxt->cp15[c10_PRRR] = read_sysreg(PRRR);
ctxt->cp15[c10_NMRR] = read_sysreg(NMRR);
ctxt->cp15[c10_AMAIR0] = read_sysreg(AMAIR0);
ctxt->cp15[c10_AMAIR1] = read_sysreg(AMAIR1);
ctxt->cp15[c12_VBAR] = read_sysreg(VBAR);
ctxt->cp15[c13_CID] = read_sysreg(CID);
ctxt->cp15[c13_TID_URW] = read_sysreg(TID_URW);
ctxt->cp15[c13_TID_URO] = read_sysreg(TID_URO);
ctxt->cp15[c13_TID_PRIV] = read_sysreg(TID_PRIV);
ctxt->cp15[c14_CNTKCTL] = read_sysreg(CNTKCTL);
}
void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
{
write_sysreg(ctxt->cp15[c0_MPIDR], VMPIDR);
write_sysreg(ctxt->cp15[c0_CSSELR], CSSELR);
write_sysreg(ctxt->cp15[c1_SCTLR], SCTLR);
write_sysreg(ctxt->cp15[c1_CPACR], CPACR);
write_sysreg(*cp15_64(ctxt, c2_TTBR0), TTBR0);
write_sysreg(*cp15_64(ctxt, c2_TTBR1), TTBR1);
write_sysreg(ctxt->cp15[c2_TTBCR], TTBCR);
write_sysreg(ctxt->cp15[c3_DACR], DACR);
write_sysreg(ctxt->cp15[c5_DFSR], DFSR);
write_sysreg(ctxt->cp15[c5_IFSR], IFSR);
write_sysreg(ctxt->cp15[c5_ADFSR], ADFSR);
write_sysreg(ctxt->cp15[c5_AIFSR], AIFSR);
write_sysreg(ctxt->cp15[c6_DFAR], DFAR);
write_sysreg(ctxt->cp15[c6_IFAR], IFAR);
write_sysreg(*cp15_64(ctxt, c7_PAR), PAR);
write_sysreg(ctxt->cp15[c10_PRRR], PRRR);
write_sysreg(ctxt->cp15[c10_NMRR], NMRR);
write_sysreg(ctxt->cp15[c10_AMAIR0], AMAIR0);
write_sysreg(ctxt->cp15[c10_AMAIR1], AMAIR1);
write_sysreg(ctxt->cp15[c12_VBAR], VBAR);
write_sysreg(ctxt->cp15[c13_CID], CID);
write_sysreg(ctxt->cp15[c13_TID_URW], TID_URW);
write_sysreg(ctxt->cp15[c13_TID_URO], TID_URO);
write_sysreg(ctxt->cp15[c13_TID_PRIV], TID_PRIV);
write_sysreg(ctxt->cp15[c14_CNTKCTL], CNTKCTL);
}
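The removed switch.c (not shown in this excerpt) strings these helpers together around __guest_enter(). A rough, hedged sketch of that ordering follows; the function name and the vcpu->arch field names are assumed, and the trap, VGIC and timer handling is omitted:

/*
 * Hedged sketch of a world switch; only the save/restore helpers and
 * __guest_enter() appear in this excerpt, the rest is illustrative.
 */
static int __hyp_text sketch_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
	struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
	int exit_code;

	__sysreg_save_state(host_ctxt);		/* host cp15 state */
	__banked_save_state(host_ctxt);		/* host banked registers */

	__sysreg_restore_state(guest_ctxt);	/* guest cp15 state */
	__banked_restore_state(guest_ctxt);	/* guest banked registers */

	exit_code = __guest_enter(vcpu, host_ctxt);	/* exception code */

	__banked_save_state(guest_ctxt);
	__sysreg_save_state(guest_ctxt);

	__banked_restore_state(host_ctxt);
	__sysreg_restore_state(host_ctxt);

	return exit_code;
}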
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2016 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*/
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
.arch_extension virt
.text
.pushsection .hyp.text, "ax"
#define USR_REGS_OFFSET (CPU_CTXT_GP_REGS + GP_REGS_USR)
/* int __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host) */
ENTRY(__guest_enter)
@ Save host registers
add r1, r1, #(USR_REGS_OFFSET + S_R4)
stm r1!, {r4-r12}
str lr, [r1, #4] @ Skip SP_usr (already saved)
@ Restore guest registers
add r0, r0, #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R0)
ldr lr, [r0, #S_LR]
ldm r0, {r0-r12}
clrex
eret
ENDPROC(__guest_enter)
ENTRY(__guest_exit)
/*
* return convention:
* guest r0, r1, r2 saved on the stack
* r0: vcpu pointer
* r1: exception code
*/
add r2, r0, #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R3)
stm r2!, {r3-r12}
str lr, [r2, #4]
add r2, r0, #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R0)
pop {r3, r4, r5} @ r0, r1, r2
stm r2, {r3-r5}
ldr r0, [r0, #VCPU_HOST_CTXT]
add r0, r0, #(USR_REGS_OFFSET + S_R4)
ldm r0!, {r4-r12}
ldr lr, [r0, #4]
mov r0, r1
mrs r1, SPSR
mrs r2, ELR_hyp
mrc p15, 4, r3, c5, c2, 0 @ HSR
/*
* Force loads and stores to complete before unmasking aborts
* and forcing the delivery of the exception. This gives us a
* single instruction window, which the handler will try to
* match.
*/
dsb sy
cpsie a
.global abort_guest_exit_start
abort_guest_exit_start:
isb
.global abort_guest_exit_end
abort_guest_exit_end:
/*
* If we took an abort, r0[31] will be set, and cmp will set
* the N flag in the CPSR.
*/
cmp r0, #0
msrmi SPSR_cxsf, r1
msrmi ELR_hyp, r2
mcrmi p15, 4, r3, c5, c2, 0 @ HSR
bx lr
ENDPROC(__guest_exit)
/*
* If VFPv3 support is not available, then we will not switch the VFP
* registers; however, cp10 and cp11 accesses will still trap and fall back
* to the regular coprocessor emulation code, which currently injects an
* undefined exception into the guest.
*/
#ifdef CONFIG_VFPv3
ENTRY(__vfp_guest_restore)
push {r3, r4, lr}
@ NEON/VFP used. Turn on VFP access.
mrc p15, 4, r1, c1, c1, 2 @ HCPTR
bic r1, r1, #(HCPTR_TCP(10) | HCPTR_TCP(11))
mcr p15, 4, r1, c1, c1, 2 @ HCPTR
isb
@ Switch VFP/NEON hardware state to the guest's
mov r4, r0
ldr r0, [r0, #VCPU_HOST_CTXT]
add r0, r0, #CPU_CTXT_VFP
bl __vfp_save_state
add r0, r4, #(VCPU_GUEST_CTXT + CPU_CTXT_VFP)
bl __vfp_restore_state
pop {r3, r4, lr}
pop {r0, r1, r2}
clrex
eret
ENDPROC(__vfp_guest_restore)
#endif
.popsection
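The single-isb window bracketed by abort_guest_exit_start/end above is matched by the HYP abort vector in hyp-entry.S (not shown in this excerpt). A hedged, C-style rendering of that check, with an invented helper name:

/*
 * Hedged sketch: the real check is assembly in hyp-entry.S. An async
 * abort taken in the single-isb window is expected to leave ELR_hyp
 * pointing at the isb itself or just past it; if so, the abort is
 * reported by setting bit 31 of the value __guest_exit() returns in
 * r0 (see the cmp/msrmi sequence above).
 */
extern char abort_guest_exit_start[], abort_guest_exit_end[];

static inline bool fault_in_abort_window(unsigned long elr_hyp)
{
	return elr_hyp == (unsigned long)abort_guest_exit_start ||
	       elr_hyp == (unsigned long)abort_guest_exit_end;
}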
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* irq.h: in kernel interrupt controller related definitions
* Copyright (c) 2016 Red Hat, Inc.
*
* This header is included by irqchip.c. However, on ARM, interrupt
* controller declarations are located in include/kvm/arm_vgic.h since
* they are mostly shared between arm and arm64.
*/
#ifndef __IRQ_H
#define __IRQ_H
#include <kvm/arm_vgic.h>
#endif