Commit 300dca68 authored by Marc Zyngier

Merge branch 'kvm-arm64/pre-nv-5.9' into kvmarm-master/next-WIP

Signed-off-by: Marc Zyngier <maz@kernel.org>
parents c199a009 41ce82f6
@@ -62,7 +62,8 @@
 #define ARM64_HAS_GENERIC_AUTH		52
 #define ARM64_HAS_32BIT_EL1		53
 #define ARM64_BTI			54
+#define ARM64_HAS_ARMv8_4_TTL		55

-#define ARM64_NCAPS			55
+#define ARM64_NCAPS			56

 #endif /* __ASM_CPUCAPS_H */
@@ -95,6 +95,7 @@ extern void *__nvhe_undefined_symbol;
 struct kvm;
 struct kvm_vcpu;
+struct kvm_s2_mmu;

 DECLARE_KVM_NVHE_SYM(__kvm_hyp_init);
 DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
@@ -108,9 +109,10 @@ DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
 #endif

 extern void __kvm_flush_vm_context(void);
-extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
-extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
-extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
+extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
+                                     int level);
+extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
+extern void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu);

 extern void __kvm_timer_set_cntvoff(u64 cntvoff);
......
@@ -124,33 +124,12 @@ static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
 static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
 {
-        return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
-}
-
-static inline unsigned long *__vcpu_elr_el1(const struct kvm_vcpu *vcpu)
-{
-        return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
-}
-
-static inline unsigned long vcpu_read_elr_el1(const struct kvm_vcpu *vcpu)
-{
-        if (vcpu->arch.sysregs_loaded_on_cpu)
-                return read_sysreg_el1(SYS_ELR);
-        else
-                return *__vcpu_elr_el1(vcpu);
-}
-
-static inline void vcpu_write_elr_el1(const struct kvm_vcpu *vcpu, unsigned long v)
-{
-        if (vcpu->arch.sysregs_loaded_on_cpu)
-                write_sysreg_el1(v, SYS_ELR);
-        else
-                *__vcpu_elr_el1(vcpu) = v;
+        return (unsigned long *)&vcpu_gp_regs(vcpu)->pc;
 }

 static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
 {
-        return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
+        return (unsigned long *)&vcpu_gp_regs(vcpu)->pstate;
 }

 static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
@@ -179,14 +158,14 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
 static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
                                                   u8 reg_num)
 {
-        return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
+        return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs[reg_num];
 }

 static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
                                          unsigned long val)
 {
         if (reg_num != 31)
-                vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
+                vcpu_gp_regs(vcpu)->regs[reg_num] = val;
 }

 static inline unsigned long vcpu_read_spsr(const struct kvm_vcpu *vcpu)
@@ -197,7 +176,7 @@ static inline unsigned long vcpu_read_spsr(const struct kvm_vcpu *vcpu)
         if (vcpu->arch.sysregs_loaded_on_cpu)
                 return read_sysreg_el1(SYS_SPSR);
         else
-                return vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
+                return __vcpu_sys_reg(vcpu, SPSR_EL1);
 }

 static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
@@ -210,7 +189,7 @@ static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
         if (vcpu->arch.sysregs_loaded_on_cpu)
                 write_sysreg_el1(v, SYS_SPSR);
         else
-                vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1] = v;
+                __vcpu_sys_reg(vcpu, SPSR_EL1) = v;
 }

 /*
@@ -519,11 +498,11 @@ static __always_inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
 static __always_inline void __kvm_skip_instr(struct kvm_vcpu *vcpu)
 {
         *vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
-        vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR);
+        vcpu_gp_regs(vcpu)->pstate = read_sysreg_el2(SYS_SPSR);

         kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));

-        write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, SYS_SPSR);
+        write_sysreg_el2(vcpu_gp_regs(vcpu)->pstate, SYS_SPSR);
         write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
 }
......
@@ -66,19 +66,34 @@ struct kvm_vmid {
         u32 vmid;
 };

-struct kvm_arch {
+struct kvm_s2_mmu {
         struct kvm_vmid vmid;

-        /* stage2 entry level table */
-        pgd_t *pgd;
-        phys_addr_t pgd_phys;
-
-        /* VTCR_EL2 value for this VM */
-        u64 vtcr;
+        /*
+         * stage2 entry level table
+         *
+         * Two kvm_s2_mmu structures in the same VM can point to the same
+         * pgd here. This happens when running a guest using a
+         * translation regime that isn't affected by its own stage-2
+         * translation, such as a non-VHE hypervisor running at vEL2, or
+         * for vEL1/EL0 with vHCR_EL2.VM == 0. In that case, we use the
+         * canonical stage-2 page tables.
+         */
+        pgd_t *pgd;
+        phys_addr_t pgd_phys;

         /* The last vcpu id that ran on each physical CPU */
         int __percpu *last_vcpu_ran;

+        struct kvm *kvm;
+};
+
+struct kvm_arch {
+        struct kvm_s2_mmu mmu;
+
+        /* VTCR_EL2 value for this VM */
+        u64 vtcr;
+
         /* The maximum number of vCPUs depends on the used GIC model */
         int max_vcpus;

@@ -170,6 +185,16 @@ enum vcpu_sysreg {
         APGAKEYLO_EL1,
         APGAKEYHI_EL1,

+        ELR_EL1,
+        SP_EL1,
+        SPSR_EL1,
+
+        CNTVOFF_EL2,
+        CNTV_CVAL_EL0,
+        CNTV_CTL_EL0,
+        CNTP_CVAL_EL0,
+        CNTP_CTL_EL0,
+
         /* 32bit specific registers. Keep them at the end of the range */
         DACR32_EL2,     /* Domain Access Control Register */
         IFSR32_EL2,     /* Instruction Fault Status Register */
@@ -221,7 +246,15 @@ enum vcpu_sysreg {
 #define NR_COPRO_REGS   (NR_SYS_REGS * 2)

 struct kvm_cpu_context {
-        struct kvm_regs gp_regs;
+        struct user_pt_regs regs;       /* sp = sp_el0 */
+
+        u64     spsr_abt;
+        u64     spsr_und;
+        u64     spsr_irq;
+        u64     spsr_fiq;
+
+        struct user_fpsimd_state fp_regs;
+
         union {
                 u64 sys_regs[NR_SYS_REGS];
                 u32 copro[NR_COPRO_REGS];
@@ -254,6 +287,9 @@ struct kvm_vcpu_arch {
         void *sve_state;
         unsigned int sve_max_vl;

+        /* Stage 2 paging state used by the hardware on next switch */
+        struct kvm_s2_mmu *hw_mmu;
+
         /* HYP configuration */
         u64 hcr_el2;
         u32 mdcr_el2;
@@ -384,15 +420,20 @@ struct kvm_vcpu_arch {
                               system_supports_generic_auth()) && \
                              ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH))

-#define vcpu_gp_regs(v)         (&(v)->arch.ctxt.gp_regs)
+#define vcpu_gp_regs(v)         (&(v)->arch.ctxt.regs)

 /*
- * Only use __vcpu_sys_reg if you know you want the memory backed version of a
- * register, and not the one most recently accessed by a running VCPU. For
- * example, for userspace access or for system registers that are never context
- * switched, but only emulated.
+ * Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the
+ * memory backed version of a register, and not the one most recently
+ * accessed by a running VCPU. For example, for userspace access or
+ * for system registers that are never context switched, but only
+ * emulated.
  */
-#define __vcpu_sys_reg(v,r)     ((v)->arch.ctxt.sys_regs[(r)])
+#define __ctxt_sys_reg(c,r)     (&(c)->sys_regs[(r)])
+#define ctxt_sys_reg(c,r)       (*__ctxt_sys_reg(c,r))
+
+#define __vcpu_sys_reg(v,r)     (ctxt_sys_reg(&(v)->arch.ctxt, (r)))

 u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
 void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
@@ -538,7 +579,7 @@ DECLARE_PER_CPU(kvm_host_data_t, kvm_host_data);
 static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
 {
         /* The host's MPIDR is immutable, so let's set it up at boot time */
-        cpu_ctxt->sys_regs[MPIDR_EL1] = read_cpuid_mpidr();
+        ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
 }

 static inline bool kvm_arch_requires_vhe(void)
......
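For orientation (not part of the commit): after the kvm_cpu_context rework above, the general-purpose, banked-SPSR and FP state live directly in the context, and every memory-backed system register -- including the newly added ELR_EL1/SP_EL1/SPSR_EL1 and the timer registers -- is reached through the ctxt_sys_reg()/__vcpu_sys_reg() accessors. A minimal sketch of how the layering resolves, assuming a valid vcpu pointer:

        /* Sketch only -- illustrates the accessor layering, not code from the commit. */
        struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;
        unsigned long pc = vcpu_gp_regs(vcpu)->pc;      /* vcpu_gp_regs() now points at ctxt->regs */
        u64 *elr = __ctxt_sys_reg(ctxt, ELR_EL1);       /* pointer into ctxt->sys_regs[] */

        ctxt_sys_reg(ctxt, SPSR_EL1) = 0;               /* same as *__ctxt_sys_reg(ctxt, SPSR_EL1) = 0 */
        pc += __vcpu_sys_reg(vcpu, SPSR_EL1);           /* same as ctxt_sys_reg(&vcpu->arch.ctxt, SPSR_EL1) */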
@@ -134,8 +134,8 @@ int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
 void free_hyp_pgds(void);

 void stage2_unmap_vm(struct kvm *kvm);
-int kvm_alloc_stage2_pgd(struct kvm *kvm);
-void kvm_free_stage2_pgd(struct kvm *kvm);
+int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu);
+void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
                           phys_addr_t pa, unsigned long size, bool writable);

@@ -577,13 +577,13 @@ static inline u64 kvm_vttbr_baddr_mask(struct kvm *kvm)
         return vttbr_baddr_mask(kvm_phys_shift(kvm), kvm_stage2_levels(kvm));
 }

-static __always_inline u64 kvm_get_vttbr(struct kvm *kvm)
+static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
 {
-        struct kvm_vmid *vmid = &kvm->arch.vmid;
+        struct kvm_vmid *vmid = &mmu->vmid;
         u64 vmid_field, baddr;
         u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0;

-        baddr = kvm->arch.pgd_phys;
+        baddr = mmu->pgd_phys;
         vmid_field = (u64)vmid->vmid << VTTBR_VMID_SHIFT;
         return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
 }

@@ -592,10 +592,10 @@ static __always_inline u64 kvm_get_vttbr(struct kvm *kvm)
  * Must be called from hyp code running at EL2 with an updated VTTBR
  * and interrupts disabled.
  */
-static __always_inline void __load_guest_stage2(struct kvm *kvm)
+static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu)
 {
-        write_sysreg(kvm->arch.vtcr, vtcr_el2);
-        write_sysreg(kvm_get_vttbr(kvm), vttbr_el2);
+        write_sysreg(kern_hyp_va(mmu->kvm)->arch.vtcr, vtcr_el2);
+        write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);

         /*
          * ARM errata 1165522 and 1530923 require the actual execution of the
......
@@ -178,10 +178,12 @@
 #define PTE_S2_RDONLY           (_AT(pteval_t, 1) << 6)   /* HAP[2:1] */
 #define PTE_S2_RDWR             (_AT(pteval_t, 3) << 6)   /* HAP[2:1] */
 #define PTE_S2_XN               (_AT(pteval_t, 2) << 53)  /* XN[1:0] */
+#define PTE_S2_SW_RESVD         (_AT(pteval_t, 15) << 55) /* Reserved for SW */

 #define PMD_S2_RDONLY           (_AT(pmdval_t, 1) << 6)   /* HAP[2:1] */
 #define PMD_S2_RDWR             (_AT(pmdval_t, 3) << 6)   /* HAP[2:1] */
 #define PMD_S2_XN               (_AT(pmdval_t, 2) << 53)  /* XN[1:0] */
+#define PMD_S2_SW_RESVD         (_AT(pmdval_t, 15) << 55) /* Reserved for SW */

 #define PUD_S2_RDONLY           (_AT(pudval_t, 1) << 6)   /* HAP[2:1] */
 #define PUD_S2_RDWR             (_AT(pudval_t, 3) << 6)   /* HAP[2:1] */
......
@@ -256,4 +256,13 @@ stage2_pgd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
         return (boundary - 1 < end - 1) ? boundary : end;
 }

+/*
+ * Level values for the ARMv8.4-TTL extension, mapping PUD/PMD/PTE and
+ * the architectural page-table level.
+ */
+#define S2_NO_LEVEL_HINT        0
+#define S2_PUD_LEVEL            1
+#define S2_PMD_LEVEL            2
+#define S2_PTE_LEVEL            3
+
 #endif  /* __ARM64_S2_PGTABLE_H_ */
@@ -746,6 +746,7 @@
 /* id_aa64mmfr2 */
 #define ID_AA64MMFR2_E0PD_SHIFT         60
+#define ID_AA64MMFR2_TTL_SHIFT          48
 #define ID_AA64MMFR2_FWB_SHIFT          40
 #define ID_AA64MMFR2_AT_SHIFT           32
 #define ID_AA64MMFR2_LVA_SHIFT          16
......
@@ -10,6 +10,7 @@
 #ifndef __ASSEMBLY__

+#include <linux/bitfield.h>
 #include <linux/mm_types.h>
 #include <linux/sched.h>
 #include <asm/cputype.h>
@@ -59,6 +60,50 @@
                 __ta;                                           \
         })

+/*
+ * Level-based TLBI operations.
+ *
+ * When ARMv8.4-TTL exists, TLBI operations take an additional hint for
+ * the level at which the invalidation must take place. If the level is
+ * wrong, no invalidation may take place. In the case where the level
+ * cannot be easily determined, a 0 value for the level parameter will
+ * perform a non-hinted invalidation.
+ *
+ * For Stage-2 invalidation, use the level values provided to that effect
+ * in asm/stage2_pgtable.h.
+ */
+#define TLBI_TTL_MASK           GENMASK_ULL(47, 44)
+#define TLBI_TTL_TG_4K          1
+#define TLBI_TTL_TG_16K         2
+#define TLBI_TTL_TG_64K         3
+
+#define __tlbi_level(op, addr, level)                                   \
+        do {                                                            \
+                u64 arg = addr;                                         \
+                                                                        \
+                if (cpus_have_const_cap(ARM64_HAS_ARMv8_4_TTL) &&       \
+                    level) {                                            \
+                        u64 ttl = level & 3;                            \
+                                                                        \
+                        switch (PAGE_SIZE) {                            \
+                        case SZ_4K:                                     \
+                                ttl |= TLBI_TTL_TG_4K << 2;             \
+                                break;                                  \
+                        case SZ_16K:                                    \
+                                ttl |= TLBI_TTL_TG_16K << 2;            \
+                                break;                                  \
+                        case SZ_64K:                                    \
+                                ttl |= TLBI_TTL_TG_64K << 2;            \
+                                break;                                  \
+                        }                                               \
+                                                                        \
+                        arg &= ~TLBI_TTL_MASK;                          \
+                        arg |= FIELD_PREP(TLBI_TTL_MASK, ttl);          \
+                }                                                       \
+                                                                        \
+                __tlbi(op, arg);                                        \
+        } while(0)
+
 /*
  * TLB Invalidation
  * ================
......
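The following usage sketch is illustrative and not part of the commit: it shows how a stage-2 invalidation by IPA can pass one of the level hints from asm/stage2_pgtable.h to __tlbi_level(), mirroring what the tlb.c hunks further down do. On CPUs without ARMv8.4-TTL, or with S2_NO_LEVEL_HINT, the macro degrades to a plain, non-hinted TLBI. The helper name is hypothetical.

        /* Hypothetical helper, for illustration only. */
        static void example_flush_s2_ipa(phys_addr_t ipa, int level)
        {
                dsb(ishst);
                /* level: S2_NO_LEVEL_HINT, S2_PUD_LEVEL, S2_PMD_LEVEL or S2_PTE_LEVEL */
                __tlbi_level(ipas2e1is, ipa >> 12, level);
                dsb(ish);
                isb();
        }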
@@ -102,13 +102,12 @@ int main(void)
   DEFINE(VCPU_FAULT_DISR,       offsetof(struct kvm_vcpu, arch.fault.disr_el1));
   DEFINE(VCPU_WORKAROUND_FLAGS, offsetof(struct kvm_vcpu, arch.workaround_flags));
   DEFINE(VCPU_HCR_EL2,          offsetof(struct kvm_vcpu, arch.hcr_el2));
-  DEFINE(CPU_GP_REGS,           offsetof(struct kvm_cpu_context, gp_regs));
+  DEFINE(CPU_USER_PT_REGS,      offsetof(struct kvm_cpu_context, regs));
   DEFINE(CPU_APIAKEYLO_EL1,     offsetof(struct kvm_cpu_context, sys_regs[APIAKEYLO_EL1]));
   DEFINE(CPU_APIBKEYLO_EL1,     offsetof(struct kvm_cpu_context, sys_regs[APIBKEYLO_EL1]));
   DEFINE(CPU_APDAKEYLO_EL1,     offsetof(struct kvm_cpu_context, sys_regs[APDAKEYLO_EL1]));
   DEFINE(CPU_APDBKEYLO_EL1,     offsetof(struct kvm_cpu_context, sys_regs[APDBKEYLO_EL1]));
   DEFINE(CPU_APGAKEYLO_EL1,     offsetof(struct kvm_cpu_context, sys_regs[APGAKEYLO_EL1]));
-  DEFINE(CPU_USER_PT_REGS,      offsetof(struct kvm_regs, regs));
   DEFINE(HOST_CONTEXT_VCPU,     offsetof(struct kvm_cpu_context, __hyp_running_vcpu));
   DEFINE(HOST_DATA_CONTEXT,     offsetof(struct kvm_host_data, host_ctxt));
 #endif
......
@@ -323,6 +323,7 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
 static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
         ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_E0PD_SHIFT, 4, 0),
+        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_TTL_SHIFT, 4, 0),
         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_FWB_SHIFT, 4, 0),
         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_AT_SHIFT, 4, 0),
         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
@@ -1882,6 +1883,16 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                 .matches = has_cpuid_feature,
                 .cpu_enable = cpu_has_fwb,
         },
+        {
+                .desc = "ARMv8.4 Translation Table Level",
+                .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+                .capability = ARM64_HAS_ARMv8_4_TTL,
+                .sys_reg = SYS_ID_AA64MMFR2_EL1,
+                .sign = FTR_UNSIGNED,
+                .field_pos = ID_AA64MMFR2_TTL_SHIFT,
+                .min_field_value = 1,
+                .matches = has_cpuid_feature,
+        },
 #ifdef CONFIG_ARM64_HW_AFDBM
         {
                 /*
......
@@ -51,6 +51,93 @@ static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
                               struct arch_timer_context *timer,
                               enum kvm_arch_timer_regs treg);

+u32 timer_get_ctl(struct arch_timer_context *ctxt)
+{
+        struct kvm_vcpu *vcpu = ctxt->vcpu;
+
+        switch(arch_timer_ctx_index(ctxt)) {
+        case TIMER_VTIMER:
+                return __vcpu_sys_reg(vcpu, CNTV_CTL_EL0);
+        case TIMER_PTIMER:
+                return __vcpu_sys_reg(vcpu, CNTP_CTL_EL0);
+        default:
+                WARN_ON(1);
+                return 0;
+        }
+}
+
+u64 timer_get_cval(struct arch_timer_context *ctxt)
+{
+        struct kvm_vcpu *vcpu = ctxt->vcpu;
+
+        switch(arch_timer_ctx_index(ctxt)) {
+        case TIMER_VTIMER:
+                return __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0);
+        case TIMER_PTIMER:
+                return __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0);
+        default:
+                WARN_ON(1);
+                return 0;
+        }
+}
+
+static u64 timer_get_offset(struct arch_timer_context *ctxt)
+{
+        struct kvm_vcpu *vcpu = ctxt->vcpu;
+
+        switch(arch_timer_ctx_index(ctxt)) {
+        case TIMER_VTIMER:
+                return __vcpu_sys_reg(vcpu, CNTVOFF_EL2);
+        default:
+                return 0;
+        }
+}
+
+static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl)
+{
+        struct kvm_vcpu *vcpu = ctxt->vcpu;
+
+        switch(arch_timer_ctx_index(ctxt)) {
+        case TIMER_VTIMER:
+                __vcpu_sys_reg(vcpu, CNTV_CTL_EL0) = ctl;
+                break;
+        case TIMER_PTIMER:
+                __vcpu_sys_reg(vcpu, CNTP_CTL_EL0) = ctl;
+                break;
+        default:
+                WARN_ON(1);
+        }
+}
+
+static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval)
+{
+        struct kvm_vcpu *vcpu = ctxt->vcpu;
+
+        switch(arch_timer_ctx_index(ctxt)) {
+        case TIMER_VTIMER:
+                __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0) = cval;
+                break;
+        case TIMER_PTIMER:
+                __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = cval;
+                break;
+        default:
+                WARN_ON(1);
+        }
+}
+
+static void timer_set_offset(struct arch_timer_context *ctxt, u64 offset)
+{
+        struct kvm_vcpu *vcpu = ctxt->vcpu;
+
+        switch(arch_timer_ctx_index(ctxt)) {
+        case TIMER_VTIMER:
+                __vcpu_sys_reg(vcpu, CNTVOFF_EL2) = offset;
+                break;
+        default:
+                WARN(offset, "timer %ld\n", arch_timer_ctx_index(ctxt));
+        }
+}
+
 u64 kvm_phys_timer_read(void)
 {
         return timecounter->cc->read(timecounter->cc);
@@ -124,8 +211,8 @@ static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
 {
         u64 cval, now;

-        cval = timer_ctx->cnt_cval;
-        now = kvm_phys_timer_read() - timer_ctx->cntvoff;
+        cval = timer_get_cval(timer_ctx);
+        now = kvm_phys_timer_read() - timer_get_offset(timer_ctx);

         if (now < cval) {
                 u64 ns;
@@ -144,8 +231,8 @@ static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
 {
         WARN_ON(timer_ctx && timer_ctx->loaded);
         return timer_ctx &&
-                !(timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_IT_MASK) &&
-                (timer_ctx->cnt_ctl & ARCH_TIMER_CTRL_ENABLE);
+               ((timer_get_ctl(timer_ctx) &
+                 (ARCH_TIMER_CTRL_IT_MASK | ARCH_TIMER_CTRL_ENABLE)) == ARCH_TIMER_CTRL_ENABLE);
 }

 /*
@@ -256,8 +343,8 @@ static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
         if (!kvm_timer_irq_can_fire(timer_ctx))
                 return false;

-        cval = timer_ctx->cnt_cval;
-        now = kvm_phys_timer_read() - timer_ctx->cntvoff;
+        cval = timer_get_cval(timer_ctx);
+        now = kvm_phys_timer_read() - timer_get_offset(timer_ctx);

         return cval <= now;
 }
@@ -350,8 +437,8 @@ static void timer_save_state(struct arch_timer_context *ctx)
         switch (index) {
         case TIMER_VTIMER:
-                ctx->cnt_ctl = read_sysreg_el0(SYS_CNTV_CTL);
-                ctx->cnt_cval = read_sysreg_el0(SYS_CNTV_CVAL);
+                timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTV_CTL));
+                timer_set_cval(ctx, read_sysreg_el0(SYS_CNTV_CVAL));

                 /* Disable the timer */
                 write_sysreg_el0(0, SYS_CNTV_CTL);
@@ -359,8 +446,8 @@ static void timer_save_state(struct arch_timer_context *ctx)
                 break;
         case TIMER_PTIMER:
-                ctx->cnt_ctl = read_sysreg_el0(SYS_CNTP_CTL);
-                ctx->cnt_cval = read_sysreg_el0(SYS_CNTP_CVAL);
+                timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTP_CTL));
+                timer_set_cval(ctx, read_sysreg_el0(SYS_CNTP_CVAL));

                 /* Disable the timer */
                 write_sysreg_el0(0, SYS_CNTP_CTL);
@@ -429,14 +516,14 @@ static void timer_restore_state(struct arch_timer_context *ctx)
         switch (index) {
         case TIMER_VTIMER:
-                write_sysreg_el0(ctx->cnt_cval, SYS_CNTV_CVAL);
+                write_sysreg_el0(timer_get_cval(ctx), SYS_CNTV_CVAL);
                 isb();
-                write_sysreg_el0(ctx->cnt_ctl, SYS_CNTV_CTL);
+                write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTV_CTL);
                 break;
         case TIMER_PTIMER:
-                write_sysreg_el0(ctx->cnt_cval, SYS_CNTP_CVAL);
+                write_sysreg_el0(timer_get_cval(ctx), SYS_CNTP_CVAL);
                 isb();
-                write_sysreg_el0(ctx->cnt_ctl, SYS_CNTP_CTL);
+                write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTP_CTL);
                 break;
         case NR_KVM_TIMERS:
                 BUG();
@@ -528,7 +615,7 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu)
                 kvm_timer_vcpu_load_nogic(vcpu);
         }

-        set_cntvoff(map.direct_vtimer->cntvoff);
+        set_cntvoff(timer_get_offset(map.direct_vtimer));

         kvm_timer_unblocking(vcpu);
@@ -615,7 +702,7 @@ static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
         }
 }

-void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
+void kvm_timer_sync_user(struct kvm_vcpu *vcpu)
 {
         struct arch_timer_cpu *timer = vcpu_timer(vcpu);
@@ -639,8 +726,8 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
          * resets the timer to be disabled and unmasked and is compliant with
          * the ARMv7 architecture.
          */
-        vcpu_vtimer(vcpu)->cnt_ctl = 0;
-        vcpu_ptimer(vcpu)->cnt_ctl = 0;
+        timer_set_ctl(vcpu_vtimer(vcpu), 0);
+        timer_set_ctl(vcpu_ptimer(vcpu), 0);

         if (timer->enabled) {
                 kvm_timer_update_irq(vcpu, false, vcpu_vtimer(vcpu));
@@ -668,13 +755,13 @@ static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
         mutex_lock(&kvm->lock);
         kvm_for_each_vcpu(i, tmp, kvm)
-                vcpu_vtimer(tmp)->cntvoff = cntvoff;
+                timer_set_offset(vcpu_vtimer(tmp), cntvoff);

         /*
          * When called from the vcpu create path, the CPU being created is not
          * included in the loop above, so we just set it here as well.
          */
-        vcpu_vtimer(vcpu)->cntvoff = cntvoff;
+        timer_set_offset(vcpu_vtimer(vcpu), cntvoff);
         mutex_unlock(&kvm->lock);
 }
@@ -684,9 +771,12 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
         struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
         struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

+        vtimer->vcpu = vcpu;
+        ptimer->vcpu = vcpu;
+
         /* Synchronize cntvoff across all vtimers of a VM. */
         update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
-        ptimer->cntvoff = 0;
+        timer_set_offset(ptimer, 0);

         hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
         timer->bg_timer.function = kvm_bg_timer_expire;
@@ -704,9 +794,6 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
         vtimer->host_timer_irq_flags = host_vtimer_irq_flags;
         ptimer->host_timer_irq_flags = host_ptimer_irq_flags;
-
-        vtimer->vcpu = vcpu;
-        ptimer->vcpu = vcpu;
 }

 static void kvm_timer_init_interrupt(void *info)
@@ -756,10 +843,12 @@ static u64 read_timer_ctl(struct arch_timer_context *timer)
          * UNKNOWN when ENABLE bit is 0, so we chose to set ISTATUS bit
          * regardless of ENABLE bit for our implementation convenience.
          */
+        u32 ctl = timer_get_ctl(timer);
+
         if (!kvm_timer_compute_delta(timer))
-                return timer->cnt_ctl | ARCH_TIMER_CTRL_IT_STAT;
-        else
-                return timer->cnt_ctl;
+                ctl |= ARCH_TIMER_CTRL_IT_STAT;
+
+        return ctl;
 }

 u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
@@ -795,8 +884,8 @@ static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
         switch (treg) {
         case TIMER_REG_TVAL:
-                val = timer->cnt_cval - kvm_phys_timer_read() + timer->cntvoff;
-                val &= lower_32_bits(val);
+                val = timer_get_cval(timer) - kvm_phys_timer_read() + timer_get_offset(timer);
+                val = lower_32_bits(val);
                 break;

         case TIMER_REG_CTL:
@@ -804,11 +893,11 @@ static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
                 break;

         case TIMER_REG_CVAL:
-                val = timer->cnt_cval;
+                val = timer_get_cval(timer);
                 break;

         case TIMER_REG_CNT:
-                val = kvm_phys_timer_read() - timer->cntvoff;
+                val = kvm_phys_timer_read() - timer_get_offset(timer);
                 break;

         default:
@@ -842,15 +931,15 @@ static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
 {
         switch (treg) {
         case TIMER_REG_TVAL:
-                timer->cnt_cval = kvm_phys_timer_read() - timer->cntvoff + (s32)val;
+                timer_set_cval(timer, kvm_phys_timer_read() - timer_get_offset(timer) + (s32)val);
                 break;

         case TIMER_REG_CTL:
-                timer->cnt_ctl = val & ~ARCH_TIMER_CTRL_IT_STAT;
+                timer_set_ctl(timer, val & ~ARCH_TIMER_CTRL_IT_STAT);
                 break;

         case TIMER_REG_CVAL:
-                timer->cnt_cval = val;
+                timer_set_cval(timer, val);
                 break;

         default:
......
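A small sketch (not from the commit) of what the timer accessor change buys: emulated timer state is now read and written through the vcpu's sysreg array, so the same value is visible via __vcpu_sys_reg(CNTV_CTL_EL0) and friends. The helper name below is hypothetical.

        /* Hypothetical illustration of the timer accessors above. */
        static void example_mask_vtimer(struct kvm_vcpu *vcpu)
        {
                struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
                u32 ctl = timer_get_ctl(vtimer);

                timer_set_ctl(vtimer, ctl | ARCH_TIMER_CTRL_IT_MASK);
                /* equivalent view: __vcpu_sys_reg(vcpu, CNTV_CTL_EL0) now has IMASK set */
        }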
@@ -106,22 +106,15 @@ static int kvm_arm_default_max_vcpus(void)
  */
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
-        int ret, cpu;
+        int ret;

         ret = kvm_arm_setup_stage2(kvm, type);
         if (ret)
                 return ret;

-        kvm->arch.last_vcpu_ran = alloc_percpu(typeof(*kvm->arch.last_vcpu_ran));
-        if (!kvm->arch.last_vcpu_ran)
-                return -ENOMEM;
-
-        for_each_possible_cpu(cpu)
-                *per_cpu_ptr(kvm->arch.last_vcpu_ran, cpu) = -1;
-
-        ret = kvm_alloc_stage2_pgd(kvm);
+        ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu);
         if (ret)
-                goto out_fail_alloc;
+                return ret;

         ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);
         if (ret)
@@ -129,18 +122,12 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
         kvm_vgic_early_init(kvm);

-        /* Mark the initial VMID generation invalid */
-        kvm->arch.vmid.vmid_gen = 0;
-
         /* The maximum number of VCPUs is limited by the host's GIC model */
         kvm->arch.max_vcpus = kvm_arm_default_max_vcpus();

         return ret;
 out_free_stage2_pgd:
-        kvm_free_stage2_pgd(kvm);
-out_fail_alloc:
-        free_percpu(kvm->arch.last_vcpu_ran);
-        kvm->arch.last_vcpu_ran = NULL;
+        kvm_free_stage2_pgd(&kvm->arch.mmu);
         return ret;
 }
@@ -160,9 +147,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
         kvm_vgic_destroy(kvm);

-        free_percpu(kvm->arch.last_vcpu_ran);
-        kvm->arch.last_vcpu_ran = NULL;
-
         for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                 if (kvm->vcpus[i]) {
                         kvm_vcpu_destroy(kvm->vcpus[i]);
@@ -279,6 +263,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
         kvm_arm_pvtime_vcpu_init(&vcpu->arch);

+        vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;
+
         err = kvm_vgic_vcpu_init(vcpu);
         if (err)
                 return err;
@@ -334,16 +320,18 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
+        struct kvm_s2_mmu *mmu;
         int *last_ran;

-        last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
+        mmu = vcpu->arch.hw_mmu;
+        last_ran = this_cpu_ptr(mmu->last_vcpu_ran);

         /*
          * We might get preempted before the vCPU actually runs, but
          * over-invalidation doesn't affect correctness.
          */
         if (*last_ran != vcpu->vcpu_id) {
-                kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu);
+                kvm_call_hyp(__kvm_tlb_flush_local_vmid, mmu);
                 *last_ran = vcpu->vcpu_id;
         }
@@ -680,7 +668,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                  */
                 cond_resched();

-                update_vmid(&vcpu->kvm->arch.vmid);
+                update_vmid(&vcpu->arch.hw_mmu->vmid);

                 check_vcpu_requests(vcpu);
@@ -729,13 +717,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                  */
                 smp_store_mb(vcpu->mode, IN_GUEST_MODE);

-                if (ret <= 0 || need_new_vmid_gen(&vcpu->kvm->arch.vmid) ||
+                if (ret <= 0 || need_new_vmid_gen(&vcpu->arch.hw_mmu->vmid) ||
                     kvm_request_pending(vcpu)) {
                         vcpu->mode = OUTSIDE_GUEST_MODE;
                         isb(); /* Ensure work in x_flush_hwstate is committed */
                         kvm_pmu_sync_hwstate(vcpu);
                         if (static_branch_unlikely(&userspace_irqchip_in_use))
-                                kvm_timer_sync_hwstate(vcpu);
+                                kvm_timer_sync_user(vcpu);
                         kvm_vgic_sync_hwstate(vcpu);
                         local_irq_enable();
                         preempt_enable();
@@ -780,7 +768,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                  * timer virtual interrupt state.
                  */
                 if (static_branch_unlikely(&userspace_irqchip_in_use))
-                        kvm_timer_sync_hwstate(vcpu);
+                        kvm_timer_sync_user(vcpu);

                 kvm_arch_vcpu_ctxsync_fp(vcpu);
......
@@ -85,7 +85,7 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
         WARN_ON_ONCE(!irqs_disabled());

         if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
-                fpsimd_bind_state_to_cpu(&vcpu->arch.ctxt.gp_regs.fp_regs,
+                fpsimd_bind_state_to_cpu(&vcpu->arch.ctxt.fp_regs,
                                          vcpu->arch.sve_state,
                                          vcpu->arch.sve_max_vl);
@@ -109,12 +109,10 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
         local_irq_save(flags);

         if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
-                u64 *guest_zcr = &vcpu->arch.ctxt.sys_regs[ZCR_EL1];
-
                 fpsimd_save_and_flush_cpu_state();

                 if (guest_has_sve)
-                        *guest_zcr = read_sysreg_s(SYS_ZCR_EL12);
+                        __vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_s(SYS_ZCR_EL12);
         } else if (host_has_sve) {
                 /*
                  * The FPSIMD/SVE state in the CPU has not been touched, and we
......
@@ -101,19 +101,69 @@ static int core_reg_size_from_offset(const struct kvm_vcpu *vcpu, u64 off)
         return size;
 }

-static int validate_core_offset(const struct kvm_vcpu *vcpu,
-                                const struct kvm_one_reg *reg)
+static void *core_reg_addr(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
         u64 off = core_reg_offset_from_id(reg->id);
         int size = core_reg_size_from_offset(vcpu, off);

         if (size < 0)
-                return -EINVAL;
+                return NULL;

         if (KVM_REG_SIZE(reg->id) != size)
-                return -EINVAL;
+                return NULL;

-        return 0;
+        switch (off) {
+        case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
+             KVM_REG_ARM_CORE_REG(regs.regs[30]):
+                off -= KVM_REG_ARM_CORE_REG(regs.regs[0]);
+                off /= 2;
+                return &vcpu->arch.ctxt.regs.regs[off];
+
+        case KVM_REG_ARM_CORE_REG(regs.sp):
+                return &vcpu->arch.ctxt.regs.sp;
+
+        case KVM_REG_ARM_CORE_REG(regs.pc):
+                return &vcpu->arch.ctxt.regs.pc;
+
+        case KVM_REG_ARM_CORE_REG(regs.pstate):
+                return &vcpu->arch.ctxt.regs.pstate;
+
+        case KVM_REG_ARM_CORE_REG(sp_el1):
+                return __ctxt_sys_reg(&vcpu->arch.ctxt, SP_EL1);
+
+        case KVM_REG_ARM_CORE_REG(elr_el1):
+                return __ctxt_sys_reg(&vcpu->arch.ctxt, ELR_EL1);
+
+        case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_EL1]):
+                return __ctxt_sys_reg(&vcpu->arch.ctxt, SPSR_EL1);
+
+        case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_ABT]):
+                return &vcpu->arch.ctxt.spsr_abt;
+
+        case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_UND]):
+                return &vcpu->arch.ctxt.spsr_und;
+
+        case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_IRQ]):
+                return &vcpu->arch.ctxt.spsr_irq;
+
+        case KVM_REG_ARM_CORE_REG(spsr[KVM_SPSR_FIQ]):
+                return &vcpu->arch.ctxt.spsr_fiq;
+
+        case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
+             KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
+                off -= KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]);
+                off /= 4;
+                return &vcpu->arch.ctxt.fp_regs.vregs[off];
+
+        case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
+                return &vcpu->arch.ctxt.fp_regs.fpsr;
+
+        case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
+                return &vcpu->arch.ctxt.fp_regs.fpcr;
+
+        default:
+                return NULL;
+        }
 }

 static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
@@ -125,8 +175,8 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
          * off the index in the "array".
          */
         __u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
-        struct kvm_regs *regs = vcpu_gp_regs(vcpu);
-        int nr_regs = sizeof(*regs) / sizeof(__u32);
+        int nr_regs = sizeof(struct kvm_regs) / sizeof(__u32);
+        void *addr;
         u32 off;

         /* Our ID is an index into the kvm_regs struct. */
@@ -135,10 +185,11 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
             (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
                 return -ENOENT;

-        if (validate_core_offset(vcpu, reg))
+        addr = core_reg_addr(vcpu, reg);
+        if (!addr)
                 return -EINVAL;

-        if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
+        if (copy_to_user(uaddr, addr, KVM_REG_SIZE(reg->id)))
                 return -EFAULT;

         return 0;
@@ -147,10 +198,9 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
         __u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
-        struct kvm_regs *regs = vcpu_gp_regs(vcpu);
-        int nr_regs = sizeof(*regs) / sizeof(__u32);
+        int nr_regs = sizeof(struct kvm_regs) / sizeof(__u32);
         __uint128_t tmp;
-        void *valp = &tmp;
+        void *valp = &tmp, *addr;
         u64 off;
         int err = 0;
@@ -160,7 +210,8 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
             (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
                 return -ENOENT;

-        if (validate_core_offset(vcpu, reg))
+        addr = core_reg_addr(vcpu, reg);
+        if (!addr)
                 return -EINVAL;

         if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
@@ -198,7 +249,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
                 }
         }

-        memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id));
+        memcpy(addr, valp, KVM_REG_SIZE(reg->id));

         if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) {
                 int i;
......
@@ -16,8 +16,7 @@
 #include <asm/kvm_mmu.h>
 #include <asm/kvm_ptrauth.h>

-#define CPU_GP_REG_OFFSET(x)    (CPU_GP_REGS + x)
-#define CPU_XREG_OFFSET(x)      CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
+#define CPU_XREG_OFFSET(x)      (CPU_USER_PT_REGS + 8*x)
 #define CPU_SP_EL0_OFFSET       (CPU_XREG_OFFSET(30) + 8)

 .text
......
@@ -88,9 +88,8 @@
         default:        write_debug(ptr[0], reg, 0);                    \
         }

-static inline void __debug_save_state(struct kvm_vcpu *vcpu,
-                                      struct kvm_guest_debug_arch *dbg,
-                                      struct kvm_cpu_context *ctxt)
+static void __debug_save_state(struct kvm_guest_debug_arch *dbg,
+                               struct kvm_cpu_context *ctxt)
 {
         u64 aa64dfr0;
         int brps, wrps;
@@ -104,12 +103,11 @@ static inline void __debug_save_state(struct kvm_vcpu *vcpu,
         save_debug(dbg->dbg_wcr, dbgwcr, wrps);
         save_debug(dbg->dbg_wvr, dbgwvr, wrps);

-        ctxt->sys_regs[MDCCINT_EL1] = read_sysreg(mdccint_el1);
+        ctxt_sys_reg(ctxt, MDCCINT_EL1) = read_sysreg(mdccint_el1);
 }

-static inline void __debug_restore_state(struct kvm_vcpu *vcpu,
-                                         struct kvm_guest_debug_arch *dbg,
-                                         struct kvm_cpu_context *ctxt)
+static void __debug_restore_state(struct kvm_guest_debug_arch *dbg,
+                                  struct kvm_cpu_context *ctxt)
 {
         u64 aa64dfr0;
         int brps, wrps;
@@ -124,7 +122,7 @@ static inline void __debug_restore_state(struct kvm_vcpu *vcpu,
         restore_debug(dbg->dbg_wcr, dbgwcr, wrps);
         restore_debug(dbg->dbg_wvr, dbgwvr, wrps);

-        write_sysreg(ctxt->sys_regs[MDCCINT_EL1], mdccint_el1);
+        write_sysreg(ctxt_sys_reg(ctxt, MDCCINT_EL1), mdccint_el1);
 }

 static inline void __debug_switch_to_guest_common(struct kvm_vcpu *vcpu)
@@ -142,8 +140,8 @@ static inline void __debug_switch_to_guest_common(struct kvm_vcpu *vcpu)
         host_dbg = &vcpu->arch.host_debug_state.regs;
         guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);

-        __debug_save_state(vcpu, host_dbg, host_ctxt);
-        __debug_restore_state(vcpu, guest_dbg, guest_ctxt);
+        __debug_save_state(host_dbg, host_ctxt);
+        __debug_restore_state(guest_dbg, guest_ctxt);
 }

 static inline void __debug_switch_to_host_common(struct kvm_vcpu *vcpu)
@@ -161,8 +159,8 @@ static inline void __debug_switch_to_host_common(struct kvm_vcpu *vcpu)
         host_dbg = &vcpu->arch.host_debug_state.regs;
         guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);

-        __debug_save_state(vcpu, guest_dbg, guest_ctxt);
-        __debug_restore_state(vcpu, host_dbg, host_ctxt);
+        __debug_save_state(guest_dbg, guest_ctxt);
+        __debug_restore_state(host_dbg, host_ctxt);

         vcpu->arch.flags &= ~KVM_ARM64_DEBUG_DIRTY;
 }
......
@@ -53,7 +53,7 @@ static inline void __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
         if (!vcpu_el1_is_32bit(vcpu))
                 return;

-        vcpu->arch.ctxt.sys_regs[FPEXC32_EL2] = read_sysreg(fpexc32_el2);
+        __vcpu_sys_reg(vcpu, FPEXC32_EL2) = read_sysreg(fpexc32_el2);
 }

 static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
@@ -122,9 +122,9 @@ static inline void ___deactivate_traps(struct kvm_vcpu *vcpu)
         }
 }

-static inline void __activate_vm(struct kvm *kvm)
+static inline void __activate_vm(struct kvm_s2_mmu *mmu)
 {
-        __load_guest_stage2(kvm);
+        __load_guest_stage2(mmu);
 }

 static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar)
@@ -266,17 +266,16 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
         if (sve_guest) {
                 sve_load_state(vcpu_sve_pffr(vcpu),
-                               &vcpu->arch.ctxt.gp_regs.fp_regs.fpsr,
+                               &vcpu->arch.ctxt.fp_regs.fpsr,
                                sve_vq_from_vl(vcpu->arch.sve_max_vl) - 1);
-                write_sysreg_s(vcpu->arch.ctxt.sys_regs[ZCR_EL1], SYS_ZCR_EL12);
+                write_sysreg_s(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR_EL12);
         } else {
-                __fpsimd_restore_state(&vcpu->arch.ctxt.gp_regs.fp_regs);
+                __fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs);
         }

         /* Skip restoring fpexc32 for AArch64 guests */
         if (!(read_sysreg(hcr_el2) & HCR_RW))
-                write_sysreg(vcpu->arch.ctxt.sys_regs[FPEXC32_EL2],
-                             fpexc32_el2);
+                write_sysreg(__vcpu_sys_reg(vcpu, FPEXC32_EL2), fpexc32_el2);

         vcpu->arch.flags |= KVM_ARM64_FP_ENABLED;
@@ -365,11 +364,14 @@ static inline bool esr_is_ptrauth_trap(u32 esr)
         return false;
 }

-#define __ptrauth_save_key(regs, key)                                           \
-({                                                                              \
-        regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1);       \
-        regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1);       \
-})
+#define __ptrauth_save_key(ctxt, key)                                   \
+        do {                                                            \
+                u64 __val;                                              \
+                __val = read_sysreg_s(SYS_ ## key ## KEYLO_EL1);        \
+                ctxt_sys_reg(ctxt, key ## KEYLO_EL1) = __val;           \
+                __val = read_sysreg_s(SYS_ ## key ## KEYHI_EL1);        \
+                ctxt_sys_reg(ctxt, key ## KEYHI_EL1) = __val;           \
+        } while(0)

 static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
 {
@@ -381,11 +383,11 @@ static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
                 return false;

         ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
-        __ptrauth_save_key(ctxt->sys_regs, APIA);
-        __ptrauth_save_key(ctxt->sys_regs, APIB);
-        __ptrauth_save_key(ctxt->sys_regs, APDA);
-        __ptrauth_save_key(ctxt->sys_regs, APDB);
-        __ptrauth_save_key(ctxt->sys_regs, APGA);
+        __ptrauth_save_key(ctxt, APIA);
+        __ptrauth_save_key(ctxt, APIB);
+        __ptrauth_save_key(ctxt, APDA);
+        __ptrauth_save_key(ctxt, APDB);
+        __ptrauth_save_key(ctxt, APGA);

         vcpu_ptrauth_enable(vcpu);
This diff is collapsed.
@@ -52,9 +52,9 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
                  * configured and enabled. We can now restore the guest's S1
                  * configuration: SCTLR, and only then TCR.
                  */
-                write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], SYS_SCTLR);
+                write_sysreg_el1(ctxt_sys_reg(ctxt, SCTLR_EL1), SYS_SCTLR);
                 isb();
-                write_sysreg_el1(ctxt->sys_regs[TCR_EL1], SYS_TCR);
+                write_sysreg_el1(ctxt_sys_reg(ctxt, TCR_EL1), SYS_TCR);
         }
 }

@@ -194,7 +194,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
         __sysreg32_restore_state(vcpu);
         __sysreg_restore_state_nvhe(guest_ctxt);

-        __activate_vm(kern_hyp_va(vcpu->kvm));
+        __activate_vm(kern_hyp_va(vcpu->arch.hw_mmu));
         __activate_traps(vcpu);

         __hyp_vgic_restore_state(vcpu);
......
@@ -12,7 +12,8 @@ struct tlb_inv_context {
         u64 tcr;
 };

-static void __tlb_switch_to_guest(struct kvm *kvm, struct tlb_inv_context *cxt)
+static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
+                                  struct tlb_inv_context *cxt)
 {
         if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
                 u64 val;
@@ -30,12 +31,10 @@ static void __tlb_switch_to_guest(struct kvm *kvm, struct tlb_inv_context *cxt)
                 isb();
         }

-        /* __load_guest_stage2() includes an ISB for the workaround. */
-        __load_guest_stage2(kvm);
-        asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
+        __load_guest_stage2(mmu);
 }

-static void __tlb_switch_to_host(struct kvm *kvm, struct tlb_inv_context *cxt)
+static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
 {
         write_sysreg(0, vttbr_el2);

@@ -47,15 +46,16 @@ static void __tlb_switch_to_host(struct kvm *kvm, struct tlb_inv_context *cxt)
         }
 }

-void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
+void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
+                              phys_addr_t ipa, int level)
 {
         struct tlb_inv_context cxt;

         dsb(ishst);

         /* Switch to requested VMID */
-        kvm = kern_hyp_va(kvm);
-        __tlb_switch_to_guest(kvm, &cxt);
+        mmu = kern_hyp_va(mmu);
+        __tlb_switch_to_guest(mmu, &cxt);

         /*
          * We could do so much better if we had the VA as well.
@@ -63,7 +63,7 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
          * whole of Stage-1. Weep...
          */
         ipa >>= 12;
-        __tlbi(ipas2e1is, ipa);
+        __tlbi_level(ipas2e1is, ipa, level);

         /*
          * We have to ensure completion of the invalidation at Stage-2,
@@ -98,39 +98,39 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
         if (icache_is_vpipt())
                 __flush_icache_all();

-        __tlb_switch_to_host(kvm, &cxt);
+        __tlb_switch_to_host(&cxt);
 }

-void __kvm_tlb_flush_vmid(struct kvm *kvm)
+void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
 {
         struct tlb_inv_context cxt;

         dsb(ishst);

         /* Switch to requested VMID */
-        kvm = kern_hyp_va(kvm);
-        __tlb_switch_to_guest(kvm, &cxt);
+        mmu = kern_hyp_va(mmu);
+        __tlb_switch_to_guest(mmu, &cxt);

         __tlbi(vmalls12e1is);
         dsb(ish);
         isb();

-        __tlb_switch_to_host(kvm, &cxt);
+        __tlb_switch_to_host(&cxt);
 }

-void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
+void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
 {
-        struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
         struct tlb_inv_context cxt;

         /* Switch to requested VMID */
-        __tlb_switch_to_guest(kvm, &cxt);
+        mmu = kern_hyp_va(mmu);
+        __tlb_switch_to_guest(mmu, &cxt);

         __tlbi(vmalle1);
         dsb(nsh);
         isb();

-        __tlb_switch_to_host(kvm, &cxt);
+        __tlb_switch_to_host(&cxt);
 }

 void __kvm_flush_vm_context(void)
......
@@ -125,7 +125,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
          * stage 2 translation, and __activate_traps clear HCR_EL2.TGE
          * (among other things).
          */
-        __activate_vm(vcpu->kvm);
+        __activate_vm(vcpu->arch.hw_mmu);
         __activate_traps(vcpu);

         sysreg_restore_guest_state_vhe(guest_ctxt);
......
@@ -16,7 +16,8 @@ struct tlb_inv_context {
         u64 sctlr;
 };

-static void __tlb_switch_to_guest(struct kvm *kvm, struct tlb_inv_context *cxt)
+static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
+                                  struct tlb_inv_context *cxt)
 {
         u64 val;

@@ -52,14 +53,14 @@ static void __tlb_switch_to_guest(struct kvm *kvm, struct tlb_inv_context *cxt)
          * place before clearing TGE. __load_guest_stage2() already
          * has an ISB in order to deal with this.
          */
-        __load_guest_stage2(kvm);
+        __load_guest_stage2(mmu);
         val = read_sysreg(hcr_el2);
         val &= ~HCR_TGE;
         write_sysreg(val, hcr_el2);
         isb();
 }

-static void __tlb_switch_to_host(struct kvm *kvm, struct tlb_inv_context *cxt)
+static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
 {
         /*
          * We're done with the TLB operation, let's restore the host's
@@ -78,14 +79,15 @@ static void __tlb_switch_to_host(struct kvm *kvm, struct tlb_inv_context *cxt)
         local_irq_restore(cxt->flags);
 }

-void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
+void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
+                              phys_addr_t ipa, int level)
 {
         struct tlb_inv_context cxt;

         dsb(ishst);

         /* Switch to requested VMID */
-        __tlb_switch_to_guest(kvm, &cxt);
+        __tlb_switch_to_guest(mmu, &cxt);

         /*
          * We could do so much better if we had the VA as well.
@@ -93,7 +95,7 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
          * whole of Stage-1. Weep...
          */
         ipa >>= 12;
-        __tlbi(ipas2e1is, ipa);
+        __tlbi_level(ipas2e1is, ipa, level);

         /*
          * We have to ensure completion of the invalidation at Stage-2,
@@ -106,38 +108,37 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
         dsb(ish);
         isb();

-        __tlb_switch_to_host(kvm, &cxt);
+        __tlb_switch_to_host(&cxt);
 }

-void __kvm_tlb_flush_vmid(struct kvm *kvm)
+void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
 {
         struct tlb_inv_context cxt;

         dsb(ishst);

         /* Switch to requested VMID */
-        __tlb_switch_to_guest(kvm, &cxt);
+        __tlb_switch_to_guest(mmu, &cxt);

         __tlbi(vmalls12e1is);
         dsb(ish);
         isb();

-        __tlb_switch_to_host(kvm, &cxt);
+        __tlb_switch_to_host(&cxt);
 }

-void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
+void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
 {
-        struct kvm *kvm = vcpu->kvm;
         struct tlb_inv_context cxt;

         /* Switch to requested VMID */
-        __tlb_switch_to_guest(kvm, &cxt);
+        __tlb_switch_to_guest(mmu, &cxt);

         __tlbi(vmalle1);
         dsb(nsh);
         isb();

-        __tlb_switch_to_host(kvm, &cxt);
+        __tlb_switch_to_host(&cxt);
 }

 void __kvm_flush_vm_context(void)
......
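With both the VHE and non-VHE copies now taking a struct kvm_s2_mmu, a host-side call site hands the hypervisor the MMU it wants flushed plus the IPA and level. The snippet below only illustrates that parameter contract; the kvm->arch.mmu field and the wrapper name are assumptions for the sketch, not something this diff shows.

/*
 * Hypothetical wrapper around the new hypercall signature. Only the
 * three-argument shape matters here; &kvm->arch.mmu is assumed to be
 * where the (single, for now) stage-2 MMU lives.
 */
static void stage2_flush_ipa(struct kvm *kvm, phys_addr_t ipa, int level)
{
	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, &kvm->arch.mmu, ipa, level);
}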
...@@ -64,7 +64,7 @@ static void enter_exception64(struct kvm_vcpu *vcpu, unsigned long target_mode, ...@@ -64,7 +64,7 @@ static void enter_exception64(struct kvm_vcpu *vcpu, unsigned long target_mode,
case PSR_MODE_EL1h: case PSR_MODE_EL1h:
vbar = vcpu_read_sys_reg(vcpu, VBAR_EL1); vbar = vcpu_read_sys_reg(vcpu, VBAR_EL1);
sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1); sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu)); vcpu_write_sys_reg(vcpu, *vcpu_pc(vcpu), ELR_EL1);
break; break;
default: default:
/* Don't do that */ /* Don't do that */
......
...@@ -100,7 +100,7 @@ static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][16] = { ...@@ -100,7 +100,7 @@ static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][16] = {
*/ */
unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num) unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num)
{ {
unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs.regs; unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.regs;
unsigned long mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK; unsigned long mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
switch (mode) { switch (mode) {
...@@ -147,8 +147,20 @@ unsigned long vcpu_read_spsr32(const struct kvm_vcpu *vcpu) ...@@ -147,8 +147,20 @@ unsigned long vcpu_read_spsr32(const struct kvm_vcpu *vcpu)
{ {
int spsr_idx = vcpu_spsr32_mode(vcpu); int spsr_idx = vcpu_spsr32_mode(vcpu);
if (!vcpu->arch.sysregs_loaded_on_cpu) if (!vcpu->arch.sysregs_loaded_on_cpu) {
return vcpu_gp_regs(vcpu)->spsr[spsr_idx]; switch (spsr_idx) {
case KVM_SPSR_SVC:
return __vcpu_sys_reg(vcpu, SPSR_EL1);
case KVM_SPSR_ABT:
return vcpu->arch.ctxt.spsr_abt;
case KVM_SPSR_UND:
return vcpu->arch.ctxt.spsr_und;
case KVM_SPSR_IRQ:
return vcpu->arch.ctxt.spsr_irq;
case KVM_SPSR_FIQ:
return vcpu->arch.ctxt.spsr_fiq;
}
}
switch (spsr_idx) { switch (spsr_idx) {
case KVM_SPSR_SVC: case KVM_SPSR_SVC:
...@@ -171,7 +183,24 @@ void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v) ...@@ -171,7 +183,24 @@ void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v)
int spsr_idx = vcpu_spsr32_mode(vcpu); int spsr_idx = vcpu_spsr32_mode(vcpu);
if (!vcpu->arch.sysregs_loaded_on_cpu) { if (!vcpu->arch.sysregs_loaded_on_cpu) {
vcpu_gp_regs(vcpu)->spsr[spsr_idx] = v; switch (spsr_idx) {
case KVM_SPSR_SVC:
__vcpu_sys_reg(vcpu, SPSR_EL1) = v;
break;
case KVM_SPSR_ABT:
vcpu->arch.ctxt.spsr_abt = v;
break;
case KVM_SPSR_UND:
vcpu->arch.ctxt.spsr_und = v;
break;
case KVM_SPSR_IRQ:
vcpu->arch.ctxt.spsr_irq = v;
break;
case KVM_SPSR_FIQ:
vcpu->arch.ctxt.spsr_fiq = v;
break;
}
return; return;
} }
......
...@@ -288,7 +288,7 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) ...@@ -288,7 +288,7 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
/* Reset core registers */ /* Reset core registers */
memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu))); memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu)));
vcpu_gp_regs(vcpu)->regs.pstate = pstate; vcpu_gp_regs(vcpu)->pstate = pstate;
/* Reset system registers */ /* Reset system registers */
kvm_reset_sys_regs(vcpu); kvm_reset_sys_regs(vcpu);
......
...@@ -94,6 +94,7 @@ static bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val) ...@@ -94,6 +94,7 @@ static bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
case TPIDR_EL1: *val = read_sysreg_s(SYS_TPIDR_EL1); break; case TPIDR_EL1: *val = read_sysreg_s(SYS_TPIDR_EL1); break;
case AMAIR_EL1: *val = read_sysreg_s(SYS_AMAIR_EL12); break; case AMAIR_EL1: *val = read_sysreg_s(SYS_AMAIR_EL12); break;
case CNTKCTL_EL1: *val = read_sysreg_s(SYS_CNTKCTL_EL12); break; case CNTKCTL_EL1: *val = read_sysreg_s(SYS_CNTKCTL_EL12); break;
case ELR_EL1: *val = read_sysreg_s(SYS_ELR_EL12); break;
case PAR_EL1: *val = read_sysreg_s(SYS_PAR_EL1); break; case PAR_EL1: *val = read_sysreg_s(SYS_PAR_EL1); break;
case DACR32_EL2: *val = read_sysreg_s(SYS_DACR32_EL2); break; case DACR32_EL2: *val = read_sysreg_s(SYS_DACR32_EL2); break;
case IFSR32_EL2: *val = read_sysreg_s(SYS_IFSR32_EL2); break; case IFSR32_EL2: *val = read_sysreg_s(SYS_IFSR32_EL2); break;
...@@ -133,6 +134,7 @@ static bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg) ...@@ -133,6 +134,7 @@ static bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
case TPIDR_EL1: write_sysreg_s(val, SYS_TPIDR_EL1); break; case TPIDR_EL1: write_sysreg_s(val, SYS_TPIDR_EL1); break;
case AMAIR_EL1: write_sysreg_s(val, SYS_AMAIR_EL12); break; case AMAIR_EL1: write_sysreg_s(val, SYS_AMAIR_EL12); break;
case CNTKCTL_EL1: write_sysreg_s(val, SYS_CNTKCTL_EL12); break; case CNTKCTL_EL1: write_sysreg_s(val, SYS_CNTKCTL_EL12); break;
case ELR_EL1: write_sysreg_s(val, SYS_ELR_EL12); break;
case PAR_EL1: write_sysreg_s(val, SYS_PAR_EL1); break; case PAR_EL1: write_sysreg_s(val, SYS_PAR_EL1); break;
case DACR32_EL2: write_sysreg_s(val, SYS_DACR32_EL2); break; case DACR32_EL2: write_sysreg_s(val, SYS_DACR32_EL2); break;
case IFSR32_EL2: write_sysreg_s(val, SYS_IFSR32_EL2); break; case IFSR32_EL2: write_sysreg_s(val, SYS_IFSR32_EL2); break;
......
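These two new case labels make ELR_EL1 behave like any other deferrable system register: when the guest's EL1 state is loaded on the CPU it is accessed through its _EL12 alias, otherwise the copy in the vcpu's register file is used. A hedged sketch of the read-side dispatch the case above plugs into (reproduced from memory of this code's usual shape, not from the diff):

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
{
	u64 val = 0x8badf00d8badf00d;	/* poison, should never be returned */

	if (vcpu->arch.sysregs_loaded_on_cpu &&
	    __vcpu_read_sys_reg_from_cpu(reg, &val))
		return val;

	return __vcpu_sys_reg(vcpu, reg);
}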
...@@ -301,8 +301,8 @@ TRACE_EVENT(kvm_timer_save_state, ...@@ -301,8 +301,8 @@ TRACE_EVENT(kvm_timer_save_state,
), ),
TP_fast_assign( TP_fast_assign(
__entry->ctl = ctx->cnt_ctl; __entry->ctl = timer_get_ctl(ctx);
__entry->cval = ctx->cnt_cval; __entry->cval = timer_get_cval(ctx);
__entry->timer_idx = arch_timer_ctx_index(ctx); __entry->timer_idx = arch_timer_ctx_index(ctx);
), ),
...@@ -323,8 +323,8 @@ TRACE_EVENT(kvm_timer_restore_state, ...@@ -323,8 +323,8 @@ TRACE_EVENT(kvm_timer_restore_state,
), ),
TP_fast_assign( TP_fast_assign(
__entry->ctl = ctx->cnt_ctl; __entry->ctl = timer_get_ctl(ctx);
__entry->cval = ctx->cnt_cval; __entry->cval = timer_get_cval(ctx);
__entry->timer_idx = arch_timer_ctx_index(ctx); __entry->timer_idx = arch_timer_ctx_index(ctx);
), ),
......
...@@ -26,16 +26,9 @@ enum kvm_arch_timer_regs { ...@@ -26,16 +26,9 @@ enum kvm_arch_timer_regs {
struct arch_timer_context { struct arch_timer_context {
struct kvm_vcpu *vcpu; struct kvm_vcpu *vcpu;
/* Registers: control register, timer value */
u32 cnt_ctl;
u64 cnt_cval;
/* Timer IRQ */ /* Timer IRQ */
struct kvm_irq_level irq; struct kvm_irq_level irq;
/* Virtual offset */
u64 cntvoff;
/* Emulated Timer (may be unused) */ /* Emulated Timer (may be unused) */
struct hrtimer hrtimer; struct hrtimer hrtimer;
...@@ -71,7 +64,7 @@ int kvm_timer_hyp_init(bool); ...@@ -71,7 +64,7 @@ int kvm_timer_hyp_init(bool);
int kvm_timer_enable(struct kvm_vcpu *vcpu); int kvm_timer_enable(struct kvm_vcpu *vcpu);
int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu); int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu); void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu); void kvm_timer_sync_user(struct kvm_vcpu *vcpu);
bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu); bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu);
void kvm_timer_update_run(struct kvm_vcpu *vcpu); void kvm_timer_update_run(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu); void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu);
...@@ -109,4 +102,8 @@ void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu, ...@@ -109,4 +102,8 @@ void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu,
enum kvm_arch_timer_regs treg, enum kvm_arch_timer_regs treg,
u64 val); u64 val);
/* Needed for tracing */
u32 timer_get_ctl(struct arch_timer_context *ctxt);
u64 timer_get_cval(struct arch_timer_context *ctxt);
#endif #endif
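With cnt_ctl, cnt_cval and cntvoff gone from struct arch_timer_context, the new timer_get_ctl()/timer_get_cval() accessors must fetch the values from wherever they now live. A plausible sketch, assuming the per-timer control and compare values sit in the vcpu's sysreg array under CNTV_*/CNTP_* slots (those names and TIMER_VTIMER/TIMER_PTIMER are assumptions of this sketch, not confirmed by the diff):

u32 timer_get_ctl(struct arch_timer_context *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	/* Pick the sysreg slot that matches the timer this context describes. */
	switch (arch_timer_ctx_index(ctxt)) {
	case TIMER_VTIMER:
		return __vcpu_sys_reg(vcpu, CNTV_CTL_EL0);
	case TIMER_PTIMER:
		return __vcpu_sys_reg(vcpu, CNTP_CTL_EL0);
	default:
		WARN_ONCE(1, "unhandled timer context");
		return 0;
	}
}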