Commit 435a9f60 authored by Oliver Upton

Merge branch kvm-arm64/shadow-mmu into kvmarm/next

* kvm-arm64/shadow-mmu:
  : Shadow stage-2 MMU support for NV, courtesy of Marc Zyngier
  :
  : Initial implementation of shadow stage-2 page tables to support a guest
  : hypervisor. In the author's words:
  :
  :   So here's the 10000m (approximately 30000ft for those of you stuck
  :   with the wrong units) view of what this is doing:
  :
  :     - for each {VMID,VTTBR,VTCR} tuple the guest uses, we use a
  :       separate shadow s2_mmu context. This context has its own "real"
  :       VMID and a set of page tables that are the combination of the
  :       guest's S2 and the host S2, built dynamically one fault at a time.
  :
  :     - these shadow S2 contexts are ephemeral, and behave exactly as
  :       TLBs. For all intents and purposes, they *are* TLBs, and we discard
  :       them pretty often.
  :
  :     - TLB invalidation takes three possible paths:
  :
  :       * either this is an EL2 S1 invalidation, and we directly emulate
  :         it as early as possible
  :
  :       * or this is an EL1 S1 invalidation, and we need to apply it to
  :         the shadow S2s (plural!) that match the VMID set by the L1 guest
  :
  :       * or finally, this is affecting S2, and we need to tear down
  :         corresponding part of the shadow S2s, which invalidates the TLBs
  KVM: arm64: nv: Truely enable nXS TLBI operations
  KVM: arm64: nv: Add handling of NXS-flavoured TLBI operations
  KVM: arm64: nv: Add handling of range-based TLBI operations
  KVM: arm64: nv: Add handling of outer-shareable TLBI operations
  KVM: arm64: nv: Invalidate TLBs based on shadow S2 TTL-like information
  KVM: arm64: nv: Tag shadow S2 entries with guest's leaf S2 level
  KVM: arm64: nv: Handle FEAT_TTL hinted TLB operations
  KVM: arm64: nv: Handle TLBI IPAS2E1{,IS} operations
  KVM: arm64: nv: Handle TLBI ALLE1{,IS} operations
  KVM: arm64: nv: Handle TLBI VMALLS12E1{,IS} operations
  KVM: arm64: nv: Handle TLB invalidation targeting L2 stage-1
  KVM: arm64: nv: Handle EL2 Stage-1 TLB invalidation
  KVM: arm64: nv: Add Stage-1 EL2 invalidation primitives
  KVM: arm64: nv: Unmap/flush shadow stage 2 page tables
  KVM: arm64: nv: Handle shadow stage 2 page faults
  KVM: arm64: nv: Implement nested Stage-2 page table walk logic
  KVM: arm64: nv: Support multiple nested Stage-2 mmu structures
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
parents a35d5b20 3cfde36d
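To make the cover letter's per-{VMID,VTTBR,VTCR} bookkeeping concrete, here is a minimal sketch of how a guest tuple might be matched against the per-VM array of shadow MMUs added by this series. The field and helper names (tlb_vttbr, tlb_vtcr, nested_stage2_enabled, nested_mmus*, kvm_s2_mmu_valid()) come from the headers in this diff; the register accessors and control flow are illustrative assumptions, not the actual lookup_s2_mmu() implementation (which is in the collapsed part of the diff).

/* Illustrative only: field names from this series, logic assumed */
static struct kvm_s2_mmu *find_shadow_mmu_sketch(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	/* Guest-programmed values are stored with CnP cleared (see below) */
	u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2) & ~VTTBR_CNP_BIT;
	u64 vtcr  = vcpu_read_sys_reg(vcpu, VTCR_EL2);
	bool s2_on = !!(__vcpu_sys_reg(vcpu, HCR_EL2) & HCR_VM);
	int i;

	for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
		struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];

		if (!kvm_s2_mmu_valid(mmu))
			continue;

		/* One shadow context per {VMID,VTTBR,VTCR} the guest uses */
		if (mmu->tlb_vttbr == vttbr && mmu->tlb_vtcr == vtcr &&
		    mmu->nested_stage2_enabled == s2_on)
			return mmu;
	}

	/* Miss: the caller would allocate or recycle one, TLB-style */
	return NULL;
}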
@@ -152,6 +152,7 @@
#define ESR_ELx_Xs_MASK (GENMASK_ULL(4, 0))
/* ISS field definitions for exceptions taken in to Hyp */
#define ESR_ELx_FSC_ADDRSZ (0x00)
#define ESR_ELx_CV (UL(1) << 24)
#define ESR_ELx_COND_SHIFT (20)
#define ESR_ELx_COND_MASK (UL(0xF) << ESR_ELx_COND_SHIFT)
......
@@ -232,6 +232,8 @@ extern void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
phys_addr_t start, unsigned long pages);
extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
extern int __kvm_tlbi_s1e2(struct kvm_s2_mmu *mmu, u64 va, u64 sys_encoding);
extern void __kvm_timer_set_cntvoff(u64 cntvoff);
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
......
@@ -189,6 +189,33 @@ struct kvm_s2_mmu {
uint64_t split_page_chunk_size;
struct kvm_arch *arch;
/*
* For a shadow stage-2 MMU, the virtual vttbr used by the
* host to parse the guest S2.
* This either contains:
* - the virtual VTTBR programmed by the guest hypervisor with
* CnP cleared
* - The value 1 (VMID=0, BADDR=0, CnP=1) if invalid
*
* We also cache the full VTCR which gets used for TLB invalidation,
* taking the ARM ARM's "Any of the bits in VTCR_EL2 are permitted
* to be cached in a TLB" to the letter.
*/
u64 tlb_vttbr;
u64 tlb_vtcr;
/*
* true when this represents a nested context where virtual
* HCR_EL2.VM == 1
*/
bool nested_stage2_enabled;
/*
* 0: Nobody is currently using this, check vttbr for validity
* >0: Somebody is actively using this.
*/
atomic_t refcnt;
};
struct kvm_arch_memory_slot {
@@ -256,6 +283,14 @@ struct kvm_arch {
*/
u64 fgu[__NR_FGT_GROUP_IDS__];
/*
* Stage 2 paging state for VMs with nested S2 using a virtual
* VMID.
*/
struct kvm_s2_mmu *nested_mmus;
size_t nested_mmus_size;
int nested_mmus_next;
/* Interrupt controller */
struct vgic_dist vgic;
@@ -1306,6 +1341,7 @@ void kvm_vcpu_load_vhe(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_vhe(struct kvm_vcpu *vcpu);
int __init kvm_set_ipa_limit(void);
u32 kvm_get_pa_bits(struct kvm *kvm);
#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);
......
@@ -98,6 +98,7 @@ alternative_cb_end
#include <asm/mmu_context.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_nested.h>
void kvm_update_va_mask(struct alt_instr *alt,
__le32 *origptr, __le32 *updptr, int nr_inst);
@@ -165,6 +166,10 @@ int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
int create_hyp_stack(phys_addr_t phys_addr, unsigned long *haddr);
void __init free_hyp_pgds(void);
void kvm_stage2_unmap_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size);
void kvm_stage2_flush_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end);
void kvm_stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end);
void stage2_unmap_vm(struct kvm *kvm);
int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long type);
void kvm_uninit_stage2_mmu(struct kvm *kvm);
@@ -326,5 +331,26 @@ static inline struct kvm *kvm_s2_mmu_to_kvm(struct kvm_s2_mmu *mmu)
{
return container_of(mmu->arch, struct kvm, arch);
}
static inline u64 get_vmid(u64 vttbr)
{
return (vttbr & VTTBR_VMID_MASK(kvm_get_vmid_bits())) >>
VTTBR_VMID_SHIFT;
}
static inline bool kvm_s2_mmu_valid(struct kvm_s2_mmu *mmu)
{
return !(mmu->tlb_vttbr & VTTBR_CNP_BIT);
}
static inline bool kvm_is_nested_s2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
{
/*
* Be careful, mmu may not be fully initialised so do not look at
* *any* of its fields.
*/
return &kvm->arch.mmu != mmu;
}
#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */
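The "value 1 (VMID=0, BADDR=0, CnP=1) if invalid" convention documented in struct kvm_s2_mmu pairs naturally with kvm_s2_mmu_valid() above. A minimal sketch of the invalidation side, using a hypothetical helper name (the real init/teardown code is in the collapsed part of this diff):

/* Hypothetical helper; the actual teardown path is not shown here */
static inline void s2_mmu_mark_invalid(struct kvm_s2_mmu *mmu)
{
	/*
	 * VMID=0, BADDR=0, CnP=1 can never match a guest-programmed
	 * VTTBR, since those are stored with CnP cleared, so setting
	 * tlb_vttbr to VTTBR_CNP_BIT makes kvm_s2_mmu_valid() fail.
	 */
	mmu->tlb_vttbr = VTTBR_CNP_BIT;
	mmu->nested_stage2_enabled = false;
}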
@@ -5,6 +5,7 @@
#include <linux/bitfield.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_pgtable.h>
static inline bool vcpu_has_nv(const struct kvm_vcpu *vcpu)
{
@@ -61,6 +62,125 @@ static inline u64 translate_ttbr0_el2_to_ttbr0_el1(u64 ttbr0)
}
extern bool forward_smc_trap(struct kvm_vcpu *vcpu);
extern void kvm_init_nested(struct kvm *kvm);
extern int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu);
extern void kvm_init_nested_s2_mmu(struct kvm_s2_mmu *mmu);
extern struct kvm_s2_mmu *lookup_s2_mmu(struct kvm_vcpu *vcpu);
union tlbi_info;
extern void kvm_s2_mmu_iterate_by_vmid(struct kvm *kvm, u16 vmid,
const union tlbi_info *info,
void (*)(struct kvm_s2_mmu *,
const union tlbi_info *));
extern void kvm_vcpu_load_hw_mmu(struct kvm_vcpu *vcpu);
extern void kvm_vcpu_put_hw_mmu(struct kvm_vcpu *vcpu);
struct kvm_s2_trans {
phys_addr_t output;
unsigned long block_size;
bool writable;
bool readable;
int level;
u32 esr;
u64 upper_attr;
};
static inline phys_addr_t kvm_s2_trans_output(struct kvm_s2_trans *trans)
{
return trans->output;
}
static inline unsigned long kvm_s2_trans_size(struct kvm_s2_trans *trans)
{
return trans->block_size;
}
static inline u32 kvm_s2_trans_esr(struct kvm_s2_trans *trans)
{
return trans->esr;
}
static inline bool kvm_s2_trans_readable(struct kvm_s2_trans *trans)
{
return trans->readable;
}
static inline bool kvm_s2_trans_writable(struct kvm_s2_trans *trans)
{
return trans->writable;
}
static inline bool kvm_s2_trans_executable(struct kvm_s2_trans *trans)
{
return !(trans->upper_attr & BIT(54));
}
extern int kvm_walk_nested_s2(struct kvm_vcpu *vcpu, phys_addr_t gipa,
struct kvm_s2_trans *result);
extern int kvm_s2_handle_perm_fault(struct kvm_vcpu *vcpu,
struct kvm_s2_trans *trans);
extern int kvm_inject_s2_fault(struct kvm_vcpu *vcpu, u64 esr_el2);
extern void kvm_nested_s2_wp(struct kvm *kvm);
extern void kvm_nested_s2_unmap(struct kvm *kvm);
extern void kvm_nested_s2_flush(struct kvm *kvm);
unsigned long compute_tlb_inval_range(struct kvm_s2_mmu *mmu, u64 val);
static inline bool kvm_supported_tlbi_s1e1_op(struct kvm_vcpu *vpcu, u32 instr)
{
struct kvm *kvm = vpcu->kvm;
u8 CRm = sys_reg_CRm(instr);
if (!(sys_reg_Op0(instr) == TLBI_Op0 &&
sys_reg_Op1(instr) == TLBI_Op1_EL1))
return false;
if (!(sys_reg_CRn(instr) == TLBI_CRn_XS ||
(sys_reg_CRn(instr) == TLBI_CRn_nXS &&
kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))))
return false;
if (CRm == TLBI_CRm_nROS &&
!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
return false;
if ((CRm == TLBI_CRm_RIS || CRm == TLBI_CRm_ROS ||
CRm == TLBI_CRm_RNS) &&
!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
return false;
return true;
}
static inline bool kvm_supported_tlbi_s1e2_op(struct kvm_vcpu *vpcu, u32 instr)
{
struct kvm *kvm = vpcu->kvm;
u8 CRm = sys_reg_CRm(instr);
if (!(sys_reg_Op0(instr) == TLBI_Op0 &&
sys_reg_Op1(instr) == TLBI_Op1_EL2))
return false;
if (!(sys_reg_CRn(instr) == TLBI_CRn_XS ||
(sys_reg_CRn(instr) == TLBI_CRn_nXS &&
kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))))
return false;
if (CRm == TLBI_CRm_IPAIS || CRm == TLBI_CRm_IPAONS)
return false;
if (CRm == TLBI_CRm_nROS &&
!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
return false;
if ((CRm == TLBI_CRm_RIS || CRm == TLBI_CRm_ROS ||
CRm == TLBI_CRm_RNS) &&
!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
return false;
return true;
}
int kvm_init_nv_sysregs(struct kvm *kvm);
@@ -76,4 +196,11 @@ static inline bool kvm_auth_eretax(struct kvm_vcpu *vcpu, u64 *elr)
}
#endif
#define KVM_NV_GUEST_MAP_SZ (KVM_PGTABLE_PROT_SW1 | KVM_PGTABLE_PROT_SW0)
static inline u64 kvm_encode_nested_level(struct kvm_s2_trans *trans)
{
return FIELD_PREP(KVM_NV_GUEST_MAP_SZ, trans->level);
}
#endif /* __ARM64_KVM_NESTED_H */
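kvm_encode_nested_level() stashes the guest's leaf S2 level in the PTE software bits so that later invalidations can be TTL-scoped. The matching decode is a one-liner with FIELD_GET(); the helper name below is made up for illustration and is not part of this diff:

/* Illustrative decode of the level stored by kvm_encode_nested_level() */
static inline u8 kvm_decode_nested_level(u64 pte_attr)
{
	return FIELD_GET(KVM_NV_GUEST_MAP_SZ, pte_attr);
}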
@@ -654,6 +654,23 @@
#define OP_AT_S12E0W sys_insn(AT_Op0, 4, AT_CRn, 8, 7)
/* TLBI instructions */
#define TLBI_Op0 1
#define TLBI_Op1_EL1 0 /* Accessible from EL1 or higher */
#define TLBI_Op1_EL2 4 /* Accessible from EL2 or higher */
#define TLBI_CRn_XS 8 /* Extra Slow (the common one) */
#define TLBI_CRn_nXS 9 /* not Extra Slow (which nobody uses) */
#define TLBI_CRm_IPAIS 0 /* S2 Inner-Shareable */
#define TLBI_CRm_nROS 1 /* non-Range, Outer-Shareable */
#define TLBI_CRm_RIS 2 /* Range, Inner-Shareable */
#define TLBI_CRm_nRIS 3 /* non-Range, Inner-Shareable */
#define TLBI_CRm_IPAONS 4 /* S2 Outer and Non-Shareable */
#define TLBI_CRm_ROS 5 /* Range, Outer-Shareable */
#define TLBI_CRm_RNS 6 /* Range, Non-Shareable */
#define TLBI_CRm_nRNS 7 /* non-Range, Non-Shareable */
#define OP_TLBI_VMALLE1OS sys_insn(1, 0, 8, 1, 0)
#define OP_TLBI_VAE1OS sys_insn(1, 0, 8, 1, 1)
#define OP_TLBI_ASIDE1OS sys_insn(1, 0, 8, 1, 2)
......
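As a worked example of these encodings (assuming the sys_insn()/sys_reg_*() helpers from <asm/sysreg.h>), OP_TLBI_VAE1OS above is sys_insn(1, 0, 8, 1, 1): Op0 is TLBI_Op0, Op1 is TLBI_Op1_EL1, CRn is TLBI_CRn_XS, CRm is TLBI_CRm_nROS (non-range, outer-shareable, hence the FEAT_TLBIOS check), and Op2 selects the by-VA form. This is exactly how kvm_supported_tlbi_s1e{1,2}_op() classify a trapped instruction; a throwaway sketch:

/* Throwaway sanity check, not kernel code: decode OP_TLBI_VAE1OS */
static inline void tlbi_vae1os_decode_sketch(void)
{
	u32 instr = OP_TLBI_VAE1OS;	/* sys_insn(1, 0, 8, 1, 1) */

	WARN_ON(sys_reg_Op0(instr) != TLBI_Op0);	/* TLBI space */
	WARN_ON(sys_reg_Op1(instr) != TLBI_Op1_EL1);	/* EL1 instruction */
	WARN_ON(sys_reg_CRn(instr) != TLBI_CRn_XS);	/* plain, not nXS */
	WARN_ON(sys_reg_CRm(instr) != TLBI_CRm_nROS);	/* non-range, OS */
}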
@@ -179,6 +179,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
mutex_unlock(&kvm->lock);
#endif
kvm_init_nested(kvm);
ret = kvm_share_hyp(kvm, kvm + 1);
if (ret)
return ret;
@@ -578,6 +580,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
struct kvm_s2_mmu *mmu;
int *last_ran;
if (vcpu_has_nv(vcpu))
kvm_vcpu_load_hw_mmu(vcpu);
mmu = vcpu->arch.hw_mmu;
last_ran = this_cpu_ptr(mmu->last_vcpu_ran);
@@ -633,6 +638,8 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
kvm_timer_vcpu_put(vcpu);
kvm_vgic_put(vcpu);
kvm_vcpu_pmu_restore_host(vcpu);
if (vcpu_has_nv(vcpu))
kvm_vcpu_put_hw_mmu(vcpu);
kvm_arm_vmid_clear_active();
vcpu_clear_on_unsupported_cpu(vcpu);
@@ -1491,6 +1498,10 @@ static int kvm_setup_vcpu(struct kvm_vcpu *vcpu)
if (kvm_vcpu_has_pmu(vcpu) && !kvm->arch.arm_pmu)
ret = kvm_arm_set_default_pmu(kvm);
/* Prepare for nested if required */
if (!ret && vcpu_has_nv(vcpu))
ret = kvm_vcpu_init_nested(vcpu);
return ret;
}
......
@@ -266,10 +266,59 @@ static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
__fpsimd_save_state(*host_data_ptr(fpsimd_state));
}
static bool kvm_hyp_handle_tlbi_el2(struct kvm_vcpu *vcpu, u64 *exit_code)
{
int ret = -EINVAL;
u32 instr;
u64 val;
/*
* Ideally, we would never trap on EL2 S1 TLB invalidations using
* the EL1 instructions when the guest's HCR_EL2.{E2H,TGE}=={1,1}.
* But "thanks" to FEAT_NV2, we don't trap writes to HCR_EL2,
* meaning that we can't track changes to the virtual TGE bit. So we
* have to leave HCR_EL2.TTLB set on the host. Oopsie...
*
* Try and handle these invalidations as quickly as possible, without
* fully exiting. Note that we don't need to consider any forwarding
* here, as having E2H+TGE set is the very definition of being
* InHost.
*
* For the lesser hypervisors out there that have failed to get on
* with the VHE program, we can also handle the nVHE style of EL2
* invalidation.
*/
if (!(is_hyp_ctxt(vcpu)))
return false;
instr = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));
if ((kvm_supported_tlbi_s1e1_op(vcpu, instr) &&
vcpu_el2_e2h_is_set(vcpu) && vcpu_el2_tge_is_set(vcpu)) ||
kvm_supported_tlbi_s1e2_op (vcpu, instr))
ret = __kvm_tlbi_s1e2(NULL, val, instr);
if (ret)
return false;
__kvm_skip_instr(vcpu);
return true;
}
static bool kvm_hyp_handle_sysreg_vhe(struct kvm_vcpu *vcpu, u64 *exit_code)
{
if (kvm_hyp_handle_tlbi_el2(vcpu, exit_code))
return true;
return kvm_hyp_handle_sysreg(vcpu, exit_code);
}
static const exit_handler_fn hyp_exit_handlers[] = {
[0 ... ESR_ELx_EC_MAX] = NULL,
[ESR_ELx_EC_CP15_32] = kvm_hyp_handle_cp15_32,
[ESR_ELx_EC_SYS64] = kvm_hyp_handle_sysreg_vhe,
[ESR_ELx_EC_SVE] = kvm_hyp_handle_fpsimd,
[ESR_ELx_EC_FP_ASIMD] = kvm_hyp_handle_fpsimd,
[ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low,
......
@@ -219,3 +219,150 @@ void __kvm_flush_vm_context(void)
__tlbi(alle1is);
dsb(ish);
}
/*
* TLB invalidation emulation for NV. For any given instruction, we
* perform the following transformations:
*
* - a TLBI targeting EL2 S1 is remapped to EL1 S1
* - a non-shareable TLBI is upgraded to being inner-shareable
* - an outer-shareable TLBI is also mapped to inner-shareable
* - an nXS TLBI is upgraded to XS
*/
int __kvm_tlbi_s1e2(struct kvm_s2_mmu *mmu, u64 va, u64 sys_encoding)
{
struct tlb_inv_context cxt;
int ret = 0;
/*
* The guest will have provided its own DSB ISHST before trapping.
* If it hasn't, that's its own problem, and we won't paper over it
* (plus, there is plenty of extra synchronisation before we even
* get here...).
*/
if (mmu)
enter_vmid_context(mmu, &cxt);
switch (sys_encoding) {
case OP_TLBI_ALLE2:
case OP_TLBI_ALLE2IS:
case OP_TLBI_ALLE2OS:
case OP_TLBI_VMALLE1:
case OP_TLBI_VMALLE1IS:
case OP_TLBI_VMALLE1OS:
case OP_TLBI_ALLE2NXS:
case OP_TLBI_ALLE2ISNXS:
case OP_TLBI_ALLE2OSNXS:
case OP_TLBI_VMALLE1NXS:
case OP_TLBI_VMALLE1ISNXS:
case OP_TLBI_VMALLE1OSNXS:
__tlbi(vmalle1is);
break;
case OP_TLBI_VAE2:
case OP_TLBI_VAE2IS:
case OP_TLBI_VAE2OS:
case OP_TLBI_VAE1:
case OP_TLBI_VAE1IS:
case OP_TLBI_VAE1OS:
case OP_TLBI_VAE2NXS:
case OP_TLBI_VAE2ISNXS:
case OP_TLBI_VAE2OSNXS:
case OP_TLBI_VAE1NXS:
case OP_TLBI_VAE1ISNXS:
case OP_TLBI_VAE1OSNXS:
__tlbi(vae1is, va);
break;
case OP_TLBI_VALE2:
case OP_TLBI_VALE2IS:
case OP_TLBI_VALE2OS:
case OP_TLBI_VALE1:
case OP_TLBI_VALE1IS:
case OP_TLBI_VALE1OS:
case OP_TLBI_VALE2NXS:
case OP_TLBI_VALE2ISNXS:
case OP_TLBI_VALE2OSNXS:
case OP_TLBI_VALE1NXS:
case OP_TLBI_VALE1ISNXS:
case OP_TLBI_VALE1OSNXS:
__tlbi(vale1is, va);
break;
case OP_TLBI_ASIDE1:
case OP_TLBI_ASIDE1IS:
case OP_TLBI_ASIDE1OS:
case OP_TLBI_ASIDE1NXS:
case OP_TLBI_ASIDE1ISNXS:
case OP_TLBI_ASIDE1OSNXS:
__tlbi(aside1is, va);
break;
case OP_TLBI_VAAE1:
case OP_TLBI_VAAE1IS:
case OP_TLBI_VAAE1OS:
case OP_TLBI_VAAE1NXS:
case OP_TLBI_VAAE1ISNXS:
case OP_TLBI_VAAE1OSNXS:
__tlbi(vaae1is, va);
break;
case OP_TLBI_VAALE1:
case OP_TLBI_VAALE1IS:
case OP_TLBI_VAALE1OS:
case OP_TLBI_VAALE1NXS:
case OP_TLBI_VAALE1ISNXS:
case OP_TLBI_VAALE1OSNXS:
__tlbi(vaale1is, va);
break;
case OP_TLBI_RVAE2:
case OP_TLBI_RVAE2IS:
case OP_TLBI_RVAE2OS:
case OP_TLBI_RVAE1:
case OP_TLBI_RVAE1IS:
case OP_TLBI_RVAE1OS:
case OP_TLBI_RVAE2NXS:
case OP_TLBI_RVAE2ISNXS:
case OP_TLBI_RVAE2OSNXS:
case OP_TLBI_RVAE1NXS:
case OP_TLBI_RVAE1ISNXS:
case OP_TLBI_RVAE1OSNXS:
__tlbi(rvae1is, va);
break;
case OP_TLBI_RVALE2:
case OP_TLBI_RVALE2IS:
case OP_TLBI_RVALE2OS:
case OP_TLBI_RVALE1:
case OP_TLBI_RVALE1IS:
case OP_TLBI_RVALE1OS:
case OP_TLBI_RVALE2NXS:
case OP_TLBI_RVALE2ISNXS:
case OP_TLBI_RVALE2OSNXS:
case OP_TLBI_RVALE1NXS:
case OP_TLBI_RVALE1ISNXS:
case OP_TLBI_RVALE1OSNXS:
__tlbi(rvale1is, va);
break;
case OP_TLBI_RVAAE1:
case OP_TLBI_RVAAE1IS:
case OP_TLBI_RVAAE1OS:
case OP_TLBI_RVAAE1NXS:
case OP_TLBI_RVAAE1ISNXS:
case OP_TLBI_RVAAE1OSNXS:
__tlbi(rvaae1is, va);
break;
case OP_TLBI_RVAALE1:
case OP_TLBI_RVAALE1IS:
case OP_TLBI_RVAALE1OS:
case OP_TLBI_RVAALE1NXS:
case OP_TLBI_RVAALE1ISNXS:
case OP_TLBI_RVAALE1OSNXS:
__tlbi(rvaale1is, va);
break;
default:
ret = -EINVAL;
}
dsb(ish);
isb();
if (mmu)
exit_vmid_context(&cxt);
return ret;
}
This diff is collapsed.
This diff is collapsed.
@@ -268,6 +268,12 @@ void kvm_reset_vcpu(struct kvm_vcpu *vcpu)
preempt_enable();
}
u32 kvm_get_pa_bits(struct kvm *kvm)
{
/* Fixed limit until we can configure ID_AA64MMFR0.PARange */
return kvm_ipa_limit;
}
u32 get_kvm_ipa_limit(void)
{
return kvm_ipa_limit;
......
This diff is collapsed.