Commit 20475f78 authored by Vladimir Murzin, committed by Marc Zyngier

arm64: KVM: Add support for 16-bit VMID

The ARMv8.1 architecture extension allows a choice between 8-bit and
16-bit VMIDs, so make use of this capability in KVM.
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Vladimir Murzin <vladimir.murzin@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
parent 8420dcd3
@@ -164,7 +164,7 @@
 #define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
 #define VTTBR_BADDR_MASK  (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
 #define VTTBR_VMID_SHIFT  _AC(48, ULL)
-#define VTTBR_VMID_MASK   (_AC(0xff, ULL) << VTTBR_VMID_SHIFT)
+#define VTTBR_VMID_MASK(size)  (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
 
 /* Hyp Syndrome Register (HSR) bits */
 #define HSR_EC_SHIFT      (26)
...
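[Note, not part of the commit] A quick way to sanity-check what the parameterised mask evaluates to. This is a stand-alone user-space sketch: VMID_MASK() here is a simplified stand-in for the kernel's VTTBR_VMID_MASK(), with _AT() reduced to a plain cast.

#include <assert.h>
#include <stdint.h>

#define VTTBR_VMID_SHIFT 48ULL
/* Stand-in for the kernel macro; _AT(u64, ...) becomes a plain cast. */
#define VMID_MASK(size)  (((uint64_t)((1 << (size)) - 1)) << VTTBR_VMID_SHIFT)

int main(void)
{
	/* 8-bit VMIDs: bits [55:48] of VTTBR. */
	assert(VMID_MASK(8)  == 0x00ff000000000000ULL);
	/* 16-bit VMIDs: bits [63:48] of VTTBR. */
	assert(VMID_MASK(16) == 0xffff000000000000ULL);
	return 0;
}

With 16 VMID bits the field simply grows upward from bit 48 to fill bits [63:48], which is why only the mask needs to become size-dependent while the shift stays fixed.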
@@ -279,6 +279,11 @@ static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
 					pgd_t *merged_hyp_pgd,
 					unsigned long hyp_idmap_start) { }
 
+static inline unsigned int kvm_get_vmid_bits(void)
+{
+	return 8;
+}
+
 #endif	/* !__ASSEMBLY__ */
 
 #endif /* __ARM_KVM_MMU_H__ */
@@ -59,7 +59,8 @@ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
 
 /* The VMID used in the VTTBR */
 static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
-static u8 kvm_next_vmid;
+static u32 kvm_next_vmid;
+static unsigned int kvm_vmid_bits __read_mostly;
 static DEFINE_SPINLOCK(kvm_vmid_lock);
 
 static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
@@ -434,11 +435,12 @@ static void update_vttbr(struct kvm *kvm)
 	kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
 	kvm->arch.vmid = kvm_next_vmid;
 	kvm_next_vmid++;
+	kvm_next_vmid &= (1 << kvm_vmid_bits) - 1;
 
 	/* update vttbr to be used with the new vmid */
 	pgd_phys = virt_to_phys(kvm_get_hwpgd(kvm));
 	BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK);
-	vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK;
+	vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
 	kvm->arch.vttbr = pgd_phys | vmid;
 
 	spin_unlock(&kvm_vmid_lock);
@@ -1135,6 +1137,10 @@ static int init_hyp_mode(void)
 
 	kvm_perf_init();
 
+	/* set size of VMID supported by CPU */
+	kvm_vmid_bits = kvm_get_vmid_bits();
+	kvm_info("%d-bit VMID\n", kvm_vmid_bits);
+
 	kvm_info("Hyp mode initialized successfully\n");
 
 	return 0;
...
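[Note, not part of the commit] The allocation path above now masks kvm_next_vmid with the runtime VMID width instead of relying on u8 overflow to wrap it. A minimal user-space sketch of that arithmetic, with hypothetical values; the names mirror the kernel variables but the harness itself is illustrative only.

#include <stdint.h>
#include <stdio.h>

#define VTTBR_VMID_SHIFT 48ULL

static uint32_t kvm_next_vmid;
static unsigned int kvm_vmid_bits = 16;	/* 8 without ARMv8.1 VMID16, 16 with it */

static uint64_t pack_vttbr(uint64_t pgd_phys, uint32_t vmid)
{
	uint64_t mask = ((uint64_t)((1 << kvm_vmid_bits) - 1)) << VTTBR_VMID_SHIFT;

	/* VMID goes in the upper bits of VTTBR, baddr in the lower bits. */
	return pgd_phys | (((uint64_t)vmid << VTTBR_VMID_SHIFT) & mask);
}

int main(void)
{
	uint32_t vmid;

	kvm_next_vmid = (1 << kvm_vmid_bits) - 1;	/* pretend we are at the last VMID */
	vmid = kvm_next_vmid;
	kvm_next_vmid++;
	kvm_next_vmid &= (1 << kvm_vmid_bits) - 1;	/* wraps back to 0 */

	printf("vttbr = %#llx, next vmid = %u\n",
	       (unsigned long long)pack_vttbr(0x40000000ULL, vmid), kvm_next_vmid);
	return 0;
}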
@@ -125,6 +125,7 @@
 #define VTCR_EL2_SL0_LVL1	(1 << 6)
 #define VTCR_EL2_T0SZ_MASK	0x3f
 #define VTCR_EL2_T0SZ_40B	24
+#define VTCR_EL2_VS		19
 
 /*
  * We configure the Stage-2 page tables to always restrict the IPA space to be
@@ -169,7 +170,7 @@
 #define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
 #define VTTBR_BADDR_MASK  (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
 #define VTTBR_VMID_SHIFT  (UL(48))
-#define VTTBR_VMID_MASK   (UL(0xFF) << VTTBR_VMID_SHIFT)
+#define VTTBR_VMID_MASK(size)  (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
 
 /* Hyp System Trap Register */
 #define HSTR_EL2_T(x)	(1 << x)
...
@@ -20,6 +20,7 @@
 
 #include <asm/page.h>
 #include <asm/memory.h>
+#include <asm/cpufeature.h>
 
 /*
  * As we only have the TTBR0_EL2 register, we cannot express
@@ -301,5 +302,12 @@ static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
 	merged_hyp_pgd[idmap_idx] = __pgd(__pa(boot_hyp_pgd) | PMD_TYPE_TABLE);
 }
 
+static inline unsigned int kvm_get_vmid_bits(void)
+{
+	int reg = read_system_reg(SYS_ID_AA64MMFR1_EL1);
+
+	return (cpuid_feature_extract_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
+}
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ARM64_KVM_MMU_H__ */
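[Note, not part of the commit] VMIDBits is a 4-bit field at bits [7:4] of ID_AA64MMFR1_EL1; the architecture defines 0b0000 as 8-bit VMIDs and 0b0010 as 16-bit VMIDs, which is why the helper compares the extracted field against 2. A stand-alone sketch of the same extraction with hard-coded register values (the kernel uses read_system_reg() and cpuid_feature_extract_field() instead):

#include <stdint.h>
#include <stdio.h>

#define ID_AA64MMFR1_VMIDBITS_SHIFT	4	/* VMIDBits field: bits [7:4] */

static unsigned int vmid_bits_from_mmfr1(uint64_t mmfr1)
{
	unsigned int field = (mmfr1 >> ID_AA64MMFR1_VMIDBITS_SHIFT) & 0xf;

	/* 0b0000 = 8-bit VMIDs, 0b0010 = 16-bit VMIDs (ARMv8.1 VMID16) */
	return (field == 2) ? 16 : 8;
}

int main(void)
{
	/* Hypothetical register values, not read from real hardware here. */
	printf("%u\n", vmid_bits_from_mmfr1(0x0));	/* -> 8  */
	printf("%u\n", vmid_bits_from_mmfr1(0x20));	/* -> 16 */
	return 0;
}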
@@ -94,6 +94,15 @@ __do_hyp_init:
 	 */
 	mrs	x5, ID_AA64MMFR0_EL1
 	bfi	x4, x5, #16, #3
+	/*
+	 * Read the VMIDBits bits from ID_AA64MMFR1_EL1 and set the VS bit in
+	 * VTCR_EL2.
+	 */
+	mrs	x5, ID_AA64MMFR1_EL1
+	ubfx	x5, x5, #5, #1
+	lsl	x5, x5, #VTCR_EL2_VS
+	orr	x4, x4, x5
+
 	msr	vtcr_el2, x4
 
 	mrs	x4, mair_el1
...
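[Note, not part of the commit] The added assembly extracts bit 5 of ID_AA64MMFR1_EL1 (bit 1 of the VMIDBits field, the bit that distinguishes the 0b0010 "16-bit" encoding from 0b0000) and moves it into VTCR_EL2.VS at bit 19, so the wider VMID is enabled only when the CPU reports support. A rough C restatement of those four instructions, using hypothetical register values; the real code runs at EL2 during hyp init.

#include <stdint.h>
#include <stdio.h>

#define VTCR_EL2_VS	19	/* VMID Size: 0 = 8-bit, 1 = 16-bit */

static uint64_t set_vtcr_vs(uint64_t vtcr, uint64_t mmfr1)
{
	uint64_t vs = (mmfr1 >> 5) & 0x1;	/* ubfx x5, x5, #5, #1 */

	return vtcr | (vs << VTCR_EL2_VS);	/* lsl + orr into the VTCR value */
}

int main(void)
{
	/* Hypothetical VTCR and ID register values, for illustration only. */
	printf("%#llx\n", (unsigned long long)set_vtcr_vs(0x80000000ULL, 0x20));
	return 0;
}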