Commit 30431774 authored by Will Deacon

Merge branch 'for-next/rip-vpipt' into for-next/core

* for-next/rip-vpipt:
  arm64: Rename reserved values for CTR_EL0.L1Ip
  arm64: Kill detection of VPIPT i-cache policy
  KVM: arm64: Remove VPIPT I-cache handling
parents dd9168ab f35c32ca
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -58,7 +58,6 @@ static inline unsigned int arch_slab_minalign(void)
 #define CTR_L1IP(ctr)		SYS_FIELD_GET(CTR_EL0, L1Ip, ctr)
 
 #define ICACHEF_ALIASING	0
-#define ICACHEF_VPIPT		1
 extern unsigned long __icache_flags;
 
 /*
@@ -70,11 +69,6 @@ static inline int icache_is_aliasing(void)
 	return test_bit(ICACHEF_ALIASING, &__icache_flags);
 }
 
-static __always_inline int icache_is_vpipt(void)
-{
-	return test_bit(ICACHEF_VPIPT, &__icache_flags);
-}
-
 static inline u32 cache_type_cwg(void)
 {
 	return SYS_FIELD_GET(CTR_EL0, CWG, read_cpuid_cachetype());
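Note for readers: with icache_is_vpipt() gone, the only query left on __icache_flags is the aliasing bit. Below is a minimal userspace sketch of the same flag pattern; set_flag()/test_flag() are simplified, non-atomic stand-ins for the kernel's set_bit()/test_bit(), and those helper names are ours, not the kernel's.

#include <stdio.h>

#define ICACHEF_ALIASING	0

static unsigned long icache_flags;

/* Simplified, non-atomic stand-in for the kernel's set_bit(). */
static void set_flag(int nr, unsigned long *addr)
{
	*addr |= 1UL << nr;
}

/* Simplified stand-in for the kernel's test_bit(). */
static int test_flag(int nr, const unsigned long *addr)
{
	return (*addr >> nr) & 1;
}

/* Mirrors icache_is_aliasing(): the single remaining policy query. */
static int icache_is_aliasing(void)
{
	return test_flag(ICACHEF_ALIASING, &icache_flags);
}

int main(void)
{
	/* As cpuinfo_detect_icache_policy() would do for a VIPT cache. */
	set_flag(ICACHEF_ALIASING, &icache_flags);
	printf("aliasing: %d\n", icache_is_aliasing());
	return 0;
}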
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -243,13 +243,6 @@ static inline size_t __invalidate_icache_max_range(void)
 
 static inline void __invalidate_icache_guest_page(void *va, size_t size)
 {
-	/*
-	 * VPIPT I-cache maintenance must be done from EL2. See comment in the
-	 * nVHE flavor of __kvm_tlb_flush_vmid_ipa().
-	 */
-	if (icache_is_vpipt() && read_sysreg(CurrentEL) != CurrentEL_EL2)
-		return;
-
 	/*
 	 * Blow the whole I-cache if it is aliasing (i.e. VIPT) or the
 	 * invalidation range exceeds our arbitrary limit on invalidations by
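With the EL2 early-return gone, __invalidate_icache_guest_page() is left with the single heuristic described in the surviving comment: invalidate the whole I-cache when it aliases, or when the range is too large to invalidate line by line. Below is a hedged, self-contained sketch of that control flow; every helper is a stub standing in for the kernel's icache_inval_all_pou(), icache_inval_pou() and __invalidate_icache_max_range(), and the cap value is invented for illustration.

#include <stdio.h>
#include <stddef.h>
#include <stdbool.h>

/* Stubs: the real kernel helpers issue IC instructions. */
static bool icache_is_aliasing(void) { return false; }
static size_t invalidate_icache_max_range(void) { return 32 * 4096; } /* invented cap */
static void icache_inval_all_pou(void)
{
	puts("invalidate entire I-cache to PoU");
}
static void icache_inval_pou(unsigned long start, unsigned long end)
{
	printf("invalidate I-cache range [%#lx, %#lx)\n", start, end);
}

/* Sketch of the post-VPIPT __invalidate_icache_guest_page() decision. */
static void invalidate_icache_guest_page(void *va, size_t size)
{
	if (icache_is_aliasing() || size > invalidate_icache_max_range())
		icache_inval_all_pou();
	else
		icache_inval_pou((unsigned long)va, (unsigned long)va + size);
}

int main(void)
{
	char page[64];

	invalidate_icache_guest_page(page, sizeof(page));
	return 0;
}

The design point is that a full invalidate bounds the cost of large ranges; per-line maintenance is reserved for small, non-aliasing ones.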
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -36,8 +36,6 @@ static struct cpuinfo_arm64 boot_cpu_data;
 static inline const char *icache_policy_str(int l1ip)
 {
 	switch (l1ip) {
-	case CTR_EL0_L1Ip_VPIPT:
-		return "VPIPT";
 	case CTR_EL0_L1Ip_VIPT:
 		return "VIPT";
 	case CTR_EL0_L1Ip_PIPT:
@@ -388,9 +386,6 @@ static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
 	switch (l1ip) {
 	case CTR_EL0_L1Ip_PIPT:
 		break;
-	case CTR_EL0_L1Ip_VPIPT:
-		set_bit(ICACHEF_VPIPT, &__icache_flags);
-		break;
 	case CTR_EL0_L1Ip_VIPT:
 	default:
 		/* Assume aliasing */
--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -12,7 +12,7 @@
 #include <nvhe/pkvm.h>
 #include <nvhe/trap_handler.h>
 
-/* Used by icache_is_vpipt(). */
+/* Used by icache_is_aliasing(). */
 unsigned long __icache_flags;
 
 /* Used by kvm_get_vttbr(). */
--- a/arch/arm64/kvm/hyp/nvhe/tlb.c
+++ b/arch/arm64/kvm/hyp/nvhe/tlb.c
@@ -105,28 +105,6 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
 	dsb(ish);
 	isb();
 
-	/*
-	 * If the host is running at EL1 and we have a VPIPT I-cache,
-	 * then we must perform I-cache maintenance at EL2 in order for
-	 * it to have an effect on the guest. Since the guest cannot hit
-	 * I-cache lines allocated with a different VMID, we don't need
-	 * to worry about junk out of guest reset (we nuke the I-cache on
-	 * VMID rollover), but we do need to be careful when remapping
-	 * executable pages for the same guest. This can happen when KSM
-	 * takes a CoW fault on an executable page, copies the page into
-	 * a page that was previously mapped in the guest and then needs
-	 * to invalidate the guest view of the I-cache for that page
-	 * from EL1. To solve this, we invalidate the entire I-cache when
-	 * unmapping a page from a guest if we have a VPIPT I-cache but
-	 * the host is running at EL1. As above, we could do better if
-	 * we had the VA.
-	 *
-	 * The moral of this story is: if you have a VPIPT I-cache, then
-	 * you should be running with VHE enabled.
-	 */
-	if (icache_is_vpipt())
-		icache_inval_all_pou();
-
 	__tlb_switch_to_host(&cxt);
 }
 
@@ -157,28 +135,6 @@ void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
 	dsb(nsh);
 	isb();
 
-	/*
-	 * If the host is running at EL1 and we have a VPIPT I-cache,
-	 * then we must perform I-cache maintenance at EL2 in order for
-	 * it to have an effect on the guest. Since the guest cannot hit
-	 * I-cache lines allocated with a different VMID, we don't need
-	 * to worry about junk out of guest reset (we nuke the I-cache on
-	 * VMID rollover), but we do need to be careful when remapping
-	 * executable pages for the same guest. This can happen when KSM
-	 * takes a CoW fault on an executable page, copies the page into
-	 * a page that was previously mapped in the guest and then needs
-	 * to invalidate the guest view of the I-cache for that page
-	 * from EL1. To solve this, we invalidate the entire I-cache when
-	 * unmapping a page from a guest if we have a VPIPT I-cache but
-	 * the host is running at EL1. As above, we could do better if
-	 * we had the VA.
-	 *
-	 * The moral of this story is: if you have a VPIPT I-cache, then
-	 * you should be running with VHE enabled.
-	 */
-	if (icache_is_vpipt())
-		icache_inval_all_pou();
-
 	__tlb_switch_to_host(&cxt);
 }
 
@@ -205,10 +161,6 @@ void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
 	dsb(ish);
 	isb();
 
-	/* See the comment in __kvm_tlb_flush_vmid_ipa() */
-	if (icache_is_vpipt())
-		icache_inval_all_pou();
-
 	__tlb_switch_to_host(&cxt);
 }
 
@@ -246,18 +198,5 @@ void __kvm_flush_vm_context(void)
 	/* Same remark as in __tlb_switch_to_guest() */
 	dsb(ish);
 	__tlbi(alle1is);
-
-	/*
-	 * VIPT and PIPT caches are not affected by VMID, so no maintenance
-	 * is necessary across a VMID rollover.
-	 *
-	 * VPIPT caches constrain lookup and maintenance to the active VMID,
-	 * so we need to invalidate lines with a stale VMID to avoid an ABA
-	 * race after multiple rollovers.
-	 *
-	 */
-	if (icache_is_vpipt())
-		asm volatile("ic ialluis");
-
 	dsb(ish);
 }
--- a/arch/arm64/kvm/hyp/vhe/tlb.c
+++ b/arch/arm64/kvm/hyp/vhe/tlb.c
@@ -216,18 +216,5 @@ void __kvm_flush_vm_context(void)
 {
 	dsb(ishst);
 	__tlbi(alle1is);
-
-	/*
-	 * VIPT and PIPT caches are not affected by VMID, so no maintenance
-	 * is necessary across a VMID rollover.
-	 *
-	 * VPIPT caches constrain lookup and maintenance to the active VMID,
-	 * so we need to invalidate lines with a stale VMID to avoid an ABA
-	 * race after multiple rollovers.
-	 *
-	 */
-	if (icache_is_vpipt())
-		asm volatile("ic ialluis");
-
 	dsb(ish);
 }
--- a/arch/arm64/tools/sysreg
+++ b/arch/arm64/tools/sysreg
@@ -2004,9 +2004,10 @@ Field	27:24	CWG
 Field	23:20	ERG
 Field	19:16	DminLine
 Enum	15:14	L1Ip
-	0b00	VPIPT
+# This was named as VPIPT in the ARM but now documented as reserved
+	0b00	RESERVED_VPIPT
 # This is named as AIVIVT in the ARM but documented as reserved
-	0b01	RESERVED
+	0b01	RESERVED_AIVIVT
 	0b10	VIPT
 	0b11	PIPT
 EndEnum
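To see the field this rename covers, CTR_EL0 can be read from EL0 on arm64 Linux (the kernel either enables the access or trap-and-emulates it). Below is a small sketch, assuming an arm64 build with GCC or Clang (binary literals are a compiler extension), that decodes the L1Ip bits [15:14] using the new names from the table above.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t ctr;

	/* EL0 read of the cache type register; Linux permits or emulates it. */
	asm volatile("mrs %0, ctr_el0" : "=r"(ctr));

	switch ((ctr >> 14) & 0x3) {	/* L1Ip, bits [15:14] */
	case 0b00: puts("L1Ip: reserved (formerly VPIPT)"); break;
	case 0b01: puts("L1Ip: reserved (formerly AIVIVT)"); break;
	case 0b10: puts("L1Ip: VIPT"); break;
	case 0b11: puts("L1Ip: PIPT"); break;
	}
	return 0;
}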