Commit a15f6939 authored by Marc Zyngier, committed by Christoffer Dall

KVM: arm/arm64: Split dcache/icache flushing

As we're about to introduce opportunistic invalidation of the icache,
let's split dcache and icache flushing.
Acked-by: Christoffer Dall <cdall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
parent d6811986
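In short, the single combined helper becomes two, and callers now pair them explicitly. A condensed sketch of the resulting call pattern on the stage-2 fault path (abridged from the mmu.c hunks below, not additional code):

    /* Before: one helper cleaned the d-side and invalidated the i-side. */
    coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE);

    /* After: the two operations are explicit, so a later patch can
     * elide or defer the icache invalidation when it isn't needed. */
    clean_dcache_guest_page(vcpu, pfn, PAGE_SIZE);      /* clean dcache to PoC */
    invalidate_icache_guest_page(vcpu, pfn, PAGE_SIZE); /* invalidate icache   */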
arch/arm/include/asm/kvm_mmu.h:

@@ -126,7 +126,34 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
 	return (vcpu_cp15(vcpu, c1_SCTLR) & 0b101) == 0b101;
 }
 
-static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
+static inline void __clean_dcache_guest_page(struct kvm_vcpu *vcpu,
 					     kvm_pfn_t pfn,
 					     unsigned long size)
+{
+	/*
+	 * Clean the dcache to the Point of Coherency.
+	 *
+	 * We need to do this through a kernel mapping (using the
+	 * user-space mapping has proved to be the wrong
+	 * solution). For that, we need to kmap one page at a time,
+	 * and iterate over the range.
+	 */
+	VM_BUG_ON(size & ~PAGE_MASK);
+
+	while (size) {
+		void *va = kmap_atomic_pfn(pfn);
+
+		kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+		size -= PAGE_SIZE;
+		pfn++;
+
+		kunmap_atomic(va);
+	}
+}
+
+static inline void __invalidate_icache_guest_page(struct kvm_vcpu *vcpu,
+						  kvm_pfn_t pfn,
+						  unsigned long size)
 {
@@ -141,21 +168,23 @@ static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
 	 *
 	 * VIVT caches are tagged using both the ASID and the VMID and doesn't
 	 * need any kind of flushing (DDI 0406C.b - Page B3-1392).
-	 *
-	 * We need to do this through a kernel mapping (using the
-	 * user-space mapping has proved to be the wrong
-	 * solution). For that, we need to kmap one page at a time,
-	 * and iterate over the range.
 	 */
 	VM_BUG_ON(size & ~PAGE_MASK);
 
+	if (icache_is_vivt_asid_tagged())
+		return;
+
+	if (!icache_is_pipt()) {
+		/* any kind of VIPT cache */
+		__flush_icache_all();
+		return;
+	}
+
+	/* PIPT cache. As for the d-side, use a temporary kernel mapping. */
 	while (size) {
 		void *va = kmap_atomic_pfn(pfn);
 
-		kvm_flush_dcache_to_poc(va, PAGE_SIZE);
-		if (icache_is_pipt())
-			__cpuc_coherent_user_range((unsigned long)va,
-						   (unsigned long)va + PAGE_SIZE);
+		__cpuc_coherent_user_range((unsigned long)va,
+					   (unsigned long)va + PAGE_SIZE);
@@ -164,11 +193,6 @@ static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
 		kunmap_atomic(va);
 	}
-
-	if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) {
-		/* any kind of VIPT cache */
-		__flush_icache_all();
-	}
 }
 
 static inline void __kvm_flush_dcache_pte(pte_t pte)
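Read together, the three hunks above leave the 32-bit __invalidate_icache_guest_page dispatching on the icache type up front, rather than deciding inside (and after) the page-copy loop. A condensed view of the resulting control flow (a reading of the diff, not extra code):

    if (icache_is_vivt_asid_tagged())
            return;                 /* VIVT tagged by ASID+VMID: no flush needed */

    if (!icache_is_pipt()) {
            __flush_icache_all();   /* any kind of VIPT cache: flush it all */
            return;
    }

    /* PIPT: invalidate by range via a temporary kernel mapping,
     * one page at a time, as on the d-side. */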
arch/arm64/include/asm/kvm_mmu.h:

@@ -230,19 +230,26 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
 	return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
 }
 
-static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
+static inline void __clean_dcache_guest_page(struct kvm_vcpu *vcpu,
 					     kvm_pfn_t pfn,
 					     unsigned long size)
 {
 	void *va = page_address(pfn_to_page(pfn));
 
 	kvm_flush_dcache_to_poc(va, size);
+}
+
+static inline void __invalidate_icache_guest_page(struct kvm_vcpu *vcpu,
+						  kvm_pfn_t pfn,
+						  unsigned long size)
+{
 	if (icache_is_aliasing()) {
 		/* any kind of VIPT cache */
 		__flush_icache_all();
 	} else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
 		/* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
 		void *va = page_address(pfn_to_page(pfn));
 
 		flush_icache_range((unsigned long)va,
 				   (unsigned long)va + size);
 	}
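The arm64 helper covers three cases, the third of which is implicit: an aliasing VIPT icache is flushed wholesale, a PIPT icache (or a VPIPT icache with the kernel running at EL2) is invalidated by range, and a VPIPT icache at EL1 is deliberately left alone here, that maintenance being handled as described by the comment in __kvm_tlb_flush_vmid_ipa. Condensed from the hunk above:

    if (icache_is_aliasing()) {
            __flush_icache_all();   /* aliasing VIPT: can't target lines by VA */
    } else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
            flush_icache_range(va, va + size);      /* PIPT, or VPIPT at EL2 */
    }
    /* else: VPIPT at EL1, handled via __kvm_tlb_flush_vmid_ipa */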
virt/kvm/arm/mmu.c:

@@ -1257,10 +1257,16 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 	kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
 }
 
-static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, kvm_pfn_t pfn,
-				      unsigned long size)
+static void clean_dcache_guest_page(struct kvm_vcpu *vcpu, kvm_pfn_t pfn,
+				    unsigned long size)
 {
-	__coherent_cache_guest_page(vcpu, pfn, size);
+	__clean_dcache_guest_page(vcpu, pfn, size);
+}
+
+static void invalidate_icache_guest_page(struct kvm_vcpu *vcpu, kvm_pfn_t pfn,
+					 unsigned long size)
+{
+	__invalidate_icache_guest_page(vcpu, pfn, size);
 }
 
 static void kvm_send_hwpoison_signal(unsigned long address,
@@ -1391,7 +1397,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			new_pmd = kvm_s2pmd_mkwrite(new_pmd);
 			kvm_set_pfn_dirty(pfn);
 		}
-		coherent_cache_guest_page(vcpu, pfn, PMD_SIZE);
+		clean_dcache_guest_page(vcpu, pfn, PMD_SIZE);
+		invalidate_icache_guest_page(vcpu, pfn, PMD_SIZE);
+
 		ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
 	} else {
 		pte_t new_pte = pfn_pte(pfn, mem_type);
@@ -1401,7 +1409,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			kvm_set_pfn_dirty(pfn);
 			mark_page_dirty(kvm, gfn);
 		}
-		coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE);
+		clean_dcache_guest_page(vcpu, pfn, PAGE_SIZE);
+		invalidate_icache_guest_page(vcpu, pfn, PAGE_SIZE);
+
 		ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);
 	}