Commit 2dbf12ae authored by Catalin Marinas, committed by Marc Zyngier

KVM: arm64: Simplify the sanitise_mte_tags() logic

Currently sanitise_mte_tags() checks if it's an online page before
attempting to sanitise the tags. Such detection should be done in the
caller via the VM_MTE_ALLOWED vma flag. Since kvm_set_spte_gfn() does
not have the vma, leave the page unmapped if not already tagged. Tag
initialisation will be done on a subsequent access fault in
user_mem_abort().
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
[pcc@google.com: fix the page initializer]
Signed-off-by: Peter Collingbourne <pcc@google.com>
Reviewed-by: Steven Price <steven.price@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Peter Collingbourne <pcc@google.com>
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20221104011041.290951-4-pcc@google.com
parent e059853d
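
The net effect, condensed from the diff below into a short sketch (kernel-internal code rather than a standalone compilable example; all names shown are taken from the diff itself):

    /* user_mem_abort(): the VMA is in scope, so the policy check lives in the caller. */
    if (fault_status != FSC_PERM && !device && kvm_has_mte(kvm)) {
            if ((vma->vm_flags & VM_MTE_ALLOWED) &&
                !(vma->vm_flags & VM_SHARED)) {
                    /* sanitise_mte_tags() is now void and cannot fail */
                    sanitise_mte_tags(kvm, pfn, vma_pagesize);
            } else {
                    ret = -EFAULT;
                    goto out_unlock;
            }
    }

    /*
     * kvm_set_spte_gfn(): no VMA is available here, so an untagged page is
     * simply left unmapped; its tags are initialised on the subsequent
     * access fault via user_mem_abort().
     */
    if (kvm_has_mte(kvm) && !page_mte_tagged(pfn_to_page(pfn)))
            return false;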
@@ -1091,23 +1091,14 @@ static int get_vma_page_shift(struct vm_area_struct *vma, unsigned long hva)
  * - mmap_lock protects between a VM faulting a page in and the VMM performing
  *   an mprotect() to add VM_MTE
  */
-static int sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
-                             unsigned long size)
+static void sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
+                              unsigned long size)
 {
         unsigned long i, nr_pages = size >> PAGE_SHIFT;
-        struct page *page;
+        struct page *page = pfn_to_page(pfn);
 
         if (!kvm_has_mte(kvm))
-                return 0;
-
-        /*
-         * pfn_to_online_page() is used to reject ZONE_DEVICE pages
-         * that may not support tags.
-         */
-        page = pfn_to_online_page(pfn);
-        if (!page)
-                return -EFAULT;
+                return;
 
         for (i = 0; i < nr_pages; i++, page++) {
                 if (!page_mte_tagged(page)) {
@@ -1115,8 +1106,6 @@ static int sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
                         set_page_mte_tagged(page);
                 }
         }
-
-        return 0;
 }
 
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
@@ -1127,7 +1116,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         bool write_fault, writable, force_pte = false;
         bool exec_fault;
         bool device = false;
-        bool shared;
         unsigned long mmu_seq;
         struct kvm *kvm = vcpu->kvm;
         struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
@@ -1177,8 +1165,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                 vma_shift = get_vma_page_shift(vma, hva);
         }
 
-        shared = (vma->vm_flags & VM_SHARED);
-
         switch (vma_shift) {
 #ifndef __PAGETABLE_PMD_FOLDED
         case PUD_SHIFT:
@@ -1299,12 +1285,13 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         if (fault_status != FSC_PERM && !device && kvm_has_mte(kvm)) {
                 /* Check the VMM hasn't introduced a new VM_SHARED VMA */
-                if (!shared)
-                        ret = sanitise_mte_tags(kvm, pfn, vma_pagesize);
-                else
+                if ((vma->vm_flags & VM_MTE_ALLOWED) &&
+                    !(vma->vm_flags & VM_SHARED)) {
+                        sanitise_mte_tags(kvm, pfn, vma_pagesize);
+                } else {
                         ret = -EFAULT;
-                if (ret)
                         goto out_unlock;
+                }
         }
 
         if (writable)
@@ -1526,15 +1513,18 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
 bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 {
         kvm_pfn_t pfn = pte_pfn(range->pte);
-        int ret;
 
         if (!kvm->arch.mmu.pgt)
                 return false;
 
         WARN_ON(range->end - range->start != 1);
 
-        ret = sanitise_mte_tags(kvm, pfn, PAGE_SIZE);
-        if (ret)
+        /*
+         * If the page isn't tagged, defer to user_mem_abort() for sanitising
+         * the MTE tags. The S2 pte should have been unmapped by
+         * mmu_notifier_invalidate_range_end().
+         */
+        if (kvm_has_mte(kvm) && !page_mte_tagged(pfn_to_page(pfn)))
                 return false;
 
         /*