Commit aeda9130 authored by Marc Zyngier, committed by Christoffer Dall

arm/arm64: KVM: Optimize handling of Access Flag faults

Now that we have page aging in Stage-2, it becomes obvious that
we're doing way too much work handling the fault.

The page is not going anywhere (it is still mapped), the page
tables are already allocated, and all we want is to flip a bit
in the PMD or PTE. We can also avoid any form of TLB invalidation,
since an entry with the AF bit clear is not allowed to be cached
in the TLB.

An obvious solution is to have a separate handler for FSC_ACCESS,
where we pride ourselves on doing only the bare minimum amount
of work.
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Acked-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
parent 35307b9a
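For context, these faults only occur because the parent commit (35307b9a) introduced Stage-2 page aging, which clears the Access Flag so that the next guest access traps back to the host with FSC_ACCESS. Below is a minimal illustrative sketch of that aging direction, not the parent commit's exact code: stage2_age_pte is a hypothetical name, while pte_young/pte_mkold and kvm_tlb_flush_vmid_ipa are the real kernel helpers.

/* Sketch only: age a stage-2 mapping by clearing its Access Flag.
 * Unlike handle_access_fault() below, this direction does need TLB
 * maintenance, since the old entry (AF set) may still be cached.
 */
static int stage2_age_pte(struct kvm *kvm, phys_addr_t ipa, pte_t *pte)
{
        if (!pte_young(*pte))
                return 0;                       /* already old */

        *pte = pte_mkold(*pte);                 /* clear the AF bit */
        kvm_tlb_flush_vmid_ipa(kvm, ipa);       /* drop the stale entry */
        return 1;                               /* page was young */
}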
@@ -1304,6 +1304,46 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        return ret;
}
/*
 * Resolve the access fault by making the page young again.
 * Note that because the faulting entry is guaranteed not to be
 * cached in the TLB, we don't need to invalidate anything.
 */
static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
{
        pmd_t *pmd;
        pte_t *pte;
        pfn_t pfn;
        bool pfn_valid = false;

        trace_kvm_access_fault(fault_ipa);

        spin_lock(&vcpu->kvm->mmu_lock);

        pmd = stage2_get_pmd(vcpu->kvm, NULL, fault_ipa);
        if (!pmd || pmd_none(*pmd))     /* Nothing there */
                goto out;

        if (kvm_pmd_huge(*pmd)) {       /* THP, HugeTLB */
                *pmd = pmd_mkyoung(*pmd);
                pfn = pmd_pfn(*pmd);
                pfn_valid = true;
                goto out;
        }

        pte = pte_offset_kernel(pmd, fault_ipa);
        if (pte_none(*pte))             /* Nothing there either */
                goto out;

        *pte = pte_mkyoung(*pte);       /* Just a page... */
        pfn = pte_pfn(*pte);
        pfn_valid = true;
out:
        spin_unlock(&vcpu->kvm->mmu_lock);
        if (pfn_valid)
                kvm_set_pfn_accessed(pfn);
}
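Two details of the function above are worth calling out. The walk and the bit flip happen entirely under mmu_lock, but kvm_set_pfn_accessed() is deferred until after the unlock, keeping the critical section minimal while still feeding the access back into the host's page reclaim. And, per the comment, no TLB maintenance is needed: an entry that faulted with the AF bit clear cannot, by the architecture's rules, have been cached in the TLB.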
/**
 * kvm_handle_guest_abort - handles all 2nd stage aborts
 * @vcpu:       the VCPU pointer
@@ -1371,6 +1411,12 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
        /* Userspace should not be able to register out-of-bounds IPAs */
        VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE);
        if (fault_status == FSC_ACCESS) {
                handle_access_fault(vcpu, fault_ipa);
                ret = 1;
                goto out_unlock;
        }
        ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
        if (ret == 0)
                ret = 1;
...
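The fault_status compared against FSC_ACCESS above is derived earlier in kvm_handle_guest_abort() from the fault syndrome register. A sketch of that derivation, assuming the arm64 definitions of this era: the 0x3c mask folds the per-level fault codes together, so an access flag fault at any lookup level (0x09..0x0b) compares equal to FSC_ACCESS (0x08).

#define ESR_ELx_FSC_TYPE        (0x3c)  /* mask off the level bits */
#define FSC_ACCESS              (0x08)  /* access flag fault, any level */

static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
        /* Levels 1..3 encode as 0x09..0x0b; masking yields 0x08 */
        return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
}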
@@ -68,6 +68,21 @@ TRACE_EVENT(kvm_guest_fault,
                  __entry->hxfar, __entry->vcpu_pc)
);
TRACE_EVENT(kvm_access_fault,
        TP_PROTO(unsigned long ipa),
        TP_ARGS(ipa),

        TP_STRUCT__entry(
                __field( unsigned long, ipa )
        ),

        TP_fast_assign(
                __entry->ipa = ipa;
        ),

        TP_printk("IPA: %lx", __entry->ipa)
);
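Once applied, the new tracepoint can be enabled like any other KVM trace event; assuming the usual tracefs layout, writing 1 to /sys/kernel/debug/tracing/events/kvm/kvm_access_fault/enable logs the faulting IPA of each resolved access fault, which is handy for confirming that aging-induced faults take this fast path rather than user_mem_abort().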
TRACE_EVENT(kvm_irq_line,
        TP_PROTO(unsigned int type, int vcpu_idx, int irq_num, int level),
        TP_ARGS(type, vcpu_idx, irq_num, level),
...