Commit d7c55201 authored by Xiao Guangrong, committed by Avi Kivity

KVM: MMU: abstract some functions to handle fault pfn

Introduce handle_abnormal_pfn() to handle fault pfns on the page fault path,
and mmu_invalid_pfn() to handle fault pfns on the prefetch path.

This is preparatory work for MMIO page fault support.
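As a reviewer aid, a minimal userspace sketch of the split the two helpers
implement follows. It is not the kernel code: the pfn encodings, the return
values, and the main() driver are stand-ins for KVM's real error-pfn
machinery, kept only to make the control flow compilable in isolation.

    #include <stdbool.h>
    #include <stdio.h>

    typedef unsigned long long pfn_t;

    /* Stand-in encodings; KVM's real error pfns are different. */
    #define PFN_INVALID ((pfn_t)-1) /* models hwpoison/fault pfns */
    #define PFN_NOSLOT  ((pfn_t)-2) /* models a pfn outside any memslot (MMIO) */

    static bool is_invalid_pfn(pfn_t pfn) { return pfn == PFN_INVALID; }
    static bool is_noslot_pfn(pfn_t pfn)  { return pfn == PFN_NOSLOT; }

    /* Prefetch path: any abnormal pfn simply aborts the prefetch. */
    static bool mmu_invalid_pfn(pfn_t pfn)
    {
            return is_invalid_pfn(pfn) || is_noslot_pfn(pfn);
    }

    /* Fault path: invalid pfns are errors, noslot pfns go to MMIO handling. */
    static bool handle_abnormal_pfn(pfn_t pfn, int *ret_val)
    {
            if (is_invalid_pfn(pfn)) {
                    *ret_val = -1;  /* kvm_handle_bad_page() in the real patch */
                    return true;
            }
            if (is_noslot_pfn(pfn)) {
                    *ret_val = 1;   /* cache MMIO info; caller emulates the access */
                    return true;
            }
            return false;           /* normal pfn: continue mapping the page */
    }

    int main(void)
    {
            pfn_t pfns[] = { 0x1234, PFN_INVALID, PFN_NOSLOT };

            for (int i = 0; i < 3; i++) {
                    int r;
                    if (handle_abnormal_pfn(pfns[i], &r))
                            printf("pfn %#llx: abnormal on fault, r = %d\n", pfns[i], r);
                    else
                            printf("pfn %#llx: normal on fault\n", pfns[i]);
                    printf("pfn %#llx: %s on prefetch\n", pfns[i],
                           mmu_invalid_pfn(pfns[i]) ? "skipped" : "prefetched");
            }
            return 0;
    }

The asymmetry is the point of the patch: the prefetch path only needs a
predicate (give up on anything abnormal), while the fault path must
distinguish a genuine error from a not-present slot that should be treated
as MMIO.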
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent fce92dce
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2221,18 +2221,15 @@ static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *
 	send_sig_info(SIGBUS, &info, tsk);
 }
 
-static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gva_t gva,
-			       unsigned access, gfn_t gfn, pfn_t pfn)
+static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, pfn_t pfn)
 {
 	kvm_release_pfn_clean(pfn);
 	if (is_hwpoison_pfn(pfn)) {
 		kvm_send_hwpoison_signal(gfn_to_hva(vcpu->kvm, gfn), current);
 		return 0;
-	} else if (is_fault_pfn(pfn))
-		return -EFAULT;
+	}
 
-	vcpu_cache_mmio_info(vcpu, gva, gfn, access);
-	return 1;
+	return -EFAULT;
 }
 
 static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
@@ -2277,6 +2274,33 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
 	}
 }
 
+static bool mmu_invalid_pfn(pfn_t pfn)
+{
+	return unlikely(is_invalid_pfn(pfn) || is_noslot_pfn(pfn));
+}
+
+static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
+				pfn_t pfn, unsigned access, int *ret_val)
+{
+	bool ret = true;
+
+	/* The pfn is invalid, report the error! */
+	if (unlikely(is_invalid_pfn(pfn))) {
+		*ret_val = kvm_handle_bad_page(vcpu, gfn, pfn);
+		goto exit;
+	}
+
+	if (unlikely(is_noslot_pfn(pfn))) {
+		vcpu_cache_mmio_info(vcpu, gva, gfn, access);
+		*ret_val = 1;
+		goto exit;
+	}
+
+	ret = false;
+exit:
+	return ret;
+}
+
 static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
 			 gva_t gva, pfn_t *pfn, bool write, bool *writable);
@@ -2311,9 +2335,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn,
 	if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable))
 		return 0;
 
-	/* mmio */
-	if (is_error_pfn(pfn))
-		return kvm_handle_bad_page(vcpu, v, ACC_ALL, gfn, pfn);
+	if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r))
+		return r;
 
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (mmu_notifier_retry(vcpu, mmu_seq))
@@ -2685,9 +2708,9 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 	if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
 		return 0;
 
-	/* mmio */
-	if (is_error_pfn(pfn))
-		return kvm_handle_bad_page(vcpu, 0, 0, gfn, pfn);
+	if (handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r))
+		return r;
 
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (mmu_notifier_retry(vcpu, mmu_seq))
 		goto out_unlock;
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -367,7 +367,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
 	pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte, true);
 	pfn = gfn_to_pfn_atomic(vcpu->kvm, gpte_to_gfn(gpte));
-	if (is_error_pfn(pfn)) {
+	if (mmu_invalid_pfn(pfn)) {
 		kvm_release_pfn_clean(pfn);
 		return;
 	}
@@ -445,7 +445,7 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
 		gfn = gpte_to_gfn(gpte);
 		pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
 				      pte_access & ACC_WRITE_MASK);
-		if (is_error_pfn(pfn)) {
+		if (mmu_invalid_pfn(pfn)) {
 			kvm_release_pfn_clean(pfn);
 			break;
 		}
@@ -615,10 +615,10 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 			 &map_writable))
 		return 0;
 
-	/* mmio */
-	if (is_error_pfn(pfn))
-		return kvm_handle_bad_page(vcpu, mmu_is_nested(vcpu) ? 0 :
-					   addr, walker.pte_access, walker.gfn, pfn);
+	if (handle_abnormal_pfn(vcpu, mmu_is_nested(vcpu) ? 0 : addr,
+				walker.gfn, pfn, walker.pte_access, &r))
+		return r;
 
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (mmu_notifier_retry(vcpu, mmu_seq))
 		goto out_unlock;