Commit cb9aaa30 authored by Xiao Guangrong, committed by Avi Kivity

KVM: do not release the error pfn

After commit a2766325, the error pfn is replaced by an error code and
no longer needs to be released.

[ The patch has been compile tested for powerpc ]
Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 6cede2e6
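
For orientation before the diff, here is a minimal sketch of the caller-side pattern this series establishes. It is not code from the patch: example_map_gfn() and its error handling are hypothetical, while gfn_to_pfn(), is_error_pfn() and kvm_release_pfn_clean() are the real KVM helpers. Since a2766325 an error pfn is a bare sentinel value with no page reference behind it, so callers bail out on is_error_pfn() and only release pfns that actually pin a page:

/* Hedged sketch, not part of the patch: a typical caller after a2766325. */
static int example_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
        pfn_t pfn = gfn_to_pfn(vcpu->kvm, gfn);

        if (is_error_pfn(pfn))
                return -EFAULT;         /* no kvm_release_pfn_clean() on the error path */

        /* ... install the mapping ... */

        kvm_release_pfn_clean(pfn);     /* only a real pfn carries a page reference */
        return 0;
}
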
@@ -524,7 +524,6 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 	if (is_error_pfn(pfn)) {
 		printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
 				(long)gfn);
-		kvm_release_pfn_clean(pfn);
 		return;
 	}
...
@@ -2496,7 +2496,9 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 			rmap_recycle(vcpu, sptep, gfn);
 		}
 	}
-	kvm_release_pfn_clean(pfn);
+
+	if (!is_error_pfn(pfn))
+		kvm_release_pfn_clean(pfn);
 }
 
 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
@@ -2648,7 +2650,6 @@ static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *
 static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, pfn_t pfn)
 {
-	kvm_release_pfn_clean(pfn);
 	if (pfn == KVM_PFN_ERR_HWPOISON) {
 		kvm_send_hwpoison_signal(gfn_to_hva(vcpu->kvm, gfn), current);
 		return 0;
@@ -3273,8 +3274,6 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
 	if (!async)
 		return false; /* *pfn has correct page already */
 
-	kvm_release_pfn_clean(*pfn);
-
 	if (!prefault && can_do_async_pf(vcpu)) {
 		trace_kvm_try_async_get_page(gva, gfn);
 		if (kvm_find_async_pf_gfn(vcpu, gfn)) {
...
@@ -116,10 +116,8 @@ static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level)
 	gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
 	pfn = gfn_to_pfn_atomic(vcpu->kvm, gfn);
 
-	if (is_error_pfn(pfn)) {
-		kvm_release_pfn_clean(pfn);
+	if (is_error_pfn(pfn))
 		return;
-	}
 
 	hpa = pfn << PAGE_SHIFT;
 	if ((*sptep & PT64_BASE_ADDR_MASK) != hpa)
...
@@ -370,10 +370,8 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
 	pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte, true);
 	pfn = gfn_to_pfn_atomic(vcpu->kvm, gpte_to_gfn(gpte));
-	if (mmu_invalid_pfn(pfn)) {
-		kvm_release_pfn_clean(pfn);
+	if (mmu_invalid_pfn(pfn))
 		return;
-	}
 
 	/*
 	 * we call mmu_set_spte() with host_writable = true because that
@@ -448,10 +446,8 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
 		gfn = gpte_to_gfn(gpte);
 		pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
 				pte_access & ACC_WRITE_MASK);
-		if (mmu_invalid_pfn(pfn)) {
-			kvm_release_pfn_clean(pfn);
+		if (mmu_invalid_pfn(pfn))
 			break;
-		}
 
 		mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
 			     NULL, PT_PAGE_TABLE_LEVEL, gfn,
...
@@ -107,7 +107,6 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
 		 */
 		pfn = kvm_pin_pages(slot, gfn, page_size);
 		if (is_error_pfn(pfn)) {
-			kvm_release_pfn_clean(pfn);
 			gfn += 1;
 			continue;
 		}
...
@@ -102,9 +102,6 @@ static bool largepages_enabled = true;
 bool kvm_is_mmio_pfn(pfn_t pfn)
 {
-	if (is_error_pfn(pfn))
-		return false;
-
 	if (pfn_valid(pfn)) {
 		int reserved;
 		struct page *tail = pfn_to_page(pfn);
@@ -1165,10 +1162,13 @@ EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
 static struct page *kvm_pfn_to_page(pfn_t pfn)
 {
-	WARN_ON(kvm_is_mmio_pfn(pfn));
+	if (is_error_pfn(pfn))
+		return KVM_ERR_PTR_BAD_PAGE;
 
-	if (is_error_pfn(pfn) || kvm_is_mmio_pfn(pfn))
+	if (kvm_is_mmio_pfn(pfn)) {
+		WARN_ON(1);
 		return KVM_ERR_PTR_BAD_PAGE;
+	}
 
 	return pfn_to_page(pfn);
 }
@@ -1193,7 +1193,9 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean);
 void kvm_release_pfn_clean(pfn_t pfn)
 {
-	if (!is_error_pfn(pfn) && !kvm_is_mmio_pfn(pfn))
+	WARN_ON(is_error_pfn(pfn));
+
+	if (!kvm_is_mmio_pfn(pfn))
 		put_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
...
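
Usage note on the last two hunks, as a sketch under the assumption that is_error_page() recognises KVM_ERR_PTR_BAD_PAGE in this series; example_read_gfn() is hypothetical, the other helpers are existing KVM API. Page-level callers keep their familiar shape: kvm_pfn_to_page() maps an error pfn to KVM_ERR_PTR_BAD_PAGE, and kvm_release_pfn_clean() now warns if it is ever handed an error pfn.

/* Hedged sketch, not part of the patch: page-level caller semantics. */
static int example_read_gfn(struct kvm *kvm, gfn_t gfn)
{
        struct page *page = gfn_to_page(kvm, gfn);      /* goes through kvm_pfn_to_page() */

        if (is_error_page(page))
                return -EFAULT;         /* sentinel page, nothing to put */

        /* ... read the page contents ... */

        kvm_release_page_clean(page);   /* real pages still drop their reference */
        return 0;
}
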