Commit 6cede2e6 authored by Xiao Guangrong, committed by Avi Kivity

KVM: introduce KVM_ERR_PTR_BAD_PAGE

It is used to eliminate the overhead of function calls and to clean up
the code.
Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 9a592a95
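
For context, KVM_ERR_PTR_BAD_PAGE leans on the kernel's ERR_PTR()/IS_ERR()
idiom from <linux/err.h>, which encodes a small negative errno in the
unmappable top page of the address range, so an error marker travels in an
ordinary struct page * return value. Below is a minimal, self-contained
userspace sketch of that encoding; the ERR_PTR/IS_ERR/PTR_ERR definitions
here are simplified stand-ins for the kernel's, not its exact code.

    #include <errno.h>
    #include <stdio.h>

    /* Simplified userspace re-creation of the <linux/err.h> idiom:
     * errno values up to MAX_ERRNO live in the very top of the address
     * range, where no valid pointer can point. */
    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error)
    {
            return (void *)error;
    }

    static inline long PTR_ERR(const void *ptr)
    {
            return (long)ptr;
    }

    static inline int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    /* Same definition the patch adds, minus the kernel headers. */
    #define KVM_ERR_PTR_BAD_PAGE (ERR_PTR(-ENOENT))

    int main(void)
    {
            void *page = KVM_ERR_PTR_BAD_PAGE;

            if (IS_ERR(page))       /* inline test, no function call */
                    printf("bad page, error = %ld\n", PTR_ERR(page));
            return 0;
    }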
@@ -68,6 +68,13 @@ static inline int is_invalid_pfn(pfn_t pfn)
 	return !is_noslot_pfn(pfn) && is_error_pfn(pfn);
 }
 
+#define KVM_ERR_PTR_BAD_PAGE	(ERR_PTR(-ENOENT))
+
+static inline int is_error_page(struct page *page)
+{
+	return IS_ERR(page);
+}
+
 /*
  * vcpu->requests bit members
  */
@@ -409,7 +416,6 @@ id_to_memslot(struct kvm_memslots *slots, int id)
 	return slot;
 }
 
-int is_error_page(struct page *page);
 int kvm_is_error_hva(unsigned long addr);
 int kvm_set_memory_region(struct kvm *kvm,
 			  struct kvm_userspace_memory_region *mem,
@@ -436,7 +442,6 @@ void kvm_arch_flush_shadow(struct kvm *kvm);
 int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
 			    int nr_pages);
 
-struct page *get_bad_page(void);
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
 void kvm_release_page_clean(struct page *page);
...
@@ -203,7 +203,7 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
 	if (!work)
 		return -ENOMEM;
 
-	work->page = get_bad_page();
+	work->page = KVM_ERR_PTR_BAD_PAGE;
 	INIT_LIST_HEAD(&work->queue); /* for list_del to work */
 
 	spin_lock(&vcpu->async_pf.lock);
...
@@ -922,17 +922,6 @@ void kvm_disable_largepages(void)
 }
 EXPORT_SYMBOL_GPL(kvm_disable_largepages);
 
-int is_error_page(struct page *page)
-{
-	return IS_ERR(page);
-}
-EXPORT_SYMBOL_GPL(is_error_page);
-
-struct page *get_bad_page(void)
-{
-	return ERR_PTR(-ENOENT);
-}
-
 static inline unsigned long bad_hva(void)
 {
 	return PAGE_OFFSET;
@@ -1179,7 +1168,7 @@ static struct page *kvm_pfn_to_page(pfn_t pfn)
 	WARN_ON(kvm_is_mmio_pfn(pfn));
 
 	if (is_error_pfn(pfn) || kvm_is_mmio_pfn(pfn))
-		return get_bad_page();
+		return KVM_ERR_PTR_BAD_PAGE;
 
 	return pfn_to_page(pfn);
 }
...
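
After this change, callers test for the bad-page marker with the inline
is_error_page() instead of calling an exported function. A hypothetical
caller might look like the sketch below; example_read_gfn() and its
-EFAULT policy are illustrative only, not part of this commit.

    /* Hypothetical caller, for illustration only. */
    static int example_read_gfn(struct kvm *kvm, gfn_t gfn)
    {
            struct page *page = gfn_to_page(kvm, gfn);

            /* is_error_page() is now a static inline IS_ERR() check,
             * so this test costs no function call into kvm_main.c. */
            if (is_error_page(page))
                    return -EFAULT;

            /* ... use the page ... */
            kvm_release_page_clean(page);
            return 0;
    }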