Commit 6c8ee57b authored by Xiao Guangrong, committed by Avi Kivity

KVM: introduce KVM_PFN_ERR_FAULT

With this constant in place, the exported, non-inline function get_fault_pfn()
can be removed.
Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
parent 16b854c8
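For readers skimming the diff below, here is a minimal, self-contained userspace sketch (not the kernel code) of the pattern this commit adopts: the failure value is a compile-time constant that encodes a negative errno in the pfn type, so callers compare against it instead of calling an exported helper such as get_fault_pfn(). The demo_pfn_t, DEMO_PFN_ERR_FAULT, demo_is_error_pfn and demo_lookup names are hypothetical stand-ins for illustration only, not the kernel's definitions.

/* Hypothetical stand-alone demo of the "error value encoded in the pfn" idea. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long long demo_pfn_t;

/* A negative errno cast to the unsigned pfn type lands at the very top of
 * its range, far above any frame number a real machine could have. */
#define DEMO_PFN_ERR_FAULT	((demo_pfn_t)-EFAULT)

static bool demo_is_error_pfn(demo_pfn_t pfn)
{
	/* Treat the topmost 4096 values as encoded -errno (IS_ERR()-style). */
	return pfn >= (demo_pfn_t)-4096;
}

static demo_pfn_t demo_lookup(bool found)
{
	/* On failure, return the constant instead of calling a helper. */
	return found ? 0x1234ULL : DEMO_PFN_ERR_FAULT;
}

int main(void)
{
	demo_pfn_t pfn = demo_lookup(false);

	if (demo_is_error_pfn(pfn))
		printf("lookup failed: pfn encodes -EFAULT (0x%llx)\n", pfn);

	return 0;
}

Because the failure value is now a plain constant rather than a function call, the get_fault_pfn() declaration, definition and EXPORT_SYMBOL_GPL can all be dropped, as the hunks below show.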
@@ -2512,7 +2512,7 @@ static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
 	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log);
 	if (!slot)
-		return get_fault_pfn();
+		return KVM_PFN_ERR_FAULT;
 
 	hva = gfn_to_hva_memslot(slot, gfn);
...
@@ -48,6 +48,8 @@
 #define KVM_MAX_MMIO_FRAGMENTS \
 	(KVM_MMIO_SIZE / KVM_USER_MMIO_SIZE + KVM_EXTRA_MMIO_FRAGMENTS)
 
+#define KVM_PFN_ERR_FAULT	(-EFAULT)
+
 /*
  * vcpu->requests bit members
  */
@@ -443,7 +445,6 @@ void kvm_release_pfn_clean(pfn_t pfn);
 void kvm_set_pfn_dirty(pfn_t pfn);
 void kvm_set_pfn_accessed(pfn_t pfn);
 void kvm_get_pfn(pfn_t pfn);
-pfn_t get_fault_pfn(void);
 
 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
 			int len);
...
@@ -939,12 +939,6 @@ static pfn_t get_bad_pfn(void)
 	return -ENOENT;
 }
 
-pfn_t get_fault_pfn(void)
-{
-	return -EFAULT;
-}
-EXPORT_SYMBOL_GPL(get_fault_pfn);
-
 static pfn_t get_hwpoison_pfn(void)
 {
 	return -EHWPOISON;
@@ -1115,7 +1109,7 @@ static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
 		struct vm_area_struct *vma;
 
 		if (atomic)
-			return get_fault_pfn();
+			return KVM_PFN_ERR_FAULT;
 
 		down_read(&current->mm->mmap_sem);
 		if (npages == -EHWPOISON ||
@@ -1127,7 +1121,7 @@ static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
 		vma = find_vma_intersection(current->mm, addr, addr+1);
 
 		if (vma == NULL)
-			pfn = get_fault_pfn();
+			pfn = KVM_PFN_ERR_FAULT;
 		else if ((vma->vm_flags & VM_PFNMAP)) {
 			pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
 				vma->vm_pgoff;
@@ -1135,7 +1129,7 @@ static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
 		} else {
 			if (async && (vma->vm_flags & VM_WRITE))
 				*async = true;
-			pfn = get_fault_pfn();
+			pfn = KVM_PFN_ERR_FAULT;
 		}
 		up_read(&current->mm->mmap_sem);
 	} else
...