Commit 9b5a8e00 authored by Matthew Wilcox, committed by Linus Torvalds

mm: convert insert_pfn() to vm_fault_t

All callers convert its errno into a vm_fault_t, so convert it to return a
vm_fault_t directly.

Link: http://lkml.kernel.org/r/20180828145728.11873-11-willy@infradead.org
Signed-off-by: Matthew Wilcox <willy@infradead.org>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Nicolas Pitre <nicolas.pitre@linaro.org>
Cc: Souptick Joarder <jrdr.linux@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 79f3aa5b
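(Not part of the patch: a minimal user-space sketch of the pattern this conversion removes. Before the change, insert_pfn() reported an errno and each caller translated it into a vm_fault_t; afterwards the helper returns the vm_fault_t itself, so the translation block in callers such as vmf_insert_pfn_prot() disappears. The types, constants, and helper names below are simplified stand-ins for the kernel definitions, not the real implementation.)

#include <errno.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's vm_fault_t codes (illustrative only). */
typedef unsigned int vm_fault_t;
#define VM_FAULT_OOM	0x000001
#define VM_FAULT_SIGBUS	0x000002
#define VM_FAULT_NOPAGE	0x000100

/* Old style: the helper reports an errno and leaves translation to callers. */
static int insert_pfn_old(int pte_alloc_fails)
{
	if (pte_alloc_fails)
		return -ENOMEM;		/* e.g. get_locked_pte() returned NULL */
	return 0;
}

/* ...so every caller carried a block like the one removed from vmf_insert_pfn_prot(). */
static vm_fault_t caller_old(int pte_alloc_fails)
{
	int err = insert_pfn_old(pte_alloc_fails);

	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	if (err < 0 && err != -EBUSY)
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;
}

/* New style: the helper returns the vm_fault_t directly... */
static vm_fault_t insert_pfn_new(int pte_alloc_fails)
{
	if (pte_alloc_fails)
		return VM_FAULT_OOM;
	return VM_FAULT_NOPAGE;
}

/* ...and the caller simply propagates it. */
static vm_fault_t caller_new(int pte_alloc_fails)
{
	return insert_pfn_new(pte_alloc_fails);
}

int main(void)
{
	printf("failure path: old %#x, new %#x\n", caller_old(1), caller_new(1));
	printf("success path: old %#x, new %#x\n", caller_old(0), caller_new(0));
	return 0;
}

Note that __vm_insert_mixed() in the diff below still keeps its err variable and errno check, because its other branch calls insert_page(), which continues to return an errno.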
@@ -1520,19 +1520,16 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
 }
 EXPORT_SYMBOL(vm_insert_page);
 
-static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 			pfn_t pfn, pgprot_t prot, bool mkwrite)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	int retval;
 	pte_t *pte, entry;
 	spinlock_t *ptl;
 
-	retval = -ENOMEM;
 	pte = get_locked_pte(mm, addr, &ptl);
 	if (!pte)
-		goto out;
-	retval = -EBUSY;
+		return VM_FAULT_OOM;
 	if (!pte_none(*pte)) {
 		if (mkwrite) {
 			/*
@@ -1565,11 +1562,9 @@ static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 	set_pte_at(mm, addr, pte, entry);
 	update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
 
-	retval = 0;
 out_unlock:
 	pte_unmap_unlock(pte, ptl);
-out:
-	return retval;
+	return VM_FAULT_NOPAGE;
 }
 
 /**
@@ -1593,8 +1588,6 @@ static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn, pgprot_t pgprot)
 {
-	int err;
-
 	/*
 	 * Technically, architectures with pte_special can avoid all these
 	 * restrictions (same for remap_pfn_range). However we would like
@@ -1615,15 +1608,8 @@ vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
 
 	track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
 
-	err = insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
+	return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
 			false);
-
-	if (err == -ENOMEM)
-		return VM_FAULT_OOM;
-	if (err < 0 && err != -EBUSY)
-		return VM_FAULT_SIGBUS;
-
-	return VM_FAULT_NOPAGE;
 }
 EXPORT_SYMBOL(vmf_insert_pfn_prot);
 
@@ -1703,7 +1689,7 @@ static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
 		page = pfn_to_page(pfn_t_to_pfn(pfn));
 		err = insert_page(vma, addr, page, pgprot);
 	} else {
-		err = insert_pfn(vma, addr, pfn, pgprot, mkwrite);
+		return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
 	}
 
 	if (err == -ENOMEM)