Commit b2e593e2 authored by Dan Williams, committed by Linus Torvalds

x86, mm: unify exit paths in gup_pte_range()

All exit paths from gup_pte_range() require pte_unmap() of the original
pte page before returning.  Refactor the code to have a single exit
point to do the unmap.

This mirrors the flow of the generic gup_pte_range() in mm/gup.c.
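
The refactor follows the classic single-exit-point cleanup pattern: instead of unmapping on every failure branch, failures break out of the walk and fall through to one shared pte_unmap(). A minimal, stand-alone C sketch of the pattern follows (map_resource(), unmap_resource(), and walk_range() are hypothetical stand-ins, not kernel APIs):

/* A minimal sketch of the single-exit-point pattern used by the patch.
 * map_resource()/unmap_resource() are hypothetical stand-ins for
 * pte_offset_map()/pte_unmap(); this is not kernel code. */
#include <stdio.h>

static int *map_resource(void)     { static int r; return &r; }
static void unmap_resource(int *r) { (void)r; }

static int walk_range(int start, int end)
{
	int *res = map_resource();
	int i, ret = 0;

	for (i = start; i < end; i++) {
		if (i == 3)	/* some per-item failure */
			break;	/* was: unmap_resource(res); return 0; */
	}
	if (i == end)	/* only a complete walk is a success */
		ret = 1;

	unmap_resource(res);	/* the single exit path does the cleanup */
	return ret;
}

int main(void)
{
	printf("%d\n", walk_range(0, 3));	/* 1: walk completed */
	printf("%d\n", walk_range(0, 5));	/* 0: failed at i == 3 */
	return 0;
}

As in the patch, success is detected by whether the walk reached end, so the cleanup site no longer needs to know which branch failed.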

Link: http://lkml.kernel.org/r/148804251828.36605.14910389618497006945.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent ef947b25
@@ -106,36 +106,35 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
 		unsigned long end, int write, struct page **pages, int *nr)
 {
 	struct dev_pagemap *pgmap = NULL;
-	int nr_start = *nr;
-	pte_t *ptep;
+	int nr_start = *nr, ret = 0;
+	pte_t *ptep, *ptem;
 
-	ptep = pte_offset_map(&pmd, addr);
+	/*
+	 * Keep the original mapped PTE value (ptem) around since we
+	 * might increment ptep off the end of the page when finishing
+	 * our loop iteration.
+	 */
+	ptem = ptep = pte_offset_map(&pmd, addr);
 	do {
 		pte_t pte = gup_get_pte(ptep);
 		struct page *page;
 
 		/* Similar to the PMD case, NUMA hinting must take slow path */
-		if (pte_protnone(pte)) {
-			pte_unmap(ptep);
-			return 0;
-		}
+		if (pte_protnone(pte))
+			break;
 
-		if (!pte_allows_gup(pte_val(pte), write)) {
-			pte_unmap(ptep);
-			return 0;
-		}
+		if (!pte_allows_gup(pte_val(pte), write))
+			break;
 
 		if (pte_devmap(pte)) {
 			pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
 			if (unlikely(!pgmap)) {
 				undo_dev_pagemap(nr, nr_start, pages);
-				pte_unmap(ptep);
-				return 0;
+				break;
 			}
-		} else if (pte_special(pte)) {
-			pte_unmap(ptep);
-			return 0;
-		}
+		} else if (pte_special(pte))
+			break;
 		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
 		page = pte_page(pte);
 		get_page(page);
@@ -145,9 +144,11 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
 		(*nr)++;
 
 	} while (ptep++, addr += PAGE_SIZE, addr != end);
-	pte_unmap(ptep - 1);
+	if (addr == end)
+		ret = 1;
+	pte_unmap(ptem);
 
-	return 1;
+	return ret;
 }
 
 static inline void get_head_page_multiple(struct page *page, int nr)
...