Commit bcf8039e authored by Dave Hansen, committed by Linus Torvalds

pagemap: fix large pages in pagemap

We were walking right into huge page areas in the pagemap walker, and
calling the pmds pmd_bad() and clearing them.

That leaked huge pages.  Bad.

This patch at least works around that for now.  It ignores huge pages in
the pagemap walker for the time being, and won't leak those pages.
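
For context, the clearing happens in the pmd-level walk: that loop treats any entry for which pmd_bad() is true as a corrupt page table and wipes it, but a hugetlb pmd is a leaf entry mapping a huge page rather than a pointer to a page of ptes, so it trips that check. Below is a condensed sketch modeled on the generic mm/pagewalk.c of this era (abbreviated and illustrative, not the verbatim source):

	static int walk_pmd_range(pud_t *pud, unsigned long addr,
				  unsigned long end, struct mm_walk *walk)
	{
		pmd_t *pmd;
		unsigned long next;
		int err = 0;

		pmd = pmd_offset(pud, addr);
		do {
			next = pmd_addr_end(addr, end);
			/*
			 * A hugetlb pmd looks "bad" here, so the entry is
			 * cleared: the huge page is silently unmapped and
			 * never returned to the hugepage pool.
			 */
			if (pmd_none_or_clear_bad(pmd)) {
				if (walk->pte_hole)
					err = walk->pte_hole(addr, next, walk);
				if (err)
					break;
				continue;
			}
			if (walk->pmd_entry)
				err = walk->pmd_entry(pmd, addr, next, walk);
			if (err)
				break;
		} while (pmd++, addr = next, addr != end);

		return err;
	}

The patch below works around this at the pagemap level: it tracks the VMA covering the current address and skips hugetlb VMAs entirely, reporting their pages as PM_NOT_PRESENT.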
Signed-off-by: Dave Hansen <dave@linux.vnet.ibm.com>
Acked-by: Matt Mackall <mpm@selenic.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 2165009b
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -553,24 +553,45 @@ static u64 swap_pte_to_pagemap_entry(pte_t pte)
 	return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
 }
 
+static unsigned long pte_to_pagemap_entry(pte_t pte)
+{
+	unsigned long pme = 0;
+	if (is_swap_pte(pte))
+		pme = PM_PFRAME(swap_pte_to_pagemap_entry(pte))
+			| PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
+	else if (pte_present(pte))
+		pme = PM_PFRAME(pte_pfn(pte))
+			| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
+	return pme;
+}
+
 static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 			     struct mm_walk *walk)
 {
+	struct vm_area_struct *vma;
 	struct pagemapread *pm = walk->private;
 	pte_t *pte;
 	int err = 0;
 
+	/* find the first VMA at or above 'addr' */
+	vma = find_vma(walk->mm, addr);
 	for (; addr != end; addr += PAGE_SIZE) {
 		u64 pfn = PM_NOT_PRESENT;
-		pte = pte_offset_map(pmd, addr);
-		if (is_swap_pte(*pte))
-			pfn = PM_PFRAME(swap_pte_to_pagemap_entry(*pte))
-				| PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
-		else if (pte_present(*pte))
-			pfn = PM_PFRAME(pte_pfn(*pte))
-				| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
-		/* unmap so we're not in atomic when we copy to userspace */
-		pte_unmap(pte);
+
+		/* check to see if we've left 'vma' behind
+		 * and need a new, higher one */
+		if (vma && (addr >= vma->vm_end))
+			vma = find_vma(walk->mm, addr);
+
+		/* check that 'vma' actually covers this address,
+		 * and that it isn't a huge page vma */
+		if (vma && (vma->vm_start <= addr) &&
+		    !is_vm_hugetlb_page(vma)) {
+			pte = pte_offset_map(pmd, addr);
+			pfn = pte_to_pagemap_entry(*pte);
+			/* unmap before userspace copy */
+			pte_unmap(pte);
+		}
+
 		err = add_to_pagemap(addr, pfn, pm);
 		if (err)
 			return err;
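
For reference, a minimal, hypothetical userspace sketch of the interface this protects (not part of the patch): /proc/pid/pagemap holds one 64-bit entry per virtual page, indexed by virtual page number. With this change, an address inside a hugetlb VMA simply reads back as a not-present entry instead of having its pmd clobbered.

	#include <fcntl.h>
	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>

	int main(int argc, char **argv)
	{
		unsigned long vaddr;
		long pagesize = sysconf(_SC_PAGESIZE);
		uint64_t entry;
		int fd;

		if (argc < 2)
			return 1;
		vaddr = strtoul(argv[1], NULL, 0);

		fd = open("/proc/self/pagemap", O_RDONLY);
		if (fd < 0)
			return 1;

		/* one 8-byte entry per virtual page, indexed by page number */
		if (pread(fd, &entry, sizeof(entry),
			  (off_t)(vaddr / pagesize) * sizeof(entry))
		    != sizeof(entry)) {
			close(fd);
			return 1;
		}

		printf("pagemap entry for %#lx: %#" PRIx64 "\n", vaddr, entry);
		close(fd);
		return 0;
	}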