Commit c0eb315a authored by Nicholas Piggin, committed by Linus Torvalds

mm/vmalloc: fix HUGE_VMAP regression by enabling huge pages in vmalloc_to_page

vmalloc_to_page returns NULL for addresses mapped by larger pages[*].
Whether or not a vmap is huge depends on the architecture details,
alignments, boot options, etc., which the caller cannot be expected to
know.  Therefore HUGE_VMAP is a regression for vmalloc_to_page.

This change teaches vmalloc_to_page about larger pages, and returns the
struct page that corresponds to the offset within the large page.  This
makes the API agnostic to mapping implementation details.
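As an illustration only (not part of this patch; the helper below and its
buffer are made up), a caller can now walk a vmalloc area one base page at
a time without knowing how it was mapped, because every PAGE_SIZE offset
resolves to its own struct page even inside a huge mapping:

	#include <linux/mm.h>
	#include <linux/vmalloc.h>

	/* Hypothetical sketch: whether this area ends up huge-mapped depends
	 * on its size, the architecture and boot options, but the walk below
	 * is correct either way after this change. */
	static void vmalloc_to_page_example(void)
	{
		void *buf = vmalloc(16 * PAGE_SIZE);
		unsigned long i;

		if (!buf)
			return;

		for (i = 0; i < 16; i++) {
			/* i-th base page of the area, whether it is mapped
			 * with PTEs or with PMD/PUD leaf entries */
			struct page *page = vmalloc_to_page(buf + i * PAGE_SIZE);

			(void)page;
		}

		vfree(buf);
	}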

[*] As explained by commit 029c54b0 ("mm/vmalloc.c: huge-vmap:
    fail gracefully on unexpected huge vmap mappings")

[npiggin@gmail.com: sparc32: add stub pud_page define for walking huge vmalloc page tables]
  Link: https://lkml.kernel.org/r/20210324232825.1157363-1-npiggin@gmail.com

Link: https://lkml.kernel.org/r/20210317062402.533919-3-npiggin@gmail.com
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Ding Tianhong <dingtianhong@huawei.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Uladzislau Rezki (Sony) <urezki@gmail.com>
Cc: Will Deacon <will@kernel.org>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: David S. Miller <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 972472c7
arch/sparc/include/asm/pgtable_32.h
@@ -321,6 +321,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 		     pgprot_val(newprot));
 }
 
+/* only used by the huge vmap code, should never be called */
+#define pud_page(pud)			NULL
+
 struct seq_file;
 void mmu_info(struct seq_file *m);
 
mm/vmalloc.c
@@ -34,7 +34,7 @@
 #include <linux/bitops.h>
 #include <linux/rbtree_augmented.h>
 #include <linux/overflow.h>
-
+#include <linux/pgtable.h>
 #include <linux/uaccess.h>
 #include <asm/tlbflush.h>
 #include <asm/shmparam.h>
@@ -343,7 +343,9 @@ int is_vmalloc_or_module_addr(const void *x)
 }
 
 /*
- * Walk a vmap address to the struct page it maps.
+ * Walk a vmap address to the struct page it maps. Huge vmap mappings will
+ * return the tail page that corresponds to the base page address, which
+ * matches small vmap mappings.
  */
 struct page *vmalloc_to_page(const void *vmalloc_addr)
 {
@@ -363,25 +365,33 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
 
 	if (pgd_none(*pgd))
 		return NULL;
+	if (WARN_ON_ONCE(pgd_leaf(*pgd)))
+		return NULL; /* XXX: no allowance for huge pgd */
+	if (WARN_ON_ONCE(pgd_bad(*pgd)))
+		return NULL;
+
 	p4d = p4d_offset(pgd, addr);
 	if (p4d_none(*p4d))
 		return NULL;
-	pud = pud_offset(p4d, addr);
+	if (p4d_leaf(*p4d))
+		return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);
+	if (WARN_ON_ONCE(p4d_bad(*p4d)))
+		return NULL;
 
-	/*
-	 * Don't dereference bad PUD or PMD (below) entries. This will also
-	 * identify huge mappings, which we may encounter on architectures
-	 * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be
-	 * identified as vmalloc addresses by is_vmalloc_addr(), but are
-	 * not [unambiguously] associated with a struct page, so there is
-	 * no correct value to return for them.
-	 */
-	WARN_ON_ONCE(pud_bad(*pud));
-	if (pud_none(*pud) || pud_bad(*pud))
+	pud = pud_offset(p4d, addr);
+	if (pud_none(*pud))
+		return NULL;
+	if (pud_leaf(*pud))
+		return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
+	if (WARN_ON_ONCE(pud_bad(*pud)))
 		return NULL;
+
 	pmd = pmd_offset(pud, addr);
-	WARN_ON_ONCE(pmd_bad(*pmd));
-	if (pmd_none(*pmd) || pmd_bad(*pmd))
+	if (pmd_none(*pmd))
+		return NULL;
+	if (pmd_leaf(*pmd))
+		return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
+	if (WARN_ON_ONCE(pmd_bad(*pmd)))
 		return NULL;
 
 	ptep = pte_offset_map(pmd, addr);
@@ -389,6 +399,7 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
 	if (pte_present(pte))
 		page = pte_page(pte);
 	pte_unmap(ptep);
+
 	return page;
 }
 EXPORT_SYMBOL(vmalloc_to_page);
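The offset arithmetic used at each leaf level above can also be checked in
isolation. A standalone userspace sketch (not kernel code; the constants
assume 4K base pages and 2M PMD-level mappings, and the address is an
arbitrary example):

	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PMD_SHIFT	21
	#define PMD_MASK	(~((1UL << PMD_SHIFT) - 1))

	int main(void)
	{
		/* arbitrary example address inside a notional 2M mapping */
		unsigned long addr = 0xffffc90000305000UL;

		/* start of the huge mapping that contains addr */
		unsigned long huge_base = addr & PMD_MASK;

		/* index of the 4K base page within the huge mapping; the same
		 * expression the patch uses in
		 * pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT) */
		unsigned long page_idx = (addr & ~PMD_MASK) >> PAGE_SHIFT;

		printf("huge mapping base: %#lx\n", huge_base);
		printf("base page index:   %lu\n", page_idx);
		return 0;
	}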