Commit bbc180a5 authored by Nicholas Piggin, committed by Linus Torvalds

mm: HUGE_VMAP arch support cleanup

This changes the awkward approach where architectures provide init
functions to determine which levels they can provide large mappings for,
to one where the arch is queried for each call.

This removes code and indirection, and allows constant-folding of dead
code for unsupported levels.

This also adds a prot argument to the arch query.  This is unused
currently but could help with some architectures (e.g., some powerpc
processors can't map uncacheable memory with large pages).

Link: https://lkml.kernel.org/r/20210317062402.533919-7-npiggin@gmail.com
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Ding Tianhong <dingtianhong@huawei.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com> [arm64]
Cc: Will Deacon <will@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Uladzislau Rezki (Sony) <urezki@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 95f0ddf0
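
In concrete terms, the per-architecture hooks change shape as follows (taken from the declarations added and removed in the diff below); callers now pass the intended protection bits, so an architecture can refuse a combination it cannot map at that level:

/* Old interface: probed once at init time, no protection information. */
int arch_ioremap_p4d_supported(void);
int arch_ioremap_pud_supported(void);
int arch_ioremap_pmd_supported(void);

/* New interface: queried at each mapping attempt, with the protection bits. */
bool arch_vmap_p4d_supported(pgprot_t prot);
bool arch_vmap_pud_supported(pgprot_t prot);
bool arch_vmap_pmd_supported(pgprot_t prot);

Architectures that do not select CONFIG_HAVE_ARCH_HUGE_VMAP get static inline stubs that return false (see the include/linux/vmalloc.h hunk below), which is what lets the compiler discard the huge-mapping paths on those configurations.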
arch/arm64/include/asm/vmalloc.h

 #ifndef _ASM_ARM64_VMALLOC_H
 #define _ASM_ARM64_VMALLOC_H
+
+#include <asm/page.h>
+
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+bool arch_vmap_p4d_supported(pgprot_t prot);
+bool arch_vmap_pud_supported(pgprot_t prot);
+bool arch_vmap_pmd_supported(pgprot_t prot);
+#endif
+
 #endif /* _ASM_ARM64_VMALLOC_H */
arch/arm64/mm/mmu.c

@@ -1339,12 +1339,12 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
 	return dt_virt;
 }
 
-int __init arch_ioremap_p4d_supported(void)
+bool arch_vmap_p4d_supported(pgprot_t prot)
 {
-	return 0;
+	return false;
 }
 
-int __init arch_ioremap_pud_supported(void)
+bool arch_vmap_pud_supported(pgprot_t prot)
 {
 	/*
 	 * Only 4k granule supports level 1 block mappings.
@@ -1354,9 +1354,9 @@ int __init arch_ioremap_pud_supported(void)
 	       !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
 }
 
-int __init arch_ioremap_pmd_supported(void)
+bool arch_vmap_pmd_supported(pgprot_t prot)
 {
-	/* See arch_ioremap_pud_supported() */
+	/* See arch_vmap_pud_supported() */
 	return !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
 }
arch/powerpc/include/asm/vmalloc.h

 #ifndef _ASM_POWERPC_VMALLOC_H
 #define _ASM_POWERPC_VMALLOC_H
+
+#include <asm/page.h>
+
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+bool arch_vmap_p4d_supported(pgprot_t prot);
+bool arch_vmap_pud_supported(pgprot_t prot);
+bool arch_vmap_pmd_supported(pgprot_t prot);
+#endif
+
 #endif /* _ASM_POWERPC_VMALLOC_H */
arch/powerpc/mm/book3s64/radix_pgtable.c

@@ -1082,13 +1082,13 @@ void radix__ptep_modify_prot_commit(struct vm_area_struct *vma,
 	set_pte_at(mm, addr, ptep, pte);
 }
 
-int __init arch_ioremap_pud_supported(void)
+bool arch_vmap_pud_supported(pgprot_t prot)
 {
 	/* HPT does not cope with large pages in the vmalloc area */
 	return radix_enabled();
 }
 
-int __init arch_ioremap_pmd_supported(void)
+bool arch_vmap_pmd_supported(pgprot_t prot)
 {
 	return radix_enabled();
 }
@@ -1182,7 +1182,7 @@ int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
 	return 1;
 }
 
-int __init arch_ioremap_p4d_supported(void)
+bool arch_vmap_p4d_supported(pgprot_t prot)
 {
-	return 0;
+	return false;
 }
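
Neither powerpc helper consults prot yet; the new argument only creates the option the changelog mentions. A purely hypothetical sketch of such a policy, not part of this patch (the radix_enabled() call is real, the cacheability check is illustrative only):

/* Hypothetical, not from this commit: decline huge mappings for
 * uncacheable protections, as the changelog suggests some powerpc
 * processors would require. */
bool arch_vmap_pmd_supported(pgprot_t prot)
{
        if (!radix_enabled())
                return false;
        if (pgprot_val(prot) & _PAGE_NO_CACHE)  /* illustrative check */
                return false;
        return true;
}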
arch/x86/include/asm/vmalloc.h

 #ifndef _ASM_X86_VMALLOC_H
 #define _ASM_X86_VMALLOC_H
 
+#include <asm/page.h>
 #include <asm/pgtable_areas.h>
 
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+bool arch_vmap_p4d_supported(pgprot_t prot);
+bool arch_vmap_pud_supported(pgprot_t prot);
+bool arch_vmap_pmd_supported(pgprot_t prot);
+#endif
+
 #endif /* _ASM_X86_VMALLOC_H */
arch/x86/mm/ioremap.c

@@ -481,24 +481,26 @@ void iounmap(volatile void __iomem *addr)
 }
 EXPORT_SYMBOL(iounmap);
 
-int __init arch_ioremap_p4d_supported(void)
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+bool arch_vmap_p4d_supported(pgprot_t prot)
 {
-	return 0;
+	return false;
 }
 
-int __init arch_ioremap_pud_supported(void)
+bool arch_vmap_pud_supported(pgprot_t prot)
 {
 #ifdef CONFIG_X86_64
 	return boot_cpu_has(X86_FEATURE_GBPAGES);
 #else
-	return 0;
+	return false;
 #endif
 }
 
-int __init arch_ioremap_pmd_supported(void)
+bool arch_vmap_pmd_supported(pgprot_t prot)
 {
 	return boot_cpu_has(X86_FEATURE_PSE);
 }
+#endif
 
 /*
  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
include/linux/io.h

@@ -31,15 +31,6 @@ static inline int ioremap_page_range(unsigned long addr, unsigned long end,
 }
 #endif
 
-#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
-void __init ioremap_huge_init(void);
-int arch_ioremap_p4d_supported(void);
-int arch_ioremap_pud_supported(void);
-int arch_ioremap_pmd_supported(void);
-#else
-static inline void ioremap_huge_init(void) { }
-#endif
-
 /*
  * Managed iomap interface
  */
include/linux/vmalloc.h

@@ -78,6 +78,12 @@ struct vmap_area {
 	};
 };
 
+#ifndef CONFIG_HAVE_ARCH_HUGE_VMAP
+static inline bool arch_vmap_p4d_supported(pgprot_t prot) { return false; }
+static inline bool arch_vmap_pud_supported(pgprot_t prot) { return false; }
+static inline bool arch_vmap_pmd_supported(pgprot_t prot) { return false; }
+#endif
+
 /*
  * Highlevel APIs for driver use
  */
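
These stubs are what make the "constant-folding of dead code" in the changelog work: on a build without CONFIG_HAVE_ARCH_HUGE_VMAP, a guard like the one below compiles down to if (false) and the whole huge-mapping branch disappears. The helper name here is invented for illustration; the real check sits in vmap_try_huge_pmd() in mm/ioremap.c:

/* Illustrative helper, not kernel source; it mirrors part of the check
 * pattern used by vmap_try_huge_pmd() below. */
static bool want_pmd_mapping(unsigned long addr, unsigned long end,
                             phys_addr_t phys_addr, pgprot_t prot)
{
        if (!arch_vmap_pmd_supported(prot))     /* constant false without HUGE_VMAP */
                return false;
        return (end - addr) == PMD_SIZE &&
               IS_ALIGNED(addr, PMD_SIZE) &&
               IS_ALIGNED(phys_addr, PMD_SIZE);
}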
init/main.c

@@ -837,7 +837,6 @@ static void __init mm_init(void)
 	pgtable_init();
 	debug_objects_mem_init();
 	vmalloc_init();
-	ioremap_huge_init();
 	/* Should be run before the first non-init thread is created */
 	init_espfix_bsp();
 	/* Should be run after espfix64 is set up. */
mm/debug_vm_pgtable.c

@@ -247,7 +247,7 @@ static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot)
 {
 	pmd_t pmd;
 
-	if (!arch_ioremap_pmd_supported())
+	if (!arch_vmap_pmd_supported(prot))
 		return;
 
 	pr_debug("Validating PMD huge\n");
@@ -385,7 +385,7 @@ static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
 {
 	pud_t pud;
 
-	if (!arch_ioremap_pud_supported())
+	if (!arch_vmap_pud_supported(prot))
 		return;
 
 	pr_debug("Validating PUD huge\n");
mm/ioremap.c

@@ -16,49 +16,16 @@
 #include "pgalloc-track.h"
 
 #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
-static int __read_mostly ioremap_p4d_capable;
-static int __read_mostly ioremap_pud_capable;
-static int __read_mostly ioremap_pmd_capable;
-static int __read_mostly ioremap_huge_disabled;
+static bool __ro_after_init iomap_max_page_shift = PAGE_SHIFT;
 
 static int __init set_nohugeiomap(char *str)
 {
-	ioremap_huge_disabled = 1;
+	iomap_max_page_shift = P4D_SHIFT;
 	return 0;
 }
 early_param("nohugeiomap", set_nohugeiomap);
-
-void __init ioremap_huge_init(void)
-{
-	if (!ioremap_huge_disabled) {
-		if (arch_ioremap_p4d_supported())
-			ioremap_p4d_capable = 1;
-		if (arch_ioremap_pud_supported())
-			ioremap_pud_capable = 1;
-		if (arch_ioremap_pmd_supported())
-			ioremap_pmd_capable = 1;
-	}
-}
-
-static inline int ioremap_p4d_enabled(void)
-{
-	return ioremap_p4d_capable;
-}
-
-static inline int ioremap_pud_enabled(void)
-{
-	return ioremap_pud_capable;
-}
-
-static inline int ioremap_pmd_enabled(void)
-{
-	return ioremap_pmd_capable;
-}
-
-#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
-static inline int ioremap_p4d_enabled(void) { return 0; }
-static inline int ioremap_pud_enabled(void) { return 0; }
-static inline int ioremap_pmd_enabled(void) { return 0; }
+#else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
+static const bool iomap_max_page_shift = PAGE_SHIFT;
 #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
 
 static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
@@ -82,9 +49,13 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 }
 
 static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
-			phys_addr_t phys_addr, pgprot_t prot)
+			phys_addr_t phys_addr, pgprot_t prot,
+			unsigned int max_page_shift)
 {
-	if (!ioremap_pmd_enabled())
+	if (max_page_shift < PMD_SHIFT)
+		return 0;
+
+	if (!arch_vmap_pmd_supported(prot))
 		return 0;
 
 	if ((end - addr) != PMD_SIZE)
@@ -104,7 +75,7 @@ static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
 
 static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
 			phys_addr_t phys_addr, pgprot_t prot,
-			pgtbl_mod_mask *mask)
+			unsigned int max_page_shift, pgtbl_mod_mask *mask)
 {
 	pmd_t *pmd;
 	unsigned long next;
@@ -115,7 +86,8 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
 	do {
 		next = pmd_addr_end(addr, end);
 
-		if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot)) {
+		if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot,
+					max_page_shift)) {
 			*mask |= PGTBL_PMD_MODIFIED;
 			continue;
 		}
@@ -127,9 +99,13 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
 }
 
 static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
-			phys_addr_t phys_addr, pgprot_t prot)
+			phys_addr_t phys_addr, pgprot_t prot,
+			unsigned int max_page_shift)
 {
-	if (!ioremap_pud_enabled())
+	if (max_page_shift < PUD_SHIFT)
+		return 0;
+
+	if (!arch_vmap_pud_supported(prot))
 		return 0;
 
 	if ((end - addr) != PUD_SIZE)
@@ -149,7 +125,7 @@ static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
 
 static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
 			phys_addr_t phys_addr, pgprot_t prot,
-			pgtbl_mod_mask *mask)
+			unsigned int max_page_shift, pgtbl_mod_mask *mask)
 {
 	pud_t *pud;
 	unsigned long next;
@@ -160,21 +136,27 @@ static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
 	do {
 		next = pud_addr_end(addr, end);
 
-		if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot)) {
+		if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot,
+					max_page_shift)) {
 			*mask |= PGTBL_PUD_MODIFIED;
 			continue;
 		}
 
-		if (vmap_pmd_range(pud, addr, next, phys_addr, prot, mask))
+		if (vmap_pmd_range(pud, addr, next, phys_addr, prot,
+					max_page_shift, mask))
 			return -ENOMEM;
 	} while (pud++, phys_addr += (next - addr), addr = next, addr != end);
 	return 0;
 }
 
 static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
-			phys_addr_t phys_addr, pgprot_t prot)
+			phys_addr_t phys_addr, pgprot_t prot,
+			unsigned int max_page_shift)
 {
-	if (!ioremap_p4d_enabled())
+	if (max_page_shift < P4D_SHIFT)
+		return 0;
+
+	if (!arch_vmap_p4d_supported(prot))
 		return 0;
 
 	if ((end - addr) != P4D_SIZE)
@@ -194,7 +176,7 @@ static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
 
 static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
 			phys_addr_t phys_addr, pgprot_t prot,
-			pgtbl_mod_mask *mask)
+			unsigned int max_page_shift, pgtbl_mod_mask *mask)
 {
 	p4d_t *p4d;
 	unsigned long next;
@@ -205,19 +187,22 @@ static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
 	do {
 		next = p4d_addr_end(addr, end);
 
-		if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot)) {
+		if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot,
+					max_page_shift)) {
 			*mask |= PGTBL_P4D_MODIFIED;
 			continue;
 		}
 
-		if (vmap_pud_range(p4d, addr, next, phys_addr, prot, mask))
+		if (vmap_pud_range(p4d, addr, next, phys_addr, prot,
+					max_page_shift, mask))
 			return -ENOMEM;
 	} while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
 	return 0;
 }
 
 static int vmap_range(unsigned long addr, unsigned long end,
-			phys_addr_t phys_addr, pgprot_t prot)
+			phys_addr_t phys_addr, pgprot_t prot,
+			unsigned int max_page_shift)
 {
 	pgd_t *pgd;
 	unsigned long start;
@@ -232,7 +217,8 @@ static int vmap_range(unsigned long addr, unsigned long end,
 	pgd = pgd_offset_k(addr);
 	do {
 		next = pgd_addr_end(addr, end);
-		err = vmap_p4d_range(pgd, addr, next, phys_addr, prot, &mask);
+		err = vmap_p4d_range(pgd, addr, next, phys_addr, prot,
+					max_page_shift, &mask);
 		if (err)
 			break;
 	} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);
@@ -248,7 +234,7 @@ static int vmap_range(unsigned long addr, unsigned long end,
 int ioremap_page_range(unsigned long addr,
 		       unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
 {
-	return vmap_range(addr, end, phys_addr, prot);
+	return vmap_range(addr, end, phys_addr, prot, iomap_max_page_shift);
 }
 
 #ifdef CONFIG_GENERIC_IOREMAP
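
The resulting flow, condensed: ioremap_page_range() forwards the boot-time cap, and every level checks that cap before asking the architecture, which is why the old per-level capable/enabled bookkeeping can go away. A paraphrase of the new call chain (not verbatim kernel source):

/* Condensed paraphrase of the new flow in mm/ioremap.c. */
int ioremap_page_range(unsigned long addr, unsigned long end,
                       phys_addr_t phys_addr, pgprot_t prot)
{
        /* the boot-time cap is the only global state that remains */
        return vmap_range(addr, end, phys_addr, prot, iomap_max_page_shift);
}
/*
 * vmap_range() hands max_page_shift down through vmap_p4d_range(),
 * vmap_pud_range() and vmap_pmd_range(); each vmap_try_huge_*() level
 * first checks "max_page_shift < *_SHIFT" and only then calls the
 * matching arch_vmap_*_supported(prot) query.
 */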