Commit d1837cba authored by David Gibson, committed by Benjamin Herrenschmidt

powerpc/mm: Cleanup initialization of hugepages on powerpc

This patch simplifies the logic used to initialize hugepages on
powerpc.  The somewhat oddly named set_huge_psize() is renamed to
add_huge_page_size() and now does all the necessary verification of
whether it's given a valid hugepage size (instead of only some of the
checks) and instantiates the generic hstate structure (but no more).

hugetlbpage_init() now steps through the available pagesizes, checks
if they're valid for hugepages by calling add_huge_page_size() and
initializes the kmem_caches for the hugepage pagetables.  This means
we can now eliminate the mmu_huge_psizes array, since we no longer
need to pass the sizing information for the pagetable caches from
set_huge_psize() into hugetlbpage_init().

Determination of the default huge page size is also moved from the
hash code into the general hugepage code.
Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent a4fe3ce7
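
For reference, the size check that the new add_huge_page_size() performs in the
patch below reduces to: the size must be a power of two, its log2 must lie above
the base page shift and below the slice limit, and it must map to an MMU page
size the hardware actually supports. A minimal userspace sketch of that check
(not kernel code; the supported shifts and the upper bound are placeholder
assumptions standing in for the real mmu_psize_defs table and SLICE_HIGH_SHIFT):

#include <stdio.h>
#include <stdbool.h>

/* Illustrative stand-ins, NOT the real powerpc values. */
#define BASE_PAGE_SHIFT 12	/* assumed 4K base pages */
#define MAX_HUGE_SHIFT  34	/* assumed upper bound, akin to the slice limit */

/* Hugepage shifts supported in this sketch: 16M (24) and 16G (34). */
static const int supported_shifts[] = { 24, 34 };

static bool is_power_of_2(unsigned long long x)
{
	return x && (x & (x - 1)) == 0;
}

/* Mirrors the validation done by add_huge_page_size() in the patch:
 * power of two, within the allowed shift range, and a size the MMU
 * supports.  Returns the shift on success, -1 on failure. */
static int check_huge_page_size(unsigned long long size)
{
	int shift, i;

	if (!is_power_of_2(size))
		return -1;

	shift = __builtin_ctzll(size);	/* same role as __ffs(size) in the kernel */
	if (shift <= BASE_PAGE_SHIFT || shift > MAX_HUGE_SHIFT)
		return -1;

	for (i = 0; i < (int)(sizeof(supported_shifts) / sizeof(supported_shifts[0])); i++)
		if (supported_shifts[i] == shift)
			return shift;

	return -1;	/* no matching entry, like shift_to_mmu_psize() returning -1 */
}

int main(void)
{
	unsigned long long sizes[] = { 16ULL << 20, 1ULL << 20, 3ULL << 20 };

	for (int i = 0; i < 3; i++)
		printf("%llu -> shift %d\n", sizes[i], check_huge_page_size(sizes[i]));
	return 0;
}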
@@ -90,7 +90,7 @@ extern unsigned int HPAGE_SHIFT;
 #define HPAGE_SIZE		((1UL) << HPAGE_SHIFT)
 #define HPAGE_MASK		(~(HPAGE_SIZE - 1))
 #define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
-#define HUGE_MAX_HSTATE		3
+#define HUGE_MAX_HSTATE		(MMU_PAGE_COUNT-1)
 #endif /* __ASSEMBLY__ */
...
@@ -481,16 +481,6 @@ static void __init htab_init_page_sizes(void)
 #ifdef CONFIG_HUGETLB_PAGE
 	/* Reserve 16G huge page memory sections for huge pages */
 	of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL);
-
-	/* Set default large page size. Currently, we pick 16M or 1M depending
-	 * on what is available
-	 */
-	if (mmu_psize_defs[MMU_PAGE_16M].shift)
-		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
-	/* With 4k/4level pagetables, we can't (for now) cope with a
-	 * huge page size < PMD_SIZE */
-	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
-		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;
 #endif /* CONFIG_HUGETLB_PAGE */
 }
...
@@ -37,27 +37,17 @@
 static unsigned long gpage_freearray[MAX_NUMBER_GPAGES];
 static unsigned nr_gpages;

-/* Array of valid huge page sizes - non-zero value(hugepte_shift) is
- * stored for the huge page sizes that are valid.
- */
-static unsigned int mmu_huge_psizes[MMU_PAGE_COUNT] = { }; /* initialize all to 0 */
-
 /* Flag to mark huge PD pointers.  This means pmd_bad() and pud_bad()
  * will choke on pointers to hugepte tables, which is handy for
  * catching screwups early. */

 static inline int shift_to_mmu_psize(unsigned int shift)
 {
-	switch (shift) {
-#ifndef CONFIG_PPC_64K_PAGES
-	case PAGE_SHIFT_64K:
-		return MMU_PAGE_64K;
-#endif
-	case PAGE_SHIFT_16M:
-		return MMU_PAGE_16M;
-	case PAGE_SHIFT_16G:
-		return MMU_PAGE_16G;
-	}
+	int psize;
+
+	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
+		if (mmu_psize_defs[psize].shift == shift)
+			return psize;
+
 	return -1;
 }
@@ -502,8 +492,6 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 	struct hstate *hstate = hstate_file(file);
 	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));

-	if (!mmu_huge_psizes[mmu_psize])
-		return -EINVAL;
 	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1, 0);
 }
@@ -666,47 +654,46 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 	return err;
 }

-static void __init set_huge_psize(int psize)
+static int __init add_huge_page_size(unsigned long long size)
 {
-	unsigned pdshift;
+	int shift = __ffs(size);
+	int mmu_psize;

 	/* Check that it is a page size supported by the hardware and
-	 * that it fits within pagetable limits. */
-	if (mmu_psize_defs[psize].shift &&
-	    mmu_psize_defs[psize].shift < SID_SHIFT_1T &&
-	    (mmu_psize_defs[psize].shift > MIN_HUGEPTE_SHIFT ||
-	     mmu_psize_defs[psize].shift == PAGE_SHIFT_64K ||
-	     mmu_psize_defs[psize].shift == PAGE_SHIFT_16G)) {
-		/* Return if huge page size has already been setup or is the
-		 * same as the base page size. */
-		if (mmu_huge_psizes[psize] ||
-		    mmu_psize_defs[psize].shift == PAGE_SHIFT)
-			return;
-		hugetlb_add_hstate(mmu_psize_defs[psize].shift - PAGE_SHIFT);
+	 * that it fits within pagetable and slice limits. */
+	if (!is_power_of_2(size)
+	    || (shift > SLICE_HIGH_SHIFT) || (shift <= PAGE_SHIFT))
+		return -EINVAL;

-		if (mmu_psize_defs[psize].shift < PMD_SHIFT)
-			pdshift = PMD_SHIFT;
-		else if (mmu_psize_defs[psize].shift < PUD_SHIFT)
-			pdshift = PUD_SHIFT;
-		else
-			pdshift = PGDIR_SHIFT;
-		mmu_huge_psizes[psize] = pdshift - mmu_psize_defs[psize].shift;
-	}
+	if ((mmu_psize = shift_to_mmu_psize(shift)) < 0)
+		return -EINVAL;
+
+#ifdef CONFIG_SPU_FS_64K_LS
+	/* Disable support for 64K huge pages when 64K SPU local store
+	 * support is enabled as the current implementation conflicts.
+	 */
+	if (shift == PAGE_SHIFT_64K)
+		return -EINVAL;
+#endif /* CONFIG_SPU_FS_64K_LS */
+
+	BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);
+
+	/* Return if huge page size has already been setup */
+	if (size_to_hstate(size))
+		return 0;
+
+	hugetlb_add_hstate(shift - PAGE_SHIFT);
+
+	return 0;
 }

 static int __init hugepage_setup_sz(char *str)
 {
 	unsigned long long size;
-	int mmu_psize;
-	int shift;

 	size = memparse(str, &str);

-	shift = __ffs(size);
-	mmu_psize = shift_to_mmu_psize(shift);
-	if (mmu_psize >= 0 && mmu_psize_defs[mmu_psize].shift)
-		set_huge_psize(mmu_psize);
-	else
+	if (add_huge_page_size(size) != 0)
 		printk(KERN_WARNING "Invalid huge page size specified(%llu)\n", size);

 	return 1;
@@ -720,30 +707,39 @@ static int __init hugetlbpage_init(void)
 	if (!cpu_has_feature(CPU_FTR_16M_PAGE))
 		return -ENODEV;

-	/* Add supported huge page sizes.  Need to change
-	 * HUGE_MAX_HSTATE if the number of supported huge page sizes
-	 * changes.
-	 */
-	set_huge_psize(MMU_PAGE_16M);
-	set_huge_psize(MMU_PAGE_16G);
+	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
+		unsigned shift;
+		unsigned pdshift;

-	/* Temporarily disable support for 64K huge pages when 64K SPU local
-	 * store support is enabled as the current implementation conflicts.
-	 */
-#ifndef CONFIG_SPU_FS_64K_LS
-	set_huge_psize(MMU_PAGE_64K);
-#endif
+		if (!mmu_psize_defs[psize].shift)
+			continue;

-	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
-		if (mmu_huge_psizes[psize]) {
-			pgtable_cache_add(mmu_huge_psizes[psize], NULL);
-			if (!PGT_CACHE(mmu_huge_psizes[psize]))
-				panic("hugetlbpage_init(): could not create "
-				      "pgtable cache for %d bit pagesize\n",
-				      mmu_psize_to_shift(psize));
-		}
+		shift = mmu_psize_to_shift(psize);
+
+		if (add_huge_page_size(1ULL << shift) < 0)
+			continue;
+
+		if (shift < PMD_SHIFT)
+			pdshift = PMD_SHIFT;
+		else if (shift < PUD_SHIFT)
+			pdshift = PUD_SHIFT;
+		else
+			pdshift = PGDIR_SHIFT;
+
+		pgtable_cache_add(pdshift - shift, NULL);
+		if (!PGT_CACHE(pdshift - shift))
+			panic("hugetlbpage_init(): could not create "
+			      "pgtable cache for %d bit pagesize\n", shift);
 	}

+	/* Set default large page size. Currently, we pick 16M or 1M
+	 * depending on what is available
+	 */
+	if (mmu_psize_defs[MMU_PAGE_16M].shift)
+		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
+	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
+		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;
+
 	return 0;
 }
...
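
To illustrate the pgtable cache sizing in the hugetlbpage_init() hunk above: the
index passed to pgtable_cache_add() is pdshift - shift, the log2 of how many
hugepte slots one directory entry's span needs for that hugepage size. A small
sketch of that selection, with the directory shifts filled in as placeholder
assumptions (the real PMD_SHIFT/PUD_SHIFT/PGDIR_SHIFT depend on the base page
size and pagetable layout):

#include <stdio.h>

/* Placeholder directory shifts, NOT the real powerpc values; they only
 * serve to show how the pdshift - shift index is derived. */
#define EX_PMD_SHIFT   21
#define EX_PUD_SHIFT   30
#define EX_PGDIR_SHIFT 39

/* Mirrors the selection in hugetlbpage_init(): pick the smallest
 * directory level whose span is at least one hugepage. */
static int pgtable_cache_index(int shift)
{
	int pdshift;

	if (shift < EX_PMD_SHIFT)
		pdshift = EX_PMD_SHIFT;
	else if (shift < EX_PUD_SHIFT)
		pdshift = EX_PUD_SHIFT;
	else
		pdshift = EX_PGDIR_SHIFT;

	return pdshift - shift;	/* log2 of the number of hugepte slots */
}

int main(void)
{
	/* 16M (shift 24) and 16G (shift 34) hugepages as examples. */
	printf("16M -> cache index %d\n", pgtable_cache_index(24));
	printf("16G -> cache index %d\n", pgtable_cache_index(34));
	return 0;
}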