Commit 4ec161cf authored by Jon Tollefson, committed by Paul Mackerras

[POWERPC] Add hugepagesz boot-time parameter

This adds the hugepagesz boot-time parameter for ppc64.  It lets one
pick the size for huge pages.  The available choices are 64K and 16M
when the base page size is 4K.  It defaults to 16M (previously the
only choice) if nothing or an invalid choice is specified.

Tested 64K huge pages successfully with libhugetlbfs 1.2.
Signed-off-by: Jon Tollefson <kniht@linux.vnet.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
parent 5b14e5f9
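
As a usage sketch (the page counts below are illustrative): the size is parsed with memparse(), so it can be given with a K or M suffix and combined with the existing hugepages= count on the kernel command line, for example:

        hugepagesz=64K hugepages=512
        hugepagesz=16M hugepages=16

On a 4K base page kernel either 64K or 16M is accepted; any other value falls back to the 16M default.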
@@ -685,6 +685,7 @@ and is between 256 and 4096 characters. It is defined in the file
 			See Documentation/isdn/README.HiSax.
 
 	hugepages=	[HW,X86-32,IA-64] Maximal number of HugeTLB pages.
+	hugepagesz=	[HW,IA-64,PPC] The size of the HugeTLB pages.
 	i8042.direct	[HW] Put keyboard port into non-translated mode
 	i8042.dumbkbd	[HW] Pretend that controller can only read data from
@@ -369,18 +369,11 @@ static void __init htab_init_page_sizes(void)
 	 * on what is available
 	 */
 	if (mmu_psize_defs[MMU_PAGE_16M].shift)
-		mmu_huge_psize = MMU_PAGE_16M;
+		set_huge_psize(MMU_PAGE_16M);
 	/* With 4k/4level pagetables, we can't (for now) cope with a
 	 * huge page size < PMD_SIZE */
 	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
-		mmu_huge_psize = MMU_PAGE_1M;
-
-	/* Calculate HPAGE_SHIFT and sanity check it */
-	if (mmu_psize_defs[mmu_huge_psize].shift > MIN_HUGEPTE_SHIFT &&
-	    mmu_psize_defs[mmu_huge_psize].shift < SID_SHIFT)
-		HPAGE_SHIFT = mmu_psize_defs[mmu_huge_psize].shift;
-	else
-		HPAGE_SHIFT = 0; /* No huge pages dude ! */
+		set_huge_psize(MMU_PAGE_1M);
 #endif /* CONFIG_HUGETLB_PAGE */
 }
@@ -24,18 +24,17 @@
 #include <asm/cputable.h>
 #include <asm/spu.h>
 
+#define HPAGE_SHIFT_64K	16
+#define HPAGE_SHIFT_16M	24
+
 #define NUM_LOW_AREAS	(0x100000000UL >> SID_SHIFT)
 #define NUM_HIGH_AREAS	(PGTABLE_RANGE >> HTLB_AREA_SHIFT)
 
-#ifdef CONFIG_PPC_64K_PAGES
-#define HUGEPTE_INDEX_SIZE	(PMD_SHIFT-HPAGE_SHIFT)
-#else
-#define HUGEPTE_INDEX_SIZE	(PUD_SHIFT-HPAGE_SHIFT)
-#endif
-#define PTRS_PER_HUGEPTE	(1 << HUGEPTE_INDEX_SIZE)
-#define HUGEPTE_TABLE_SIZE	(sizeof(pte_t) << HUGEPTE_INDEX_SIZE)
+unsigned int hugepte_shift;
+#define PTRS_PER_HUGEPTE	(1 << hugepte_shift)
+#define HUGEPTE_TABLE_SIZE	(sizeof(pte_t) << hugepte_shift)
 
-#define HUGEPD_SHIFT		(HPAGE_SHIFT + HUGEPTE_INDEX_SIZE)
+#define HUGEPD_SHIFT		(HPAGE_SHIFT + hugepte_shift)
 #define HUGEPD_SIZE		(1UL << HUGEPD_SHIFT)
 #define HUGEPD_MASK		(~(HUGEPD_SIZE-1))
@@ -82,11 +81,35 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
 	return 0;
 }
 
+/* Base page size affects how we walk hugetlb page tables */
+#ifdef CONFIG_PPC_64K_PAGES
+#define hpmd_offset(pud, addr)		pmd_offset(pud, addr)
+#define hpmd_alloc(mm, pud, addr)	pmd_alloc(mm, pud, addr)
+#else
+static inline
+pmd_t *hpmd_offset(pud_t *pud, unsigned long addr)
+{
+	if (HPAGE_SHIFT == HPAGE_SHIFT_64K)
+		return pmd_offset(pud, addr);
+	else
+		return (pmd_t *) pud;
+}
+static inline
+pmd_t *hpmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long addr)
+{
+	if (HPAGE_SHIFT == HPAGE_SHIFT_64K)
+		return pmd_alloc(mm, pud, addr);
+	else
+		return (pmd_t *) pud;
+}
+#endif
+
 /* Modelled after find_linux_pte() */
 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
 	pgd_t *pg;
 	pud_t *pu;
+	pmd_t *pm;
 
 	BUG_ON(get_slice_psize(mm, addr) != mmu_huge_psize);
@@ -96,14 +119,9 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 	if (!pgd_none(*pg)) {
 		pu = pud_offset(pg, addr);
 		if (!pud_none(*pu)) {
-#ifdef CONFIG_PPC_64K_PAGES
-			pmd_t *pm;
-			pm = pmd_offset(pu, addr);
+			pm = hpmd_offset(pu, addr);
 			if (!pmd_none(*pm))
 				return hugepte_offset((hugepd_t *)pm, addr);
-#else
-			return hugepte_offset((hugepd_t *)pu, addr);
-#endif
 		}
 	}
@@ -114,6 +132,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
 {
 	pgd_t *pg;
 	pud_t *pu;
+	pmd_t *pm;
 	hugepd_t *hpdp = NULL;
 
 	BUG_ON(get_slice_psize(mm, addr) != mmu_huge_psize);
@@ -124,14 +143,9 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
 	pu = pud_alloc(mm, pg, addr);
 	if (pu) {
-#ifdef CONFIG_PPC_64K_PAGES
-		pmd_t *pm;
-		pm = pmd_alloc(mm, pu, addr);
+		pm = hpmd_alloc(mm, pu, addr);
 		if (pm)
 			hpdp = (hugepd_t *)pm;
-#else
-		hpdp = (hugepd_t *)pu;
-#endif
 	}
 
 	if (! hpdp)
@@ -158,7 +172,6 @@ static void free_hugepte_range(struct mmu_gather *tlb, hugepd_t *hpdp)
 						 PGF_CACHENUM_MASK));
 }
 
-#ifdef CONFIG_PPC_64K_PAGES
 static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
 				   unsigned long addr, unsigned long end,
 				   unsigned long floor, unsigned long ceiling)
@@ -191,7 +204,6 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
 	pud_clear(pud);
 	pmd_free_tlb(tlb, pmd);
 }
-#endif
 
 static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
 				   unsigned long addr, unsigned long end,
@@ -210,9 +222,15 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
 			continue;
 		hugetlb_free_pmd_range(tlb, pud, addr, next, floor, ceiling);
 #else
-		if (pud_none(*pud))
-			continue;
-		free_hugepte_range(tlb, (hugepd_t *)pud);
+		if (HPAGE_SHIFT == HPAGE_SHIFT_64K) {
+			if (pud_none_or_clear_bad(pud))
+				continue;
+			hugetlb_free_pmd_range(tlb, pud, addr, next, floor, ceiling);
+		} else {
+			if (pud_none(*pud))
+				continue;
+			free_hugepte_range(tlb, (hugepd_t *)pud);
+		}
 #endif
 	} while (pud++, addr = next, addr != end);
@@ -526,6 +544,57 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
 	return err;
 }
 
+void set_huge_psize(int psize)
+{
+	/* Check that it is a page size supported by the hardware and
+	 * that it fits within pagetable limits. */
+	if (mmu_psize_defs[psize].shift && mmu_psize_defs[psize].shift < SID_SHIFT &&
+		(mmu_psize_defs[psize].shift > MIN_HUGEPTE_SHIFT ||
+			mmu_psize_defs[psize].shift == HPAGE_SHIFT_64K)) {
+		HPAGE_SHIFT = mmu_psize_defs[psize].shift;
+		mmu_huge_psize = psize;
+#ifdef CONFIG_PPC_64K_PAGES
+		hugepte_shift = (PMD_SHIFT-HPAGE_SHIFT);
+#else
+		if (HPAGE_SHIFT == HPAGE_SHIFT_64K)
+			hugepte_shift = (PMD_SHIFT-HPAGE_SHIFT);
+		else
+			hugepte_shift = (PUD_SHIFT-HPAGE_SHIFT);
+#endif
+	} else
+		HPAGE_SHIFT = 0;
+}
+
+static int __init hugepage_setup_sz(char *str)
+{
+	unsigned long long size;
+	int mmu_psize = -1;
+	int shift;
+
+	size = memparse(str, &str);
+
+	shift = __ffs(size);
+	switch (shift) {
+#ifndef CONFIG_PPC_64K_PAGES
+	case HPAGE_SHIFT_64K:
+		mmu_psize = MMU_PAGE_64K;
+		break;
+#endif
+	case HPAGE_SHIFT_16M:
+		mmu_psize = MMU_PAGE_16M;
+		break;
+	}
+
+	if (mmu_psize >=0 && mmu_psize_defs[mmu_psize].shift)
+		set_huge_psize(mmu_psize);
+	else
+		printk(KERN_WARNING "Invalid huge page size specified(%llu)\n", size);
+
+	return 1;
+}
+__setup("hugepagesz=", hugepage_setup_sz);
+
 static void zero_ctor(struct kmem_cache *cache, void *addr)
 {
 	memset(addr, 0, kmem_cache_size(cache));
@@ -278,6 +278,7 @@ extern int hash_huge_page(struct mm_struct *mm, unsigned long access,
 extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 			unsigned long pstart, unsigned long mode,
 			int psize, int ssize);
+extern void set_huge_psize(int psize);
 extern void htab_initialize(void);
 extern void htab_initialize_secondary(void);