Commit f940f528 authored by Aneesh Kumar K.V, committed by Benjamin Herrenschmidt

powerpc/THP: Double the PMD table size for THP

The THP code allocates a PTE page along with each large page request and deposits it
for later use. This ensures that we won't see any failures when we split
hugepages back into regular pages.

On powerpc we want to use the deposited PTE page to store the hash PTE slot and
secondary-bit information for the HPTEs. We use the second half
of the PMD table to save the deposited PTE page.
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent db3d8534
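To make the layout the message describes concrete: with the PMD table doubled, the first half holds the PMD entries and the second half holds one slot per entry for the deposited PTE page. The sketch below models that in plain C; the type names, the PMD_INDEX_SIZE value, and the slot arithmetic are illustrative assumptions drawn from the description above, not the kernel's own definitions.

```c
#include <stdio.h>
#include <stdlib.h>

/* Assumed example value; the real PMD_INDEX_SIZE depends on the config. */
#define PMD_INDEX_SIZE	10
#define PTRS_PER_PMD	(1UL << PMD_INDEX_SIZE)

typedef unsigned long pmd_demo_t;	/* stand-in for pmd_t */
typedef void *pgtable_demo_t;		/* stand-in for pgtable_t */

int main(void)
{
	/* Double-sized table: entries in the first half, one deposit
	 * slot per entry in the second half. */
	pmd_demo_t *pmd_table = calloc(2 * PTRS_PER_PMD, sizeof(*pmd_table));
	pgtable_demo_t pte_page = malloc(4096);	/* pretend PTE page */
	size_t i = 5;				/* some PMD entry */

	/* "Deposit": stash the PTE page in the parallel slot,
	 * PTRS_PER_PMD entries past the PMD entry itself. */
	((pgtable_demo_t *)pmd_table)[PTRS_PER_PMD + i] = pte_page;

	/* "Withdraw" at split time: read the slot back. */
	printf("deposited page for entry %zu: %p\n", i,
	       ((pgtable_demo_t *)pmd_table)[PTRS_PER_PMD + i]);

	free(pte_page);
	free(pmd_table);
	return 0;
}
```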
@@ -221,17 +221,17 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return kmem_cache_alloc(PGT_CACHE(PMD_INDEX_SIZE),
+	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
 				GFP_KERNEL|__GFP_REPEAT);
 }
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 {
-	kmem_cache_free(PGT_CACHE(PMD_INDEX_SIZE), pmd);
+	kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd);
 }
 
 #define __pmd_free_tlb(tlb, pmd, addr)		      \
-	pgtable_free_tlb(tlb, pmd, PMD_INDEX_SIZE)
+	pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX)
 
 #ifndef CONFIG_PPC_64K_PAGES
 #define __pud_free_tlb(tlb, pud, addr)		      \
 	pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE)

...
@@ -33,7 +33,8 @@
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
 
 /* Bits to mask out from a PMD to get to the PTE page */
-#define PMD_MASKED_BITS		0x1ff
+/* PMDs point to PTE table fragments which are 4K aligned.  */
+#define PMD_MASKED_BITS		0xfff
 
 /* Bits to mask out from a PGD/PUD to get to the PMD page */
 #define PUD_MASKED_BITS		0x1ff

...
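The wider mask reflects the comment added above: a PMD entry now points at a 4K-aligned PTE table fragment, so the low 12 bits (0xfff) are not address bits and must be stripped when following the pointer. A minimal sketch of that masking, with demo types standing in for the kernel's pmd_t and pmd_val():

```c
#include <stdio.h>

#define PMD_MASKED_BITS	0xfffUL	/* fragments are 4K aligned */

/* Hypothetical stand-ins for the kernel's pmd_t / pmd_val(). */
typedef struct { unsigned long pmd; } pmd_demo_t;
#define pmd_val(x)	((x).pmd)

/* Strip the low bits to recover the address of the PTE table
 * fragment the PMD entry points at. */
static unsigned long pte_fragment_addr(pmd_demo_t pmd)
{
	return pmd_val(pmd) & ~PMD_MASKED_BITS;
}

int main(void)
{
	pmd_demo_t pmd = { 0xc000000012345abcUL };
	/* prints 0xc000000012345000: the 4K-aligned fragment */
	printf("fragment at 0x%lx\n", pte_fragment_addr(pmd));
	return 0;
}
```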
@@ -20,7 +20,11 @@
 			    PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
 #define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE)
 
-
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define PMD_CACHE_INDEX	(PMD_INDEX_SIZE + 1)
+#else
+#define PMD_CACHE_INDEX	PMD_INDEX_SIZE
+#endif
 /*
  * Define the address range of the kernel non-linear virtual area
  */

...
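Why bumping the index by one doubles the allocation: the powerpc page-table caches size a table as sizeof(void *) shifted by the cache index (see pgtable_cache_add() in the final hunk below), so PMD_INDEX_SIZE + 1 yields exactly twice the bytes. A small sanity-check sketch, with an assumed PMD_INDEX_SIZE:

```c
#include <stdio.h>

/* Assumed example value; the real PMD_INDEX_SIZE depends on the config. */
#define PMD_INDEX_SIZE	10
#define PMD_CACHE_INDEX	(PMD_INDEX_SIZE + 1)	/* the THP case */

int main(void)
{
	/* A table of index n takes sizeof(void *) << n bytes, so the
	 * THP case allocates exactly twice as much. */
	printf("PMD table, !THP: %zu bytes\n", sizeof(void *) << PMD_INDEX_SIZE);
	printf("PMD table,  THP: %zu bytes\n", sizeof(void *) << PMD_CACHE_INDEX);
	return 0;
}
```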
@@ -88,7 +88,11 @@ static void pgd_ctor(void *addr)
 
 static void pmd_ctor(void *addr)
 {
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	memset(addr, 0, PMD_TABLE_SIZE * 2);
+#else
 	memset(addr, 0, PMD_TABLE_SIZE);
+#endif
 }
 
 struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE];
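The constructor has to clear the full doubled object: slab constructors run when the cache populates a page, and objects are expected to come back in their constructed state, so a stale non-zero value in the second half could later read as a bogus deposited PTE page. A toy model of that contract, with a hypothetical name and an assumed table size:

```c
#include <stdlib.h>
#include <string.h>
#include <assert.h>

#define PMD_TABLE_SIZE	8192	/* assumed example size */

/* Models pmd_ctor() under THP: both the PMD entries (first half) and
 * the deposit slots (second half) start out zeroed. */
static void pmd_ctor_demo(void *addr)
{
	memset(addr, 0, PMD_TABLE_SIZE * 2);
}

int main(void)
{
	unsigned char *obj = malloc(PMD_TABLE_SIZE * 2);
	pmd_ctor_demo(obj);
	/* Second half is clean: no slot looks like a leftover deposit. */
	assert(obj[PMD_TABLE_SIZE] == 0);
	free(obj);
	return 0;
}
```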
@@ -137,10 +141,9 @@ void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
 void pgtable_cache_init(void)
 {
 	pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
-	pgtable_cache_add(PMD_INDEX_SIZE, pmd_ctor);
-	if (!PGT_CACHE(PGD_INDEX_SIZE) || !PGT_CACHE(PMD_INDEX_SIZE))
+	pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor);
+	if (!PGT_CACHE(PGD_INDEX_SIZE) || !PGT_CACHE(PMD_CACHE_INDEX))
 		panic("Couldn't allocate pgtable caches");
-
 	/* In all current configs, when the PUD index exists it's the
 	 * same size as either the pgd or pmd index.  Verify that the
 	 * initialization above has also created a PUD cache.  This

...