Commit fadd03c6 authored by Aneesh Kumar K.V, committed by Michael Ellerman

powerpc/mm/hash/4k: Free hugetlb page table caches correctly.

With 4k page size for hugetlb we allocate hugepage directories from their own slab
cache. With patch 0c4d2680 ("powerpc/book3s64/mm: Simplify the rcu callback for page table free")
we failed to free these allocated hugepd tables.

Update pgtable_free to handle hugetlb hugepd directory tables.

Fixes: 0c4d2680 ("powerpc/book3s64/mm: Simplify the rcu callback for page table free")
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
[mpe: Add CONFIG_HUGETLB_PAGE guard to fix build break]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 758380b8
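
The fix hinges on a small piece of arithmetic: free_hugepd_range() passes pdshift - shift as the slab cache index, and the new get_hugepd_cache_index() maps the two values that can occur with a 4k page size onto enum entries that pgtable_free() can dispatch on. The following minimal standalone C sketch checks that arithmetic; the geometry constants (PAGE_SHIFT = 12, H_PTE_INDEX_SIZE = 9, H_PMD_INDEX_SIZE = 7, H_PUD_INDEX_SIZE = 9) are assumed from the hash-4k.h of this era and the HTLB_* values are stand-ins, so treat it as an illustration rather than kernel source:

#include <assert.h>
#include <stdio.h>

/* Assumed 4k hash page table geometry (hash-4k.h, v4.18 era); illustrative only. */
#define PAGE_SHIFT		12
#define H_PTE_INDEX_SIZE	9
#define H_PMD_INDEX_SIZE	7
#define H_PUD_INDEX_SIZE	9

/* Same expressions as the macros this patch adds. */
#define H_16M_CACHE_INDEX	(PAGE_SHIFT + H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE - 24)
#define H_16G_CACHE_INDEX \
	(PAGE_SHIFT + H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE + H_PUD_INDEX_SIZE - 34)

/* Stand-in values; the kernel takes these from enum pgtable_index. */
enum { HTLB_16M_INDEX = 4, HTLB_16G_INDEX = 5 };

static int get_hugepd_cache_index(int index)
{
	switch (index) {
	case H_16M_CACHE_INDEX:
		return HTLB_16M_INDEX;
	case H_16G_CACHE_INDEX:
		return HTLB_16G_INDEX;
	default:
		assert(0);	/* stands in for BUG() */
		return -1;
	}
}

int main(void)
{
	int pud_shift = PAGE_SHIFT + H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE; /* 28 */
	int pgd_shift = pud_shift + H_PUD_INDEX_SIZE;                     /* 37 */

	/* free_hugepd_range() passes pdshift - shift for a hugepd at that level. */
	printf("16M: pdshift - shift = %d, cache index %d\n",
	       pud_shift - 24, H_16M_CACHE_INDEX);	/* both 4 */
	printf("16G: pdshift - shift = %d, cache index %d\n",
	       pgd_shift - 34, H_16G_CACHE_INDEX);	/* both 3 */

	assert(get_hugepd_cache_index(pud_shift - 24) == HTLB_16M_INDEX);
	assert(get_hugepd_cache_index(pgd_shift - 34) == HTLB_16G_INDEX);
	return 0;
}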
arch/powerpc/include/asm/book3s/32/pgalloc.h
@@ -108,6 +108,7 @@ static inline void pgtable_free(void *table, unsigned index_size)
 }
 
 #define check_pgt_cache()	do { } while (0)
+#define get_hugepd_cache_index(x)	(x)
 
 #ifdef CONFIG_SMP
 static inline void pgtable_free_tlb(struct mmu_gather *tlb,
arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -49,6 +49,27 @@ static inline int hugepd_ok(hugepd_t hpd)
 }
 
 #define is_hugepd(hpd)		(hugepd_ok(hpd))
+
+/*
+ * 16M and 16G huge page directory tables are allocated from slab cache
+ *
+ */
+#define H_16M_CACHE_INDEX	(PAGE_SHIFT + H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE - 24)
+#define H_16G_CACHE_INDEX \
+	(PAGE_SHIFT + H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE + H_PUD_INDEX_SIZE - 34)
+
+static inline int get_hugepd_cache_index(int index)
+{
+	switch (index) {
+	case H_16M_CACHE_INDEX:
+		return HTLB_16M_INDEX;
+	case H_16G_CACHE_INDEX:
+		return HTLB_16G_INDEX;
+	default:
+		BUG();
+	}
+	/* should not reach */
+}
 #else /* !CONFIG_HUGETLB_PAGE */
 static inline int pmd_huge(pmd_t pmd) { return 0; }
 static inline int pud_huge(pud_t pud) { return 0; }
arch/powerpc/include/asm/book3s/64/hash-64k.h
@@ -45,8 +45,17 @@ static inline int hugepd_ok(hugepd_t hpd)
 {
 	return 0;
 }
+
 #define is_hugepd(pdep)			0
 
+/*
+ * This should never get called
+ */
+static inline int get_hugepd_cache_index(int index)
+{
+	BUG();
+}
+
 #else /* !CONFIG_HUGETLB_PAGE */
 static inline int pmd_huge(pmd_t pmd) { return 0; }
 static inline int pud_huge(pud_t pud) { return 0; }
arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -287,6 +287,11 @@ enum pgtable_index {
 	PMD_INDEX,
 	PUD_INDEX,
 	PGD_INDEX,
+	/*
+	 * Below are used with 4k page size and hugetlb
+	 */
+	HTLB_16M_INDEX,
+	HTLB_16G_INDEX,
 };
 
 extern unsigned long __vmalloc_start;
arch/powerpc/include/asm/nohash/32/pgalloc.h
@@ -109,6 +109,7 @@ static inline void pgtable_free(void *table, unsigned index_size)
 }
 
 #define check_pgt_cache()	do { } while (0)
+#define get_hugepd_cache_index(x)	(x)
 
 #ifdef CONFIG_SMP
 static inline void pgtable_free_tlb(struct mmu_gather *tlb,
arch/powerpc/include/asm/nohash/64/pgalloc.h
@@ -141,6 +141,7 @@ static inline void pgtable_free(void *table, int shift)
 	}
 }
 
+#define get_hugepd_cache_index(x)	(x)
 #ifdef CONFIG_SMP
 static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
 {
arch/powerpc/mm/hugetlbpage.c
@@ -337,7 +337,8 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
 	if (shift >= pdshift)
 		hugepd_free(tlb, hugepte);
 	else
-		pgtable_free_tlb(tlb, hugepte, pdshift - shift);
+		pgtable_free_tlb(tlb, hugepte,
+				 get_hugepd_cache_index(pdshift - shift));
 }
 
 static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
arch/powerpc/mm/pgtable-book3s64.c
@@ -409,6 +409,18 @@ static inline void pgtable_free(void *table, int index)
 	case PUD_INDEX:
 		kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), table);
 		break;
+#if defined(CONFIG_PPC_4K_PAGES) && defined(CONFIG_HUGETLB_PAGE)
+	/* 16M hugepd directory at pud level */
+	case HTLB_16M_INDEX:
+		BUILD_BUG_ON(H_16M_CACHE_INDEX <= 0);
+		kmem_cache_free(PGT_CACHE(H_16M_CACHE_INDEX), table);
+		break;
+	/* 16G hugepd directory at the pgd level */
+	case HTLB_16G_INDEX:
+		BUILD_BUG_ON(H_16G_CACHE_INDEX <= 0);
+		kmem_cache_free(PGT_CACHE(H_16G_CACHE_INDEX), table);
+		break;
+#endif
 	/* We don't free pgd table via RCU callback */
 	default:
 		BUG();
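
For context on why pgtable_free() dispatches on a small integer index at all: the RCU-deferred free path packs the cache index into the low bits of the page-aligned table pointer before handing it to tlb_remove_table(), and the callback unpacks both halves again; the new HTLB_* cases simply let hugepd tables ride this existing path back to their slab caches. A simplified standalone sketch of that encode/decode round trip follows; it assumes a 0xf mask and page-aligned allocations and mirrors the kernel's pgtable_free_tlb()/__tlb_remove_table() pattern, but it is an illustration, not the kernel source:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_PGTABLE_INDEX_SIZE	0xf	/* low bits free in an aligned table pointer */

/* Producer side: defer the free, encoding which cache the table came from. */
static void *encode_table(void *table, int index)
{
	assert(index <= MAX_PGTABLE_INDEX_SIZE);
	return (void *)((uintptr_t)table | (uintptr_t)index);
}

/* Deferred-callback side: recover the pointer and the cache index. */
static void decode_table(void *packed, void **table, int *index)
{
	*index = (int)((uintptr_t)packed & MAX_PGTABLE_INDEX_SIZE);
	*table = (void *)((uintptr_t)packed & ~(uintptr_t)MAX_PGTABLE_INDEX_SIZE);
}

int main(void)
{
	void *table = aligned_alloc(4096, 4096);	/* page-aligned, like a pgtable */
	void *packed = encode_table(table, 4);		/* e.g. H_16M_CACHE_INDEX */
	void *out;
	int index;

	decode_table(packed, &out, &index);
	assert(out == table && index == 4);
	printf("recovered index %d\n", index);		/* would select PGT_CACHE(4) */
	free(table);
	return 0;
}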