Commit 0c4d2680 authored by Aneesh Kumar K.V, committed by Michael Ellerman

powerpc/book3s64/mm: Simplify the rcu callback for page table free

Instead of encoding the shift in the table address, use an enumerated index value.
This allows us to do different things in the callback for pte and pmd.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 1c7ec8a4
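
Both before and after this patch, pgtable_free_tlb() smuggles a small integer through the low bits of the page table pointer it hands to tlb_remove_table(), and __tlb_remove_table() masks it back out on the RCU side; the patch only changes what that integer means (an enumerated level index instead of a cache shift). Below is a minimal standalone sketch of that low-bit tagging trick, not kernel code: the names encode/decode/INDEX_MASK are hypothetical, and it assumes a 0xf mask (as MAX_PGTABLE_INDEX_SIZE is here) with table pages aligned to at least 16 bytes so the low bits are free.

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define INDEX_MASK 0xfUL	/* stands in for MAX_PGTABLE_INDEX_SIZE */

enum pgtable_index { PTE_INDEX = 0, PMD_INDEX, PUD_INDEX, PGD_INDEX };

/* Pack the level index into the alignment bits of the table pointer. */
static void *encode(void *table, enum pgtable_index index)
{
	/* alignment guarantees the low bits are zero and thus usable */
	assert(((uintptr_t)table & INDEX_MASK) == 0);
	return (void *)((uintptr_t)table | index);
}

/* Recover the pointer and the index on the callback side. */
static void decode(void *tagged, void **table, enum pgtable_index *index)
{
	*table = (void *)((uintptr_t)tagged & ~INDEX_MASK);
	*index = (uintptr_t)tagged & INDEX_MASK;
}

int main(void)
{
	void *table, *out;
	enum pgtable_index idx;

	/* aligned_alloc(16, ...) keeps the low four bits of the address clear */
	table = aligned_alloc(16, 64);
	decode(encode(table, PUD_INDEX), &out, &idx);
	assert(out == table && idx == PUD_INDEX);
	free(table);
	return 0;
}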
@@ -124,14 +124,14 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 }
 
 static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
-                                  unsigned long address)
+                                  unsigned long address)
 {
         /*
          * By now all the pud entries should be none entries. So go
          * ahead and flush the page walk cache
          */
         flush_tlb_pgtable(tlb, address);
-        pgtable_free_tlb(tlb, pud, PUD_CACHE_INDEX);
+        pgtable_free_tlb(tlb, pud, PUD_INDEX);
 }
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
@@ -146,14 +146,14 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 }
 
 static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
-                                  unsigned long address)
+                                  unsigned long address)
 {
         /*
          * By now all the pud entries should be none entries. So go
          * ahead and flush the page walk cache
          */
         flush_tlb_pgtable(tlb, address);
-        return pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX);
+        return pgtable_free_tlb(tlb, pmd, PMD_INDEX);
 }
 
 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
@@ -203,7 +203,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
          * ahead and flush the page walk cache
          */
         flush_tlb_pgtable(tlb, address);
-        pgtable_free_tlb(tlb, table, 0);
+        pgtable_free_tlb(tlb, table, PTE_INDEX);
 }
 
 #define check_pgt_cache()	do { } while (0)
...
@@ -273,6 +273,16 @@ extern unsigned long __pte_frag_size_shift;
 /* Bits to mask out from a PGD to get to the PUD page */
 #define PGD_MASKED_BITS		0xc0000000000000ffUL
 
+/*
+ * Used as an indicator for rcu callback functions
+ */
+enum pgtable_index {
+	PTE_INDEX = 0,
+	PMD_INDEX,
+	PUD_INDEX,
+	PGD_INDEX,
+};
+
 extern unsigned long __vmalloc_start;
 extern unsigned long __vmalloc_end;
 #define VMALLOC_START	__vmalloc_start
...
@@ -309,38 +309,45 @@ void pte_fragment_free(unsigned long *table, int kernel)
 	}
 }
 
+static inline void pgtable_free(void *table, int index)
+{
+	switch (index) {
+	case PTE_INDEX:
+		pte_fragment_free(table, 0);
+		break;
+	case PMD_INDEX:
+		kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), table);
+		break;
+	case PUD_INDEX:
+		kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), table);
+		break;
+	/* We don't free pgd table via RCU callback */
+	default:
+		BUG();
+	}
+}
+
 #ifdef CONFIG_SMP
-void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
+void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
 {
 	unsigned long pgf = (unsigned long)table;
 
-	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
-	pgf |= shift;
+	BUG_ON(index > MAX_PGTABLE_INDEX_SIZE);
+	pgf |= index;
 	tlb_remove_table(tlb, (void *)pgf);
 }
 
 void __tlb_remove_table(void *_table)
 {
 	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
-	unsigned int shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;
+	unsigned int index = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;
 
-	if (!shift)
-		/* PTE page needs special handling */
-		pte_fragment_free(table, 0);
-	else {
-		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
-		kmem_cache_free(PGT_CACHE(shift), table);
-	}
+	return pgtable_free(table, index);
 }
 #else
-void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
+void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
 {
-	if (!shift) {
-		/* PTE page needs special handling */
-		pte_fragment_free(table, 0);
-	} else {
-		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
-		kmem_cache_free(PGT_CACHE(shift), table);
-	}
+	return pgtable_free(table, index);
 }
 #endif
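
The net effect: the tag in the pointer's low bits is no longer the kmem cache shift, where 0 could only mean "PTE fragment" and every other value went straight to PGT_CACHE(shift), but a level identifier that pgtable_free() switches on. That is what lets the callback "do different things ... for pte and pmd" as the commit message says, and since pgd tables are never freed through this RCU path, the default case is a BUG().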