Commit 7bf82eb3 authored by Matthew Wilcox (Oracle), committed by Helge Deller

parisc: Rename PMD_ORDER to PMD_TABLE_ORDER

This is the order of the page table allocation, not the order of a PMD.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Helge Deller <deller@gmx.de>
parent 7f2dcc73
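
The distinction the rename draws can be read off the defines touched below: PMD_TABLE_ORDER is the page allocation order of one PMD page table, and the number of entries in that table falls out of it; the range a single PMD entry maps is a separate quantity (PMD_SHIFT/PMD_SIZE). A minimal sketch of the first relationship, using assumed values for PAGE_SHIFT and BITS_PER_PMD_ENTRY rather than the real configuration-dependent ones (PMD_TABLE_BYTES is only a name for this sketch, not a kernel macro):

/* Assumed example values; the real ones come from the parisc headers
 * and depend on the configured page size and word size. */
#define PAGE_SHIFT		12	/* assume 4 KiB pages */
#define PAGE_SIZE		(1UL << PAGE_SHIFT)
#define BITS_PER_PMD_ENTRY	3	/* assume 8-byte table entries */
#define PMD_TABLE_ORDER		1	/* value from this diff */

/* Bytes backing one PMD page table: what the renamed constant sizes. */
#define PMD_TABLE_BYTES		(PAGE_SIZE << PMD_TABLE_ORDER)	/* 8 KiB here */

/* Entries in that table, mirroring the BITS_PER_PMD define below. */
#define BITS_PER_PMD		(PAGE_SHIFT + PMD_TABLE_ORDER - BITS_PER_PMD_ENTRY)
#define PTRS_PER_PMD		(1UL << BITS_PER_PMD)	/* 1024 entries here */
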
@@ -48,15 +48,15 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
 	pmd_t *pmd;
 
-	pmd = (pmd_t *)__get_free_pages(GFP_PGTABLE_KERNEL, PMD_ORDER);
+	pmd = (pmd_t *)__get_free_pages(GFP_PGTABLE_KERNEL, PMD_TABLE_ORDER);
 	if (likely(pmd))
-		memset ((void *)pmd, 0, PAGE_SIZE << PMD_ORDER);
+		memset ((void *)pmd, 0, PAGE_SIZE << PMD_TABLE_ORDER);
 	return pmd;
 }
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 {
-	free_pages((unsigned long)pmd, PMD_ORDER);
+	free_pages((unsigned long)pmd, PMD_TABLE_ORDER);
 }
 #endif
 
...
@@ -112,7 +112,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 #define KERNEL_INITIAL_SIZE	(1 << KERNEL_INITIAL_ORDER)
 
 #if CONFIG_PGTABLE_LEVELS == 3
-#define PMD_ORDER	1
+#define PMD_TABLE_ORDER	1
 #define PGD_ORDER	0
 #else
 #define PGD_ORDER	1
@@ -131,7 +131,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 #define PMD_SHIFT	(PLD_SHIFT + BITS_PER_PTE)
 #define PMD_SIZE	(1UL << PMD_SHIFT)
 #define PMD_MASK	(~(PMD_SIZE-1))
-#define BITS_PER_PMD	(PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY)
+#define BITS_PER_PMD	(PAGE_SHIFT + PMD_TABLE_ORDER - BITS_PER_PMD_ENTRY)
 #define PTRS_PER_PMD	(1UL << BITS_PER_PMD)
 #else
 #define BITS_PER_PMD	0
...
@@ -378,8 +378,8 @@ static void __init map_pages(unsigned long start_vaddr,
 
 #if CONFIG_PGTABLE_LEVELS == 3
 		if (pud_none(*pud)) {
-			pmd = memblock_alloc(PAGE_SIZE << PMD_ORDER,
-					     PAGE_SIZE << PMD_ORDER);
+			pmd = memblock_alloc(PAGE_SIZE << PMD_TABLE_ORDER,
+					     PAGE_SIZE << PMD_TABLE_ORDER);
			if (!pmd)
				panic("pmd allocation failed.\n");
			pud_populate(NULL, pud, pmd);
...
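
For contrast, and to show why the old name was misleading, the address range one fully populated PMD table can map is governed by PMD_SIZE and PTRS_PER_PMD, not by the allocation order. A rough back-of-the-envelope sketch with assumed, non-authoritative numbers (the EX_ names exist only for this illustration):

/* All values assumed for illustration; they are not the parisc defaults. */
#define EX_PAGE_SIZE		(1UL << 12)	/* 4 KiB pages */
#define EX_PMD_TABLE_ORDER	1		/* allocation order from this diff */
#define EX_PTRS_PER_PMD		(1UL << 10)	/* 1024 entries per table */
#define EX_PMD_SIZE		(1UL << 21)	/* assume 2 MiB mapped per entry */

/* Footprint of the table itself: the quantity PMD_TABLE_ORDER describes. */
#define EX_PMD_TABLE_BYTES	(EX_PAGE_SIZE << EX_PMD_TABLE_ORDER)	/* 8 KiB */

/* Address space a full table maps: what "order of a PMD" would suggest. */
#define EX_PMD_TABLE_REACH	(EX_PTRS_PER_PMD * EX_PMD_SIZE)		/* 2 GiB */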