Commit c03ab9e3 authored by Mike Rapoport, committed by Linus Torvalds

ia64: add support for folded p4d page tables

Implement the primitives necessary for folding the 4th (p4d) level, add walks of
the p4d level where appropriate, remove the usage of __ARCH_USE_5LEVEL_HACK, and
replace 5level-fixup.h with pgtable-nop4d.h.
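
For context, a minimal sketch (editorial illustration, not part of the patch) of the
page-table walk pattern this change converts ia64 to: a p4d_offset() step is inserted
between the pgd and pud lookups, and because <asm-generic/pgtable-nop4d.h> folds the
p4d level, p4d_offset() simply returns the pgd entry recast as a p4d_t *, so no extra
level is actually walked. The helper name walk_kernel_address() is invented for this
example; the accessors used are the generic kernel page-table API.

/*
 * Illustrative sketch only -- not taken from the patch.  Mirrors the
 * converted walk in mapped_kernel_page_is_present() below.
 */
#include <linux/mm.h>

static pte_t *walk_kernel_address(unsigned long address)
{
        pgd_t *pgd = pgd_offset_k(address);     /* top-level entry */
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;

        if (pgd_none(*pgd) || pgd_bad(*pgd))
                return NULL;

        p4d = p4d_offset(pgd, address);         /* folded: (p4d_t *)pgd on ia64 */
        if (p4d_none(*p4d) || p4d_bad(*p4d))
                return NULL;

        pud = pud_offset(p4d, address);
        if (pud_none(*pud) || pud_bad(*pud))
                return NULL;

        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd) || pmd_bad(*pmd))
                return NULL;

        return pte_offset_kernel(pmd, address);
}
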
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Brian Cain <bcain@codeaurora.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christophe Leroy <christophe.leroy@c-s.fr>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Geert Uytterhoeven <geert+renesas@glider.be>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: James Morse <james.morse@arm.com>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Julien Thierry <julien.thierry.kdev@gmail.com>
Cc: Ley Foon Tan <ley.foon.tan@intel.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Rich Felker <dalias@libc.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
Cc: Suzuki K Poulose <suzuki.poulose@arm.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Will Deacon <will@kernel.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Link: http://lkml.kernel.org/r/20200414153455.21744-6-rppt@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 00b13def
@@ -36,9 +36,9 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 #if CONFIG_PGTABLE_LEVELS == 4
 static inline void
-pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
+p4d_populate(struct mm_struct *mm, p4d_t * p4d_entry, pud_t * pud)
 {
-        pgd_val(*pgd_entry) = __pa(pud);
+        p4d_val(*p4d_entry) = __pa(pud);
 }
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
@@ -283,12 +283,12 @@ extern unsigned long VMALLOC_END;
 #define pud_page(pud)           virt_to_page((pud_val(pud) + PAGE_OFFSET))
 
 #if CONFIG_PGTABLE_LEVELS == 4
-#define pgd_none(pgd)           (!pgd_val(pgd))
-#define pgd_bad(pgd)            (!ia64_phys_addr_valid(pgd_val(pgd)))
-#define pgd_present(pgd)        (pgd_val(pgd) != 0UL)
-#define pgd_clear(pgdp)         (pgd_val(*(pgdp)) = 0UL)
-#define pgd_page_vaddr(pgd)     ((unsigned long) __va(pgd_val(pgd) & _PFN_MASK))
-#define pgd_page(pgd)           virt_to_page((pgd_val(pgd) + PAGE_OFFSET))
+#define p4d_none(p4d)           (!p4d_val(p4d))
+#define p4d_bad(p4d)            (!ia64_phys_addr_valid(p4d_val(p4d)))
+#define p4d_present(p4d)        (p4d_val(p4d) != 0UL)
+#define p4d_clear(p4dp)         (p4d_val(*(p4dp)) = 0UL)
+#define p4d_page_vaddr(p4d)     ((unsigned long) __va(p4d_val(p4d) & _PFN_MASK))
+#define p4d_page(p4d)           virt_to_page((p4d_val(p4d) + PAGE_OFFSET))
 #endif
 
 /*
@@ -386,7 +386,7 @@ pgd_offset (const struct mm_struct *mm, unsigned long address)
 #if CONFIG_PGTABLE_LEVELS == 4
 /* Find an entry in the second-level page table.. */
 #define pud_offset(dir,addr) \
-        ((pud_t *) pgd_page_vaddr(*(dir)) + (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
+        ((pud_t *) p4d_page_vaddr(*(dir)) + (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
 #endif
 
 /* Find an entry in the third-level page table.. */
@@ -580,10 +580,9 @@ extern struct page *zero_page_memmap_ptr;
 #if CONFIG_PGTABLE_LEVELS == 3
-#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopud.h>
 #endif
-#include <asm-generic/5level-fixup.h>
+#include <asm-generic/pgtable-nop4d.h>
 #include <asm-generic/pgtable.h>
 #endif /* _ASM_IA64_PGTABLE_H */
@@ -29,6 +29,7 @@ static int
 mapped_kernel_page_is_present (unsigned long address)
 {
         pgd_t *pgd;
+        p4d_t *p4d;
         pud_t *pud;
         pmd_t *pmd;
         pte_t *ptep, pte;
@@ -37,7 +38,11 @@ mapped_kernel_page_is_present (unsigned long address)
         if (pgd_none(*pgd) || pgd_bad(*pgd))
                 return 0;
 
-        pud = pud_offset(pgd, address);
+        p4d = p4d_offset(pgd, address);
+        if (p4d_none(*p4d) || p4d_bad(*p4d))
+                return 0;
+
+        pud = pud_offset(p4d, address);
         if (pud_none(*pud) || pud_bad(*pud))
                 return 0;
@@ -30,12 +30,14 @@ huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
 {
         unsigned long taddr = htlbpage_to_page(addr);
         pgd_t *pgd;
+        p4d_t *p4d;
         pud_t *pud;
         pmd_t *pmd;
         pte_t *pte = NULL;
 
         pgd = pgd_offset(mm, taddr);
-        pud = pud_alloc(mm, pgd, taddr);
+        p4d = p4d_offset(pgd, taddr);
+        pud = pud_alloc(mm, p4d, taddr);
         if (pud) {
                 pmd = pmd_alloc(mm, pud, taddr);
                 if (pmd)
@@ -49,19 +51,23 @@ huge_pte_offset (struct mm_struct *mm, unsigned long addr, unsigned long sz)
 {
         unsigned long taddr = htlbpage_to_page(addr);
         pgd_t *pgd;
+        p4d_t *p4d;
         pud_t *pud;
         pmd_t *pmd;
         pte_t *pte = NULL;
 
         pgd = pgd_offset(mm, taddr);
         if (pgd_present(*pgd)) {
-                pud = pud_offset(pgd, taddr);
-                if (pud_present(*pud)) {
-                        pmd = pmd_offset(pud, taddr);
-                        if (pmd_present(*pmd))
-                                pte = pte_offset_map(pmd, taddr);
+                p4d = p4d_offset(pgd, addr);
+                if (p4d_present(*p4d)) {
+                        pud = pud_offset(p4d, taddr);
+                        if (pud_present(*pud)) {
+                                pmd = pmd_offset(pud, taddr);
+                                if (pmd_present(*pmd))
+                                        pte = pte_offset_map(pmd, taddr);
+                        }
                 }
         }
 
         return pte;
 }
@@ -208,6 +208,7 @@ static struct page * __init
 put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
 {
         pgd_t *pgd;
+        p4d_t *p4d;
         pud_t *pud;
         pmd_t *pmd;
         pte_t *pte;
@@ -215,7 +216,10 @@ put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
         pgd = pgd_offset_k(address);            /* note: this is NOT pgd_offset()! */
 
         {
-                pud = pud_alloc(&init_mm, pgd, address);
+                p4d = p4d_alloc(&init_mm, pgd, address);
+                if (!p4d)
+                        goto out;
+                pud = pud_alloc(&init_mm, p4d, address);
                 if (!pud)
                         goto out;
                 pmd = pmd_alloc(&init_mm, pud, address);
@@ -382,6 +386,7 @@ int vmemmap_find_next_valid_pfn(int node, int i)
         do {
                 pgd_t *pgd;
+                p4d_t *p4d;
                 pud_t *pud;
                 pmd_t *pmd;
                 pte_t *pte;
@@ -392,7 +397,13 @@ int vmemmap_find_next_valid_pfn(int node, int i)
                         continue;
                 }
 
-                pud = pud_offset(pgd, end_address);
+                p4d = p4d_offset(pgd, end_address);
+                if (p4d_none(*p4d)) {
+                        end_address += P4D_SIZE;
+                        continue;
+                }
+
+                pud = pud_offset(p4d, end_address);
                 if (pud_none(*pud)) {
                         end_address += PUD_SIZE;
                         continue;
@@ -430,6 +441,7 @@ int __init create_mem_map_page_table(u64 start, u64 end, void *arg)
         struct page *map_start, *map_end;
         int node;
         pgd_t *pgd;
+        p4d_t *p4d;
         pud_t *pud;
         pmd_t *pmd;
         pte_t *pte;
@@ -444,12 +456,20 @@ int __init create_mem_map_page_table(u64 start, u64 end, void *arg)
         for (address = start_page; address < end_page; address += PAGE_SIZE) {
                 pgd = pgd_offset_k(address);
                 if (pgd_none(*pgd)) {
+                        p4d = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
+                        if (!p4d)
+                                goto err_alloc;
+                        pgd_populate(&init_mm, pgd, p4d);
+                }
+                p4d = p4d_offset(pgd, address);
+                if (p4d_none(*p4d)) {
                         pud = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
                         if (!pud)
                                 goto err_alloc;
-                        pgd_populate(&init_mm, pgd, pud);
+                        p4d_populate(&init_mm, p4d, pud);
                 }
-                pud = pud_offset(pgd, address);
+                pud = pud_offset(p4d, address);
                 if (pud_none(*pud)) {
                         pmd = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);