Commit 7235db26 authored by Mike Rapoport, committed by Linus Torvalds

sparc32: use pgtable-nopud instead of 4level-fixup

32-bit version of sparc has three-level page tables and can use
pgtable-nopud and folding of the upper layers.

Replace usage of include/asm-generic/4level-fixup.h with
include/asm-generic/pgtable-nopud.h and adjust page table manipulation
macros and functions accordingly.

Link: http://lkml.kernel.org/r/1572938135-31886-11-git-send-email-rppt@kernel.org
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Acked-by: David S. Miller <davem@davemloft.net>
Tested-by: Anatoly Pugachev <matorola@gmail.com>
Cc: Anton Ivanov <anton.ivanov@cambridgegreys.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Greg Ungerer <gerg@linux-m68k.org>
Cc: Helge Deller <deller@gmx.de>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Mark Salter <msalter@redhat.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Peter Rosin <peda@axentia.se>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rolf Eike Beer <eike-kernel@sf-tec.de>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Russell King <rmk+kernel@armlinux.org.uk>
Cc: Sam Creasey <sammy@sammy.net>
Cc: Vincent Chen <deanbo422@gmail.com>
Cc: Vineet Gupta <Vineet.Gupta1@synopsys.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 2fa245c1
...@@ -26,14 +26,14 @@ static inline void free_pgd_fast(pgd_t *pgd) ...@@ -26,14 +26,14 @@ static inline void free_pgd_fast(pgd_t *pgd)
#define pgd_free(mm, pgd) free_pgd_fast(pgd) #define pgd_free(mm, pgd) free_pgd_fast(pgd)
#define pgd_alloc(mm) get_pgd_fast() #define pgd_alloc(mm) get_pgd_fast()
static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp) static inline void pud_set(pud_t * pudp, pmd_t * pmdp)
{ {
unsigned long pa = __nocache_pa(pmdp); unsigned long pa = __nocache_pa(pmdp);
set_pte((pte_t *)pgdp, __pte((SRMMU_ET_PTD | (pa >> 4)))); set_pte((pte_t *)pudp, __pte((SRMMU_ET_PTD | (pa >> 4))));
} }
#define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD) #define pud_populate(MM, PGD, PMD) pud_set(PGD, PMD)
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
unsigned long address) unsigned long address)
......
...@@ -12,7 +12,7 @@ ...@@ -12,7 +12,7 @@
#include <linux/const.h> #include <linux/const.h>
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#include <asm-generic/4level-fixup.h> #include <asm-generic/pgtable-nopud.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/mm_types.h> #include <linux/mm_types.h>
...@@ -132,12 +132,12 @@ static inline struct page *pmd_page(pmd_t pmd) ...@@ -132,12 +132,12 @@ static inline struct page *pmd_page(pmd_t pmd)
return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4)); return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4));
} }
static inline unsigned long pgd_page_vaddr(pgd_t pgd) static inline unsigned long pud_page_vaddr(pud_t pud)
{ {
if (srmmu_device_memory(pgd_val(pgd))) { if (srmmu_device_memory(pud_val(pud))) {
return ~0; return ~0;
} else { } else {
unsigned long v = pgd_val(pgd) & SRMMU_PTD_PMASK; unsigned long v = pud_val(pud) & SRMMU_PTD_PMASK;
return (unsigned long)__nocache_va(v << 4); return (unsigned long)__nocache_va(v << 4);
} }
} }
...@@ -184,24 +184,24 @@ static inline void pmd_clear(pmd_t *pmdp) ...@@ -184,24 +184,24 @@ static inline void pmd_clear(pmd_t *pmdp)
set_pte((pte_t *)&pmdp->pmdv[i], __pte(0)); set_pte((pte_t *)&pmdp->pmdv[i], __pte(0));
} }
static inline int pgd_none(pgd_t pgd) static inline int pud_none(pud_t pud)
{ {
return !(pgd_val(pgd) & 0xFFFFFFF); return !(pud_val(pud) & 0xFFFFFFF);
} }
static inline int pgd_bad(pgd_t pgd) static inline int pud_bad(pud_t pud)
{ {
return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; return (pud_val(pud) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
} }
static inline int pgd_present(pgd_t pgd) static inline int pud_present(pud_t pud)
{ {
return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); return ((pud_val(pud) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
} }
static inline void pgd_clear(pgd_t *pgdp) static inline void pud_clear(pud_t *pudp)
{ {
set_pte((pte_t *)pgdp, __pte(0)); set_pte((pte_t *)pudp, __pte(0));
} }
/* /*
...@@ -319,9 +319,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) ...@@ -319,9 +319,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define pgd_offset_k(address) pgd_offset(&init_mm, address) #define pgd_offset_k(address) pgd_offset(&init_mm, address)
/* Find an entry in the second-level page table.. */ /* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pgd_t * dir, unsigned long address) static inline pmd_t *pmd_offset(pud_t * dir, unsigned long address)
{ {
return (pmd_t *) pgd_page_vaddr(*dir) + return (pmd_t *) pud_page_vaddr(*dir) +
((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1)); ((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
} }
......
...@@ -351,6 +351,8 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write, ...@@ -351,6 +351,8 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
*/ */
int offset = pgd_index(address); int offset = pgd_index(address);
pgd_t *pgd, *pgd_k; pgd_t *pgd, *pgd_k;
p4d_t *p4d, *p4d_k;
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k; pmd_t *pmd, *pmd_k;
pgd = tsk->active_mm->pgd + offset; pgd = tsk->active_mm->pgd + offset;
...@@ -363,8 +365,13 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write, ...@@ -363,8 +365,13 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
return; return;
} }
pmd = pmd_offset(pgd, address); p4d = p4d_offset(pgd, address);
pmd_k = pmd_offset(pgd_k, address); pud = pud_offset(p4d, address);
pmd = pmd_offset(pud, address);
p4d_k = p4d_offset(pgd_k, address);
pud_k = pud_offset(p4d_k, address);
pmd_k = pmd_offset(pud_k, address);
if (pmd_present(*pmd) || !pmd_present(*pmd_k)) if (pmd_present(*pmd) || !pmd_present(*pmd_k))
goto bad_area_nosemaphore; goto bad_area_nosemaphore;
......
...@@ -39,10 +39,14 @@ static pte_t *kmap_pte; ...@@ -39,10 +39,14 @@ static pte_t *kmap_pte;
void __init kmap_init(void) void __init kmap_init(void)
{ {
unsigned long address; unsigned long address;
p4d_t *p4d;
pud_t *pud;
pmd_t *dir; pmd_t *dir;
address = __fix_to_virt(FIX_KMAP_BEGIN); address = __fix_to_virt(FIX_KMAP_BEGIN);
dir = pmd_offset(pgd_offset_k(address), address); p4d = p4d_offset(pgd_offset_k(address), address);
pud = pud_offset(p4d, address);
dir = pmd_offset(pud, address);
/* cache the first kmap pte */ /* cache the first kmap pte */
kmap_pte = pte_offset_kernel(dir, address); kmap_pte = pte_offset_kernel(dir, address);
......
...@@ -239,12 +239,16 @@ static void *iounit_alloc(struct device *dev, size_t len, ...@@ -239,12 +239,16 @@ static void *iounit_alloc(struct device *dev, size_t len,
page = va; page = va;
{ {
pgd_t *pgdp; pgd_t *pgdp;
p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp; pmd_t *pmdp;
pte_t *ptep; pte_t *ptep;
long i; long i;
pgdp = pgd_offset(&init_mm, addr); pgdp = pgd_offset(&init_mm, addr);
pmdp = pmd_offset(pgdp, addr); p4dp = p4d_offset(pgdp, addr);
pudp = pud_offset(p4dp, addr);
pmdp = pmd_offset(pudp, addr);
ptep = pte_offset_map(pmdp, addr); ptep = pte_offset_map(pmdp, addr);
set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot)); set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
......
...@@ -343,6 +343,8 @@ static void *sbus_iommu_alloc(struct device *dev, size_t len, ...@@ -343,6 +343,8 @@ static void *sbus_iommu_alloc(struct device *dev, size_t len,
page = va; page = va;
{ {
pgd_t *pgdp; pgd_t *pgdp;
p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp; pmd_t *pmdp;
pte_t *ptep; pte_t *ptep;
...@@ -354,7 +356,9 @@ static void *sbus_iommu_alloc(struct device *dev, size_t len, ...@@ -354,7 +356,9 @@ static void *sbus_iommu_alloc(struct device *dev, size_t len,
__flush_page_to_ram(page); __flush_page_to_ram(page);
pgdp = pgd_offset(&init_mm, addr); pgdp = pgd_offset(&init_mm, addr);
pmdp = pmd_offset(pgdp, addr); p4dp = p4d_offset(pgdp, addr);
pudp = pud_offset(p4dp, addr);
pmdp = pmd_offset(pudp, addr);
ptep = pte_offset_map(pmdp, addr); ptep = pte_offset_map(pmdp, addr);
set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot)); set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
......
...@@ -296,6 +296,8 @@ static void __init srmmu_nocache_init(void) ...@@ -296,6 +296,8 @@ static void __init srmmu_nocache_init(void)
void *srmmu_nocache_bitmap; void *srmmu_nocache_bitmap;
unsigned int bitmap_bits; unsigned int bitmap_bits;
pgd_t *pgd; pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd; pmd_t *pmd;
pte_t *pte; pte_t *pte;
unsigned long paddr, vaddr; unsigned long paddr, vaddr;
...@@ -329,6 +331,8 @@ static void __init srmmu_nocache_init(void) ...@@ -329,6 +331,8 @@ static void __init srmmu_nocache_init(void)
while (vaddr < srmmu_nocache_end) { while (vaddr < srmmu_nocache_end) {
pgd = pgd_offset_k(vaddr); pgd = pgd_offset_k(vaddr);
p4d = p4d_offset(__nocache_fix(pgd), vaddr);
pud = pud_offset(__nocache_fix(p4d), vaddr);
pmd = pmd_offset(__nocache_fix(pgd), vaddr); pmd = pmd_offset(__nocache_fix(pgd), vaddr);
pte = pte_offset_kernel(__nocache_fix(pmd), vaddr); pte = pte_offset_kernel(__nocache_fix(pmd), vaddr);
...@@ -516,13 +520,17 @@ static inline void srmmu_mapioaddr(unsigned long physaddr, ...@@ -516,13 +520,17 @@ static inline void srmmu_mapioaddr(unsigned long physaddr,
unsigned long virt_addr, int bus_type) unsigned long virt_addr, int bus_type)
{ {
pgd_t *pgdp; pgd_t *pgdp;
p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp; pmd_t *pmdp;
pte_t *ptep; pte_t *ptep;
unsigned long tmp; unsigned long tmp;
physaddr &= PAGE_MASK; physaddr &= PAGE_MASK;
pgdp = pgd_offset_k(virt_addr); pgdp = pgd_offset_k(virt_addr);
pmdp = pmd_offset(pgdp, virt_addr); p4dp = p4d_offset(pgdp, virt_addr);
pudp = pud_offset(p4dp, virt_addr);
pmdp = pmd_offset(pudp, virt_addr);
ptep = pte_offset_kernel(pmdp, virt_addr); ptep = pte_offset_kernel(pmdp, virt_addr);
tmp = (physaddr >> 4) | SRMMU_ET_PTE; tmp = (physaddr >> 4) | SRMMU_ET_PTE;
...@@ -551,11 +559,16 @@ void srmmu_mapiorange(unsigned int bus, unsigned long xpa, ...@@ -551,11 +559,16 @@ void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
static inline void srmmu_unmapioaddr(unsigned long virt_addr) static inline void srmmu_unmapioaddr(unsigned long virt_addr)
{ {
pgd_t *pgdp; pgd_t *pgdp;
p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp; pmd_t *pmdp;
pte_t *ptep; pte_t *ptep;
pgdp = pgd_offset_k(virt_addr); pgdp = pgd_offset_k(virt_addr);
pmdp = pmd_offset(pgdp, virt_addr); p4dp = p4d_offset(pgdp, virt_addr);
pudp = pud_offset(p4dp, virt_addr);
pmdp = pmd_offset(pudp, virt_addr);
ptep = pte_offset_kernel(pmdp, virt_addr); ptep = pte_offset_kernel(pmdp, virt_addr);
/* No need to flush uncacheable page. */ /* No need to flush uncacheable page. */
...@@ -693,20 +706,24 @@ static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start, ...@@ -693,20 +706,24 @@ static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
unsigned long end) unsigned long end)
{ {
pgd_t *pgdp; pgd_t *pgdp;
p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp; pmd_t *pmdp;
pte_t *ptep; pte_t *ptep;
while (start < end) { while (start < end) {
pgdp = pgd_offset_k(start); pgdp = pgd_offset_k(start);
if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) { p4dp = p4d_offset(pgdp, start);
pudp = pud_offset(p4dp, start);
if (pud_none(*(pud_t *)__nocache_fix(pudp))) {
pmdp = __srmmu_get_nocache( pmdp = __srmmu_get_nocache(
SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE); SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
if (pmdp == NULL) if (pmdp == NULL)
early_pgtable_allocfail("pmd"); early_pgtable_allocfail("pmd");
memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE); memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
pgd_set(__nocache_fix(pgdp), pmdp); pud_set(__nocache_fix(pudp), pmdp);
} }
pmdp = pmd_offset(__nocache_fix(pgdp), start); pmdp = pmd_offset(__nocache_fix(pudp), start);
if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) { if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE); ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
if (ptep == NULL) if (ptep == NULL)
...@@ -724,19 +741,23 @@ static void __init srmmu_allocate_ptable_skeleton(unsigned long start, ...@@ -724,19 +741,23 @@ static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
unsigned long end) unsigned long end)
{ {
pgd_t *pgdp; pgd_t *pgdp;
p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp; pmd_t *pmdp;
pte_t *ptep; pte_t *ptep;
while (start < end) { while (start < end) {
pgdp = pgd_offset_k(start); pgdp = pgd_offset_k(start);
if (pgd_none(*pgdp)) { p4dp = p4d_offset(pgdp, start);
pudp = pud_offset(p4dp, start);
if (pud_none(*pudp)) {
pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE); pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
if (pmdp == NULL) if (pmdp == NULL)
early_pgtable_allocfail("pmd"); early_pgtable_allocfail("pmd");
memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE); memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
pgd_set(pgdp, pmdp); pud_set((pud_t *)pgdp, pmdp);
} }
pmdp = pmd_offset(pgdp, start); pmdp = pmd_offset(pudp, start);
if (srmmu_pmd_none(*pmdp)) { if (srmmu_pmd_none(*pmdp)) {
ptep = __srmmu_get_nocache(PTE_SIZE, ptep = __srmmu_get_nocache(PTE_SIZE,
PTE_SIZE); PTE_SIZE);
...@@ -779,6 +800,8 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start, ...@@ -779,6 +800,8 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
unsigned long probed; unsigned long probed;
unsigned long addr; unsigned long addr;
pgd_t *pgdp; pgd_t *pgdp;
p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp; pmd_t *pmdp;
pte_t *ptep; pte_t *ptep;
int what; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */ int what; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */
...@@ -810,18 +833,20 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start, ...@@ -810,18 +833,20 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
} }
pgdp = pgd_offset_k(start); pgdp = pgd_offset_k(start);
p4dp = p4d_offset(pgdp, start);
pudp = pud_offset(p4dp, start);
if (what == 2) { if (what == 2) {
*(pgd_t *)__nocache_fix(pgdp) = __pgd(probed); *(pgd_t *)__nocache_fix(pgdp) = __pgd(probed);
start += SRMMU_PGDIR_SIZE; start += SRMMU_PGDIR_SIZE;
continue; continue;
} }
if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) { if (pud_none(*(pud_t *)__nocache_fix(pudp))) {
pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE,
SRMMU_PMD_TABLE_SIZE); SRMMU_PMD_TABLE_SIZE);
if (pmdp == NULL) if (pmdp == NULL)
early_pgtable_allocfail("pmd"); early_pgtable_allocfail("pmd");
memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE); memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
pgd_set(__nocache_fix(pgdp), pmdp); pud_set(__nocache_fix(pudp), pmdp);
} }
pmdp = pmd_offset(__nocache_fix(pgdp), start); pmdp = pmd_offset(__nocache_fix(pgdp), start);
if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) { if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
...@@ -906,6 +931,8 @@ void __init srmmu_paging_init(void) ...@@ -906,6 +931,8 @@ void __init srmmu_paging_init(void)
phandle cpunode; phandle cpunode;
char node_str[128]; char node_str[128];
pgd_t *pgd; pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd; pmd_t *pmd;
pte_t *pte; pte_t *pte;
unsigned long pages_avail; unsigned long pages_avail;
...@@ -967,7 +994,9 @@ void __init srmmu_paging_init(void) ...@@ -967,7 +994,9 @@ void __init srmmu_paging_init(void)
srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_END); srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_END);
pgd = pgd_offset_k(PKMAP_BASE); pgd = pgd_offset_k(PKMAP_BASE);
pmd = pmd_offset(pgd, PKMAP_BASE); p4d = p4d_offset(pgd, PKMAP_BASE);
pud = pud_offset(p4d, PKMAP_BASE);
pmd = pmd_offset(pud, PKMAP_BASE);
pte = pte_offset_kernel(pmd, PKMAP_BASE); pte = pte_offset_kernel(pmd, PKMAP_BASE);
pkmap_page_table = pte; pkmap_page_table = pte;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment