Commit a012c1e8 authored by David S. Miller

Merge branch 'Rework-sparc32-page-table-layout'

Will Deacon says:

====================
Rework sparc32 page-table layout

This is a reposting of the patch series I sent previously to rework the
sparc32 page-table layout so that 'pmd_t' can be used safely with
READ_ONCE():

https://lore.kernel.org/lkml/20200324104005.11279-1-will@kernel.org

This is blocking the READ_ONCE() rework, which in turn allows us to
bump the minimum GCC version for building the kernel up to 4.8.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 24085f70 8c8f3156
...@@ -54,7 +54,7 @@ extern struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS+1]; ...@@ -54,7 +54,7 @@ extern struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS+1];
*/ */
typedef struct { unsigned long pte; } pte_t; typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long iopte; } iopte_t; typedef struct { unsigned long iopte; } iopte_t;
typedef struct { unsigned long pmdv[16]; } pmd_t; typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pgd; } pgd_t; typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long ctxd; } ctxd_t; typedef struct { unsigned long ctxd; } ctxd_t;
typedef struct { unsigned long pgprot; } pgprot_t; typedef struct { unsigned long pgprot; } pgprot_t;
...@@ -62,7 +62,7 @@ typedef struct { unsigned long iopgprot; } iopgprot_t; ...@@ -62,7 +62,7 @@ typedef struct { unsigned long iopgprot; } iopgprot_t;
#define pte_val(x) ((x).pte) #define pte_val(x) ((x).pte)
#define iopte_val(x) ((x).iopte) #define iopte_val(x) ((x).iopte)
#define pmd_val(x) ((x).pmdv[0]) #define pmd_val(x) ((x).pmd)
#define pgd_val(x) ((x).pgd) #define pgd_val(x) ((x).pgd)
#define ctxd_val(x) ((x).ctxd) #define ctxd_val(x) ((x).ctxd)
#define pgprot_val(x) ((x).pgprot) #define pgprot_val(x) ((x).pgprot)
...@@ -82,7 +82,7 @@ typedef struct { unsigned long iopgprot; } iopgprot_t; ...@@ -82,7 +82,7 @@ typedef struct { unsigned long iopgprot; } iopgprot_t;
*/ */
typedef unsigned long pte_t; typedef unsigned long pte_t;
typedef unsigned long iopte_t; typedef unsigned long iopte_t;
typedef struct { unsigned long pmdv[16]; } pmd_t; typedef unsigned long pmd_t;
typedef unsigned long pgd_t; typedef unsigned long pgd_t;
typedef unsigned long ctxd_t; typedef unsigned long ctxd_t;
typedef unsigned long pgprot_t; typedef unsigned long pgprot_t;
...@@ -90,14 +90,14 @@ typedef unsigned long iopgprot_t; ...@@ -90,14 +90,14 @@ typedef unsigned long iopgprot_t;
#define pte_val(x) (x) #define pte_val(x) (x)
#define iopte_val(x) (x) #define iopte_val(x) (x)
#define pmd_val(x) ((x).pmdv[0]) #define pmd_val(x) (x)
#define pgd_val(x) (x) #define pgd_val(x) (x)
#define ctxd_val(x) (x) #define ctxd_val(x) (x)
#define pgprot_val(x) (x) #define pgprot_val(x) (x)
#define iopgprot_val(x) (x) #define iopgprot_val(x) (x)
#define __pte(x) (x) #define __pte(x) (x)
#define __pmd(x) ((pmd_t) { { (x) }, }) #define __pmd(x) (x)
#define __iopte(x) (x) #define __iopte(x) (x)
#define __pgd(x) (x) #define __pgd(x) (x)
#define __ctxd(x) (x) #define __ctxd(x) (x)
...@@ -106,7 +106,7 @@ typedef unsigned long iopgprot_t; ...@@ -106,7 +106,7 @@ typedef unsigned long iopgprot_t;
#endif #endif
typedef struct page *pgtable_t; typedef pte_t *pgtable_t;
#define TASK_UNMAPPED_BASE 0x50000000 #define TASK_UNMAPPED_BASE 0x50000000
......
...@@ -50,23 +50,24 @@ static inline void free_pmd_fast(pmd_t * pmd) ...@@ -50,23 +50,24 @@ static inline void free_pmd_fast(pmd_t * pmd)
#define pmd_free(mm, pmd) free_pmd_fast(pmd) #define pmd_free(mm, pmd) free_pmd_fast(pmd)
#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd) #define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep); #define pmd_populate(mm, pmd, pte) pmd_set(pmd, pte)
#define pmd_pgtable(pmd) pmd_page(pmd) #define pmd_pgtable(pmd) (pgtable_t)__pmd_page(pmd)
void pmd_set(pmd_t *pmdp, pte_t *ptep); void pmd_set(pmd_t *pmdp, pte_t *ptep);
#define pmd_populate_kernel(MM, PMD, PTE) pmd_set(PMD, PTE) #define pmd_populate_kernel pmd_populate
pgtable_t pte_alloc_one(struct mm_struct *mm); pgtable_t pte_alloc_one(struct mm_struct *mm);
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{ {
return srmmu_get_nocache(PTE_SIZE, PTE_SIZE); return srmmu_get_nocache(SRMMU_PTE_TABLE_SIZE,
SRMMU_PTE_TABLE_SIZE);
} }
static inline void free_pte_fast(pte_t *pte) static inline void free_pte_fast(pte_t *pte)
{ {
srmmu_free_nocache(pte, PTE_SIZE); srmmu_free_nocache(pte, SRMMU_PTE_TABLE_SIZE);
} }
#define pte_free_kernel(mm, pte) free_pte_fast(pte) #define pte_free_kernel(mm, pte) free_pte_fast(pte)
......
...@@ -11,6 +11,16 @@ ...@@ -11,6 +11,16 @@
#include <linux/const.h> #include <linux/const.h>
#define PMD_SHIFT 18
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
#define PMD_ALIGN(__addr) (((__addr) + ~PMD_MASK) & PMD_MASK)
#define PGDIR_SHIFT 24
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
#define PGDIR_ALIGN(__addr) (((__addr) + ~PGDIR_MASK) & PGDIR_MASK)
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#include <asm-generic/pgtable-nopud.h> #include <asm-generic/pgtable-nopud.h>
...@@ -34,17 +44,10 @@ unsigned long __init bootmem_init(unsigned long *pages_avail); ...@@ -34,17 +44,10 @@ unsigned long __init bootmem_init(unsigned long *pages_avail);
#define pmd_ERROR(e) __builtin_trap() #define pmd_ERROR(e) __builtin_trap()
#define pgd_ERROR(e) __builtin_trap() #define pgd_ERROR(e) __builtin_trap()
#define PMD_SHIFT 22 #define PTRS_PER_PTE 64
#define PMD_SIZE (1UL << PMD_SHIFT) #define PTRS_PER_PMD 64
#define PMD_MASK (~(PMD_SIZE-1)) #define PTRS_PER_PGD 256
#define PMD_ALIGN(__addr) (((__addr) + ~PMD_MASK) & PMD_MASK) #define USER_PTRS_PER_PGD PAGE_OFFSET / PGDIR_SIZE
#define PGDIR_SHIFT SRMMU_PGDIR_SHIFT
#define PGDIR_SIZE SRMMU_PGDIR_SIZE
#define PGDIR_MASK SRMMU_PGDIR_MASK
#define PTRS_PER_PTE 1024
#define PTRS_PER_PMD SRMMU_PTRS_PER_PMD
#define PTRS_PER_PGD SRMMU_PTRS_PER_PGD
#define USER_PTRS_PER_PGD PAGE_OFFSET / SRMMU_PGDIR_SIZE
#define FIRST_USER_ADDRESS 0UL #define FIRST_USER_ADDRESS 0UL
#define PTE_SIZE (PTRS_PER_PTE*4) #define PTE_SIZE (PTRS_PER_PTE*4)
...@@ -132,6 +135,17 @@ static inline struct page *pmd_page(pmd_t pmd) ...@@ -132,6 +135,17 @@ static inline struct page *pmd_page(pmd_t pmd)
return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4)); return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4));
} }
static inline unsigned long __pmd_page(pmd_t pmd)
{
unsigned long v;
if (srmmu_device_memory(pmd_val(pmd)))
BUG();
v = pmd_val(pmd) & SRMMU_PTD_PMASK;
return (unsigned long)__nocache_va(v << 4);
}
static inline unsigned long pud_page_vaddr(pud_t pud) static inline unsigned long pud_page_vaddr(pud_t pud)
{ {
if (srmmu_device_memory(pud_val(pud))) { if (srmmu_device_memory(pud_val(pud))) {
...@@ -179,9 +193,7 @@ static inline int pmd_none(pmd_t pmd) ...@@ -179,9 +193,7 @@ static inline int pmd_none(pmd_t pmd)
static inline void pmd_clear(pmd_t *pmdp) static inline void pmd_clear(pmd_t *pmdp)
{ {
int i; set_pte((pte_t *)&pmd_val(*pmdp), __pte(0));
for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++)
set_pte((pte_t *)&pmdp->pmdv[i], __pte(0));
} }
static inline int pud_none(pud_t pud) static inline int pud_none(pud_t pud)
......
...@@ -17,39 +17,9 @@ ...@@ -17,39 +17,9 @@
/* Number of contexts is implementation-dependent; 64k is the most we support */ /* Number of contexts is implementation-dependent; 64k is the most we support */
#define SRMMU_MAX_CONTEXTS 65536 #define SRMMU_MAX_CONTEXTS 65536
/* PMD_SHIFT determines the size of the area a second-level page table entry can map */ #define SRMMU_PTE_TABLE_SIZE (PTRS_PER_PTE*4)
#define SRMMU_REAL_PMD_SHIFT 18 #define SRMMU_PMD_TABLE_SIZE (PTRS_PER_PMD*4)
#define SRMMU_REAL_PMD_SIZE (1UL << SRMMU_REAL_PMD_SHIFT) #define SRMMU_PGD_TABLE_SIZE (PTRS_PER_PGD*4)
#define SRMMU_REAL_PMD_MASK (~(SRMMU_REAL_PMD_SIZE-1))
#define SRMMU_REAL_PMD_ALIGN(__addr) (((__addr)+SRMMU_REAL_PMD_SIZE-1)&SRMMU_REAL_PMD_MASK)
/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define SRMMU_PGDIR_SHIFT 24
#define SRMMU_PGDIR_SIZE (1UL << SRMMU_PGDIR_SHIFT)
#define SRMMU_PGDIR_MASK (~(SRMMU_PGDIR_SIZE-1))
#define SRMMU_PGDIR_ALIGN(addr) (((addr)+SRMMU_PGDIR_SIZE-1)&SRMMU_PGDIR_MASK)
#define SRMMU_REAL_PTRS_PER_PTE 64
#define SRMMU_REAL_PTRS_PER_PMD 64
#define SRMMU_PTRS_PER_PGD 256
#define SRMMU_REAL_PTE_TABLE_SIZE (SRMMU_REAL_PTRS_PER_PTE*4)
#define SRMMU_PMD_TABLE_SIZE (SRMMU_REAL_PTRS_PER_PMD*4)
#define SRMMU_PGD_TABLE_SIZE (SRMMU_PTRS_PER_PGD*4)
/*
* To support pagetables in highmem, Linux introduces APIs which
* return struct page* and generally manipulate page tables when
* they are not mapped into kernel space. Our hardware page tables
* are smaller than pages. We lump hardware tabes into big, page sized
* software tables.
*
* PMD_SHIFT determines the size of the area a second-level page table entry
* can map, and our pmd_t is 16 times larger than normal. The values which
* were once defined here are now generic for 4c and srmmu, so they're
* found in pgtable.h.
*/
#define SRMMU_PTRS_PER_PMD 4
/* Definition of the values in the ET field of PTD's and PTE's */ /* Definition of the values in the ET field of PTD's and PTE's */
#define SRMMU_ET_MASK 0x3 #define SRMMU_ET_MASK 0x3
......
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
#include <asm/asi.h> #include <asm/asi.h>
#include <asm/mxcc.h> #include <asm/mxcc.h>
#include <asm/pgtable.h>
#include <asm/pgtsrmmu.h> #include <asm/pgtsrmmu.h>
/* Bits in the SRMMU control register for GNU/Viking modules. /* Bits in the SRMMU control register for GNU/Viking modules.
...@@ -227,7 +228,7 @@ static inline unsigned long viking_hwprobe(unsigned long vaddr) ...@@ -227,7 +228,7 @@ static inline unsigned long viking_hwprobe(unsigned long vaddr)
: "=r" (val) : "=r" (val)
: "r" (vaddr | 0x200), "i" (ASI_M_FLUSH_PROBE)); : "r" (vaddr | 0x200), "i" (ASI_M_FLUSH_PROBE));
if ((val & SRMMU_ET_MASK) == SRMMU_ET_PTE) { if ((val & SRMMU_ET_MASK) == SRMMU_ET_PTE) {
vaddr &= ~SRMMU_PGDIR_MASK; vaddr &= ~PGDIR_MASK;
vaddr >>= PAGE_SHIFT; vaddr >>= PAGE_SHIFT;
return val | (vaddr << 8); return val | (vaddr << 8);
} }
...@@ -237,7 +238,7 @@ static inline unsigned long viking_hwprobe(unsigned long vaddr) ...@@ -237,7 +238,7 @@ static inline unsigned long viking_hwprobe(unsigned long vaddr)
: "=r" (val) : "=r" (val)
: "r" (vaddr | 0x100), "i" (ASI_M_FLUSH_PROBE)); : "r" (vaddr | 0x100), "i" (ASI_M_FLUSH_PROBE));
if ((val & SRMMU_ET_MASK) == SRMMU_ET_PTE) { if ((val & SRMMU_ET_MASK) == SRMMU_ET_PTE) {
vaddr &= ~SRMMU_REAL_PMD_MASK; vaddr &= ~PMD_MASK;
vaddr >>= PAGE_SHIFT; vaddr >>= PAGE_SHIFT;
return val | (vaddr << 8); return val | (vaddr << 8);
} }
......
...@@ -24,7 +24,7 @@ ...@@ -24,7 +24,7 @@
#include <asm/winmacro.h> #include <asm/winmacro.h>
#include <asm/thread_info.h> /* TI_UWINMASK */ #include <asm/thread_info.h> /* TI_UWINMASK */
#include <asm/errno.h> #include <asm/errno.h>
#include <asm/pgtsrmmu.h> /* SRMMU_PGDIR_SHIFT */ #include <asm/pgtable.h> /* PGDIR_SHIFT */
#include <asm/export.h> #include <asm/export.h>
.data .data
...@@ -273,7 +273,7 @@ not_a_sun4: ...@@ -273,7 +273,7 @@ not_a_sun4:
lda [%o1] ASI_M_BYPASS, %o2 ! This is the 0x0 16MB pgd lda [%o1] ASI_M_BYPASS, %o2 ! This is the 0x0 16MB pgd
/* Calculate to KERNBASE entry. */ /* Calculate to KERNBASE entry. */
add %o1, KERNBASE >> (SRMMU_PGDIR_SHIFT - 2), %o3 add %o1, KERNBASE >> (PGDIR_SHIFT - 2), %o3
/* Poke the entry into the calculated address. */ /* Poke the entry into the calculated address. */
sta %o2, [%o3] ASI_M_BYPASS sta %o2, [%o3] ASI_M_BYPASS
...@@ -317,7 +317,7 @@ srmmu_not_viking: ...@@ -317,7 +317,7 @@ srmmu_not_viking:
sll %g1, 0x8, %g1 ! make phys addr for l1 tbl sll %g1, 0x8, %g1 ! make phys addr for l1 tbl
lda [%g1] ASI_M_BYPASS, %g2 ! get level1 entry for 0x0 lda [%g1] ASI_M_BYPASS, %g2 ! get level1 entry for 0x0
add %g1, KERNBASE >> (SRMMU_PGDIR_SHIFT - 2), %g3 add %g1, KERNBASE >> (PGDIR_SHIFT - 2), %g3
sta %g2, [%g3] ASI_M_BYPASS ! place at KERNBASE entry sta %g2, [%g3] ASI_M_BYPASS ! place at KERNBASE entry
b go_to_highmem b go_to_highmem
nop ! wheee.... nop ! wheee....
...@@ -341,7 +341,7 @@ leon_remap: ...@@ -341,7 +341,7 @@ leon_remap:
sll %g1, 0x8, %g1 ! make phys addr for l1 tbl sll %g1, 0x8, %g1 ! make phys addr for l1 tbl
lda [%g1] ASI_M_BYPASS, %g2 ! get level1 entry for 0x0 lda [%g1] ASI_M_BYPASS, %g2 ! get level1 entry for 0x0
add %g1, KERNBASE >> (SRMMU_PGDIR_SHIFT - 2), %g3 add %g1, KERNBASE >> (PGDIR_SHIFT - 2), %g3
sta %g2, [%g3] ASI_M_BYPASS ! place at KERNBASE entry sta %g2, [%g3] ASI_M_BYPASS ! place at KERNBASE entry
b go_to_highmem b go_to_highmem
nop ! wheee.... nop ! wheee....
......
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/asi.h> #include <asm/asi.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgtsrmmu.h> #include <asm/pgtsrmmu.h>
#include <linux/init.h> #include <linux/init.h>
...@@ -293,7 +294,7 @@ hypersparc_flush_tlb_range: ...@@ -293,7 +294,7 @@ hypersparc_flush_tlb_range:
cmp %o3, -1 cmp %o3, -1
be hypersparc_flush_tlb_range_out be hypersparc_flush_tlb_range_out
#endif #endif
sethi %hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4 sethi %hi(~((1 << PGDIR_SHIFT) - 1)), %o4
sta %o3, [%g1] ASI_M_MMUREGS sta %o3, [%g1] ASI_M_MMUREGS
and %o1, %o4, %o1 and %o1, %o4, %o1
add %o1, 0x200, %o1 add %o1, 0x200, %o1
......
...@@ -136,26 +136,8 @@ static void msi_set_sync(void) ...@@ -136,26 +136,8 @@ static void msi_set_sync(void)
void pmd_set(pmd_t *pmdp, pte_t *ptep) void pmd_set(pmd_t *pmdp, pte_t *ptep)
{ {
unsigned long ptp; /* Physical address, shifted right by 4 */ unsigned long ptp = __nocache_pa(ptep) >> 4;
int i; set_pte((pte_t *)&pmd_val(*pmdp), __pte(SRMMU_ET_PTD | ptp));
ptp = __nocache_pa(ptep) >> 4;
for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
set_pte((pte_t *)&pmdp->pmdv[i], __pte(SRMMU_ET_PTD | ptp));
ptp += (SRMMU_REAL_PTRS_PER_PTE * sizeof(pte_t) >> 4);
}
}
void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
{
unsigned long ptp; /* Physical address, shifted right by 4 */
int i;
ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4); /* watch for overflow */
for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
set_pte((pte_t *)&pmdp->pmdv[i], __pte(SRMMU_ET_PTD | ptp));
ptp += (SRMMU_REAL_PTRS_PER_PTE * sizeof(pte_t) >> 4);
}
} }
/* Find an entry in the third-level page table.. */ /* Find an entry in the third-level page table.. */
...@@ -163,7 +145,7 @@ pte_t *pte_offset_kernel(pmd_t *dir, unsigned long address) ...@@ -163,7 +145,7 @@ pte_t *pte_offset_kernel(pmd_t *dir, unsigned long address)
{ {
void *pte; void *pte;
pte = __nocache_va((dir->pmdv[0] & SRMMU_PTD_PMASK) << 4); pte = __nocache_va((pmd_val(*dir) & SRMMU_PTD_PMASK) << 4);
return (pte_t *) pte + return (pte_t *) pte +
((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)); ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
} }
...@@ -175,18 +157,18 @@ pte_t *pte_offset_kernel(pmd_t *dir, unsigned long address) ...@@ -175,18 +157,18 @@ pte_t *pte_offset_kernel(pmd_t *dir, unsigned long address)
*/ */
static void *__srmmu_get_nocache(int size, int align) static void *__srmmu_get_nocache(int size, int align)
{ {
int offset; int offset, minsz = 1 << SRMMU_NOCACHE_BITMAP_SHIFT;
unsigned long addr; unsigned long addr;
if (size < SRMMU_NOCACHE_BITMAP_SHIFT) { if (size < minsz) {
printk(KERN_ERR "Size 0x%x too small for nocache request\n", printk(KERN_ERR "Size 0x%x too small for nocache request\n",
size); size);
size = SRMMU_NOCACHE_BITMAP_SHIFT; size = minsz;
} }
if (size & (SRMMU_NOCACHE_BITMAP_SHIFT - 1)) { if (size & (minsz - 1)) {
printk(KERN_ERR "Size 0x%x unaligned int nocache request\n", printk(KERN_ERR "Size 0x%x unaligned in nocache request\n",
size); size);
size += SRMMU_NOCACHE_BITMAP_SHIFT - 1; size += minsz - 1;
} }
BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX); BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX);
...@@ -376,31 +358,26 @@ pgd_t *get_pgd_fast(void) ...@@ -376,31 +358,26 @@ pgd_t *get_pgd_fast(void)
*/ */
pgtable_t pte_alloc_one(struct mm_struct *mm) pgtable_t pte_alloc_one(struct mm_struct *mm)
{ {
unsigned long pte; pte_t *ptep;
struct page *page; struct page *page;
if ((pte = (unsigned long)pte_alloc_one_kernel(mm)) == 0) if ((ptep = pte_alloc_one_kernel(mm)) == 0)
return NULL; return NULL;
page = pfn_to_page(__nocache_pa(pte) >> PAGE_SHIFT); page = pfn_to_page(__nocache_pa((unsigned long)ptep) >> PAGE_SHIFT);
if (!pgtable_pte_page_ctor(page)) { if (!pgtable_pte_page_ctor(page)) {
__free_page(page); __free_page(page);
return NULL; return NULL;
} }
return page; return ptep;
} }
void pte_free(struct mm_struct *mm, pgtable_t pte) void pte_free(struct mm_struct *mm, pgtable_t ptep)
{ {
unsigned long p; struct page *page;
pgtable_pte_page_dtor(pte);
p = (unsigned long)page_address(pte); /* Cached address (for test) */
if (p == 0)
BUG();
p = page_to_pfn(pte) << PAGE_SHIFT; /* Physical address */
/* free non cached virtual address*/ page = pfn_to_page(__nocache_pa((unsigned long)ptep) >> PAGE_SHIFT);
srmmu_free_nocache(__nocache_va(p), PTE_SIZE); pgtable_pte_page_dtor(page);
srmmu_free_nocache(ptep, SRMMU_PTE_TABLE_SIZE);
} }
/* context handling - a dynamically sized pool is used */ /* context handling - a dynamically sized pool is used */
...@@ -822,13 +799,13 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start, ...@@ -822,13 +799,13 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
what = 0; what = 0;
addr = start - PAGE_SIZE; addr = start - PAGE_SIZE;
if (!(start & ~(SRMMU_REAL_PMD_MASK))) { if (!(start & ~(PMD_MASK))) {
if (srmmu_probe(addr + SRMMU_REAL_PMD_SIZE) == probed) if (srmmu_probe(addr + PMD_SIZE) == probed)
what = 1; what = 1;
} }
if (!(start & ~(SRMMU_PGDIR_MASK))) { if (!(start & ~(PGDIR_MASK))) {
if (srmmu_probe(addr + SRMMU_PGDIR_SIZE) == probed) if (srmmu_probe(addr + PGDIR_SIZE) == probed)
what = 2; what = 2;
} }
...@@ -837,7 +814,7 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start, ...@@ -837,7 +814,7 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
pudp = pud_offset(p4dp, start); pudp = pud_offset(p4dp, start);
if (what == 2) { if (what == 2) {
*(pgd_t *)__nocache_fix(pgdp) = __pgd(probed); *(pgd_t *)__nocache_fix(pgdp) = __pgd(probed);
start += SRMMU_PGDIR_SIZE; start += PGDIR_SIZE;
continue; continue;
} }
if (pud_none(*(pud_t *)__nocache_fix(pudp))) { if (pud_none(*(pud_t *)__nocache_fix(pudp))) {
...@@ -849,6 +826,11 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start, ...@@ -849,6 +826,11 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
pud_set(__nocache_fix(pudp), pmdp); pud_set(__nocache_fix(pudp), pmdp);
} }
pmdp = pmd_offset(__nocache_fix(pgdp), start); pmdp = pmd_offset(__nocache_fix(pgdp), start);
if (what == 1) {
*(pmd_t *)__nocache_fix(pmdp) = __pmd(probed);
start += PMD_SIZE;
continue;
}
if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) { if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE); ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
if (ptep == NULL) if (ptep == NULL)
...@@ -856,19 +838,6 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start, ...@@ -856,19 +838,6 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
memset(__nocache_fix(ptep), 0, PTE_SIZE); memset(__nocache_fix(ptep), 0, PTE_SIZE);
pmd_set(__nocache_fix(pmdp), ptep); pmd_set(__nocache_fix(pmdp), ptep);
} }
if (what == 1) {
/* We bend the rule where all 16 PTPs in a pmd_t point
* inside the same PTE page, and we leak a perfectly
* good hardware PTE piece. Alternatives seem worse.
*/
unsigned int x; /* Index of HW PMD in soft cluster */
unsigned long *val;
x = (start >> PMD_SHIFT) & 15;
val = &pmdp->pmdv[x];
*(unsigned long *)__nocache_fix(val) = probed;
start += SRMMU_REAL_PMD_SIZE;
continue;
}
ptep = pte_offset_kernel(__nocache_fix(pmdp), start); ptep = pte_offset_kernel(__nocache_fix(pmdp), start);
*(pte_t *)__nocache_fix(ptep) = __pte(probed); *(pte_t *)__nocache_fix(ptep) = __pte(probed);
start += PAGE_SIZE; start += PAGE_SIZE;
...@@ -890,9 +859,9 @@ static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base ...@@ -890,9 +859,9 @@ static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base
/* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE. */ /* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE. */
static unsigned long __init map_spbank(unsigned long vbase, int sp_entry) static unsigned long __init map_spbank(unsigned long vbase, int sp_entry)
{ {
unsigned long pstart = (sp_banks[sp_entry].base_addr & SRMMU_PGDIR_MASK); unsigned long pstart = (sp_banks[sp_entry].base_addr & PGDIR_MASK);
unsigned long vstart = (vbase & SRMMU_PGDIR_MASK); unsigned long vstart = (vbase & PGDIR_MASK);
unsigned long vend = SRMMU_PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes); unsigned long vend = PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes);
/* Map "low" memory only */ /* Map "low" memory only */
const unsigned long min_vaddr = PAGE_OFFSET; const unsigned long min_vaddr = PAGE_OFFSET;
const unsigned long max_vaddr = PAGE_OFFSET + SRMMU_MAXMEM; const unsigned long max_vaddr = PAGE_OFFSET + SRMMU_MAXMEM;
...@@ -905,7 +874,7 @@ static unsigned long __init map_spbank(unsigned long vbase, int sp_entry) ...@@ -905,7 +874,7 @@ static unsigned long __init map_spbank(unsigned long vbase, int sp_entry)
while (vstart < vend) { while (vstart < vend) {
do_large_mapping(vstart, pstart); do_large_mapping(vstart, pstart);
vstart += SRMMU_PGDIR_SIZE; pstart += SRMMU_PGDIR_SIZE; vstart += PGDIR_SIZE; pstart += PGDIR_SIZE;
} }
return vstart; return vstart;
} }
......
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
#include <asm/asi.h> #include <asm/asi.h>
#include <asm/mxcc.h> #include <asm/mxcc.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgtsrmmu.h> #include <asm/pgtsrmmu.h>
#include <asm/viking.h> #include <asm/viking.h>
...@@ -157,7 +158,7 @@ viking_flush_tlb_range: ...@@ -157,7 +158,7 @@ viking_flush_tlb_range:
cmp %o3, -1 cmp %o3, -1
be 2f be 2f
#endif #endif
sethi %hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4 sethi %hi(~((1 << PGDIR_SHIFT) - 1)), %o4
sta %o3, [%g1] ASI_M_MMUREGS sta %o3, [%g1] ASI_M_MMUREGS
and %o1, %o4, %o1 and %o1, %o4, %o1
add %o1, 0x200, %o1 add %o1, 0x200, %o1
...@@ -243,7 +244,7 @@ sun4dsmp_flush_tlb_range: ...@@ -243,7 +244,7 @@ sun4dsmp_flush_tlb_range:
ld [%o0 + VMA_VM_MM], %o0 ld [%o0 + VMA_VM_MM], %o0
ld [%o0 + AOFF_mm_context], %o3 ld [%o0 + AOFF_mm_context], %o3
lda [%g1] ASI_M_MMUREGS, %g5 lda [%g1] ASI_M_MMUREGS, %g5
sethi %hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4 sethi %hi(~((1 << PGDIR_SHIFT) - 1)), %o4
sta %o3, [%g1] ASI_M_MMUREGS sta %o3, [%g1] ASI_M_MMUREGS
and %o1, %o4, %o1 and %o1, %o4, %o1
add %o1, 0x200, %o1 add %o1, 0x200, %o1
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment