Commit 3610cce8 authored by Martin Schwidefsky

[S390] Cleanup page table definitions.

- De-confuse the defines for the address-space-control-elements
  and the segment/region table entries.
- Create out of line functions for page table allocation / freeing.
- Simplify get_shadow_xxx functions.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent e4aa402e
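The rename separates two kinds of defines that previously shared names: the bits that form an address-space-control element (ASCE) loaded into a control register, and the bits stored in region/segment table entries. As a rough sketch of how the new _ASCE_* names compose a kernel ASCE (mirroring the paging_init() change below; the helper name is illustrative, not part of the patch):

/*
 * Illustration only, not part of the patch: build an ASCE for a
 * 2048-entry top-level table using the new _ASCE_* defines. This is
 * what paging_init() below does for the kernel address space; the
 * user-space variant in update_mm() additionally ors in _ASCE_USER_BITS.
 */
static unsigned long make_kernel_asce(unsigned long *table)
{
	unsigned long asce = __pa(table) & PAGE_MASK;	/* table origin */

	asce |= _ASCE_TABLE_LENGTH;	/* full-length (2048 entry) table */
#ifdef CONFIG_64BIT
	asce |= _ASCE_TYPE_REGION3;	/* top level is a region-third table */
#endif
	return asce;
}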
arch/s390/mm/Makefile
@@ -2,6 +2,6 @@
 # Makefile for the linux s390-specific parts of the memory manager.
 #
-obj-y	 := init.o fault.o extmem.o mmap.o vmem.o
+obj-y	 := init.o fault.o extmem.o mmap.o vmem.o pgtable.o
 obj-$(CONFIG_CMM) += cmm.o
arch/s390/mm/init.c
@@ -103,32 +103,28 @@ static void __init setup_ro_region(void)
  */
 void __init paging_init(void)
 {
-	pgd_t *pg_dir;
-	int i;
-	unsigned long pgdir_k;
 	static const int ssm_mask = 0x04000000L;
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
+	unsigned long pgd_type;
 
-	pg_dir = swapper_pg_dir;
+	init_mm.pgd = swapper_pg_dir;
+	S390_lowcore.kernel_asce = __pa(init_mm.pgd) & PAGE_MASK;
 #ifdef CONFIG_64BIT
-	pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERN_REGION_TABLE;
-	for (i = 0; i < PTRS_PER_PGD; i++)
-		pgd_clear_kernel(pg_dir + i);
+	S390_lowcore.kernel_asce |= _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
+	pgd_type = _REGION3_ENTRY_EMPTY;
 #else
-	pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
-	for (i = 0; i < PTRS_PER_PGD; i++)
-		pmd_clear_kernel((pmd_t *)(pg_dir + i));
+	S390_lowcore.kernel_asce |= _ASCE_TABLE_LENGTH;
+	pgd_type = _SEGMENT_ENTRY_EMPTY;
 #endif
+	clear_table((unsigned long *) init_mm.pgd, pgd_type,
+		    sizeof(unsigned long)*2048);
 	vmem_map_init();
 	setup_ro_region();
-	S390_lowcore.kernel_asce = pgdir_k;
 
 	/* enable virtual mapping in kernel mode */
-	__ctl_load(pgdir_k, 1, 1);
-	__ctl_load(pgdir_k, 7, 7);
-	__ctl_load(pgdir_k, 13, 13);
+	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
+	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
+	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
 	__raw_local_irq_ssm(ssm_mask);
 
 	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
arch/s390/mm/pgtable.c (new file)

/*
 *  arch/s390/mm/pgtable.c
 *
 *    Copyright IBM Corp. 2007
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#ifndef CONFIG_64BIT
#define ALLOC_ORDER	1
#else
#define ALLOC_ORDER	2
#endif

unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
{
	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

	if (!page)
		return NULL;
	page->index = 0;
	if (noexec) {
		struct page *shadow = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
		if (!shadow) {
			__free_pages(page, ALLOC_ORDER);
			return NULL;
		}
		page->index = page_to_phys(shadow);
	}
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(unsigned long *table)
{
	unsigned long *shadow = get_shadow_table(table);

	if (shadow)
		free_pages((unsigned long) shadow, ALLOC_ORDER);
	free_pages((unsigned long) table, ALLOC_ORDER);
}

/*
 * page table entry allocation/free routines.
 */
unsigned long *page_table_alloc(int noexec)
{
	struct page *page = alloc_page(GFP_KERNEL);
	unsigned long *table;

	if (!page)
		return NULL;
	page->index = 0;
	if (noexec) {
		struct page *shadow = alloc_page(GFP_KERNEL);
		if (!shadow) {
			__free_page(page);
			return NULL;
		}
		table = (unsigned long *) page_to_phys(shadow);
		clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
		page->index = (addr_t) table;
	}
	table = (unsigned long *) page_to_phys(page);
	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
	return table;
}

void page_table_free(unsigned long *table)
{
	unsigned long *shadow = get_shadow_pte(table);

	if (shadow)
		free_page((unsigned long) shadow);
	free_page((unsigned long) table);
}
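Note how both allocators stash the shadow table's address in page->index of the primary table's struct page (page->index stays 0 when no noexec shadow is allocated); crst_table_free() and page_table_free() rely on this via get_shadow_table()/get_shadow_pte(), whose definitions live in the pgtable.h part of this commit (collapsed below). A plausible sketch of that lookup, assuming it simply reads page->index back, could be:

/*
 * Assumed shape only - the real helper is defined in the collapsed
 * pgtable.h diff. The table address returned by page_to_phys() is also
 * a valid kernel address (1:1 mapping), so virt_to_page() works on it.
 */
static inline unsigned long *get_shadow_table_sketch(unsigned long *table)
{
	struct page *page = virt_to_page(table);

	return (unsigned long *) page->index;	/* 0 means no shadow table */
}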
arch/s390/mm/vmem.c
@@ -75,29 +75,24 @@ static void __init_refok *vmem_alloc_pages(unsigned int order)
 
 static inline pmd_t *vmem_pmd_alloc(void)
 {
-	pmd_t *pmd;
-	int i;
+	pmd_t *pmd = NULL;
 
-	pmd = vmem_alloc_pages(PMD_ALLOC_ORDER);
+#ifdef CONFIG_64BIT
+	pmd = vmem_alloc_pages(2);
 	if (!pmd)
 		return NULL;
-	for (i = 0; i < PTRS_PER_PMD; i++)
-		pmd_clear_kernel(pmd + i);
+	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE*4);
+#endif
 	return pmd;
 }
 
 static inline pte_t *vmem_pte_alloc(void)
 {
-	pte_t *pte;
-	pte_t empty_pte;
-	int i;
+	pte_t *pte = vmem_alloc_pages(0);
 
-	pte = vmem_alloc_pages(PTE_ALLOC_ORDER);
 	if (!pte)
 		return NULL;
-	pte_val(empty_pte) = _PAGE_TYPE_EMPTY;
-	for (i = 0; i < PTRS_PER_PTE; i++)
-		pte[i] = empty_pte;
+	clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY, PAGE_SIZE);
 	return pte;
 }
include/asm-s390/mmu_context.h
@@ -21,45 +21,43 @@
 
 #ifndef __s390x__
 #define LCTL_OPCODE "lctl"
-#define PGTABLE_BITS	(_SEGMENT_TABLE|USER_STD_MASK)
 #else
 #define LCTL_OPCODE "lctlg"
-#define PGTABLE_BITS	(_REGION_TABLE|USER_STD_MASK)
 #endif
 
-static inline void enter_lazy_tlb(struct mm_struct *mm,
-				  struct task_struct *tsk)
+static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
 {
+	pgd_t *pgd = mm->pgd;
+	unsigned long asce_bits;
+
+	/* Calculate asce bits from the first pgd table entry. */
+	asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
+#ifdef CONFIG_64BIT
+	asce_bits |= _ASCE_TYPE_REGION3;
+#endif
+	S390_lowcore.user_asce = asce_bits | __pa(pgd);
+	if (switch_amode) {
+		/* Load primary space page table origin. */
+		pgd_t *shadow_pgd = get_shadow_table(pgd) ? : pgd;
+		S390_lowcore.user_exec_asce = asce_bits | __pa(shadow_pgd);
+		asm volatile(LCTL_OPCODE" 1,1,%0\n"
+			     : : "m" (S390_lowcore.user_exec_asce) );
+	} else
+		/* Load home space page table origin. */
+		asm volatile(LCTL_OPCODE" 13,13,%0"
+			     : : "m" (S390_lowcore.user_asce) );
 }
 
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			     struct task_struct *tsk)
 {
-	pgd_t *shadow_pgd = get_shadow_pgd(next->pgd);
-
-	if (prev != next) {
-		S390_lowcore.user_asce = (__pa(next->pgd) & PAGE_MASK) |
-					 PGTABLE_BITS;
-		if (shadow_pgd) {
-			/* Load primary/secondary space page table origin. */
-			S390_lowcore.user_exec_asce =
-				(__pa(shadow_pgd) & PAGE_MASK) | PGTABLE_BITS;
-			asm volatile(LCTL_OPCODE" 1,1,%0\n"
-				     LCTL_OPCODE" 7,7,%1"
-				     : : "m" (S390_lowcore.user_exec_asce),
-					 "m" (S390_lowcore.user_asce) );
-		} else if (switch_amode) {
-			/* Load primary space page table origin. */
-			asm volatile(LCTL_OPCODE" 1,1,%0"
-				     : : "m" (S390_lowcore.user_asce) );
-		} else
-			/* Load home space page table origin. */
-			asm volatile(LCTL_OPCODE" 13,13,%0"
-				     : : "m" (S390_lowcore.user_asce) );
-	}
+	if (unlikely(prev == next))
+		return;
 	cpu_set(smp_processor_id(), next->cpu_vm_mask);
+	update_mm(next, tsk);
 }
 
+#define enter_lazy_tlb(mm,tsk)	do { } while (0)
 #define deactivate_mm(tsk,mm)	do { } while (0)
 
 static inline void activate_mm(struct mm_struct *prev,
include/asm-s390/pgalloc.h
@@ -19,114 +19,75 @@
 
 #define check_pgt_cache()	do {} while (0)
 
-/*
- * Page allocation orders.
- */
-#ifndef __s390x__
-# define PTE_ALLOC_ORDER	0
-# define PMD_ALLOC_ORDER	0
-# define PGD_ALLOC_ORDER	1
-#else /* __s390x__ */
-# define PTE_ALLOC_ORDER	0
-# define PMD_ALLOC_ORDER	2
-# define PGD_ALLOC_ORDER	2
-#endif /* __s390x__ */
+unsigned long *crst_table_alloc(struct mm_struct *, int);
+void crst_table_free(unsigned long *);
 
-/*
- * Allocate and free page tables. The xxx_kernel() versions are
- * used to allocate a kernel page table - this turns on ASN bits
- * if any.
- */
+unsigned long *page_table_alloc(int);
+void page_table_free(unsigned long *);
 
-static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
 {
-	pgd_t *pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
-	int i;
-
-	if (!pgd)
-		return NULL;
-	if (s390_noexec) {
-		pgd_t *shadow_pgd = (pgd_t *)
-			__get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
-		struct page *page = virt_to_page(pgd);
-
-		if (!shadow_pgd) {
-			free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
-			return NULL;
-		}
-		page->lru.next = (void *) shadow_pgd;
-	}
-	for (i = 0; i < PTRS_PER_PGD; i++)
-#ifndef __s390x__
-		pmd_clear(pmd_offset(pgd + i, i*PGDIR_SIZE));
+	*s = val;
+	n = (n / 256) - 1;
+	asm volatile(
+#ifdef CONFIG_64BIT
+		" mvc 8(248,%0),0(%0)\n"
 #else
-		pgd_clear(pgd + i);
+		" mvc 4(252,%0),0(%0)\n"
 #endif
-	return pgd;
+		"0: mvc 256(256,%0),0(%0)\n"
+		" la %0,256(%0)\n"
+		" brct %1,0b\n"
+		: "+a" (s), "+d" (n));
 }
 
-static inline void pgd_free(pgd_t *pgd)
+static inline void crst_table_init(unsigned long *crst, unsigned long entry)
 {
-	pgd_t *shadow_pgd = get_shadow_pgd(pgd);
-
-	if (shadow_pgd)
-		free_pages((unsigned long) shadow_pgd, PGD_ALLOC_ORDER);
-	free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
+	clear_table(crst, entry, sizeof(unsigned long)*2048);
+	crst = get_shadow_table(crst);
+	if (crst)
+		clear_table(crst, entry, sizeof(unsigned long)*2048);
 }
 
 #ifndef __s390x__
-/*
- * page middle directory allocation/free routines.
- * We use pmd cache only on s390x, so these are dummy routines. This
- * code never triggers because the pgd will always be present.
- */
+
+static inline unsigned long pgd_entry_type(struct mm_struct *mm)
+{
+	return _SEGMENT_ENTRY_EMPTY;
+}
+
 #define pmd_alloc_one(mm,address)       ({ BUG(); ((pmd_t *)2); })
 #define pmd_free(x)                     do { } while (0)
+
 #define pgd_populate(mm, pmd, pte)      BUG()
 #define pgd_populate_kernel(mm, pmd, pte)	BUG()
 
 #else /* __s390x__ */
-static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
+
+static inline unsigned long pgd_entry_type(struct mm_struct *mm)
 {
-	pmd_t *pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
-	int i;
-
-	if (!pmd)
-		return NULL;
-	if (s390_noexec) {
-		pmd_t *shadow_pmd = (pmd_t *)
-			__get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
-		struct page *page = virt_to_page(pmd);
-
-		if (!shadow_pmd) {
-			free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
-			return NULL;
-		}
-		page->lru.next = (void *) shadow_pmd;
-	}
-	for (i=0; i < PTRS_PER_PMD; i++)
-		pmd_clear(pmd + i);
-	return pmd;
+	return _REGION3_ENTRY_EMPTY;
 }
 
-static inline void pmd_free (pmd_t *pmd)
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
 {
-	pmd_t *shadow_pmd = get_shadow_pmd(pmd);
-
-	if (shadow_pmd)
-		free_pages((unsigned long) shadow_pmd, PMD_ALLOC_ORDER);
-	free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
+	unsigned long *crst = crst_table_alloc(mm, s390_noexec);
+	if (crst)
+		crst_table_init(crst, _SEGMENT_ENTRY_EMPTY);
+	return (pmd_t *) crst;
 }
+#define pmd_free(pmd) crst_table_free((unsigned long *) pmd)
 
-static inline void
-pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+static inline void pgd_populate_kernel(struct mm_struct *mm,
+				       pgd_t *pgd, pmd_t *pmd)
 {
-	pgd_val(*pgd) = _PGD_ENTRY | __pa(pmd);
+	pgd_val(*pgd) = _REGION3_ENTRY | __pa(pmd);
 }
 
 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
 {
-	pgd_t *shadow_pgd = get_shadow_pgd(pgd);
-	pmd_t *shadow_pmd = get_shadow_pmd(pmd);
+	pgd_t *shadow_pgd = get_shadow_table(pgd);
+	pmd_t *shadow_pmd = get_shadow_table(pmd);
 
 	if (shadow_pgd && shadow_pmd)
 		pgd_populate_kernel(mm, shadow_pgd, shadow_pmd);
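The new clear_table() helper in the hunk above initializes a table to a repeated entry value: the first overlapping MVC propagates the value stored at *s through the first 256-byte block, and the loop then copies each 256-byte block into the next. A plain-C equivalent of the effect, for illustration only:

/*
 * Illustration only: the net effect of clear_table(s, val, n) is to
 * store the entry value into every unsigned long of the n-byte table.
 */
static inline void clear_table_c(unsigned long *s, unsigned long val, size_t n)
{
	size_t i;

	for (i = 0; i < n / sizeof(unsigned long); i++)
		s[i] = val;
}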
@@ -135,17 +96,26 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
 
 #endif /* __s390x__ */
 
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	unsigned long *crst = crst_table_alloc(mm, s390_noexec);
+	if (crst)
+		crst_table_init(crst, pgd_entry_type(mm));
+	return (pgd_t *) crst;
+}
+#define pgd_free(pgd) crst_table_free((unsigned long *) pgd)
+
 static inline void
 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
 {
 #ifndef __s390x__
-	pmd_val(pmd[0]) = _PAGE_TABLE + __pa(pte);
-	pmd_val(pmd[1]) = _PAGE_TABLE + __pa(pte+256);
-	pmd_val(pmd[2]) = _PAGE_TABLE + __pa(pte+512);
-	pmd_val(pmd[3]) = _PAGE_TABLE + __pa(pte+768);
+	pmd_val(pmd[0]) = _SEGMENT_ENTRY + __pa(pte);
+	pmd_val(pmd[1]) = _SEGMENT_ENTRY + __pa(pte+256);
+	pmd_val(pmd[2]) = _SEGMENT_ENTRY + __pa(pte+512);
+	pmd_val(pmd[3]) = _SEGMENT_ENTRY + __pa(pte+768);
 #else /* __s390x__ */
-	pmd_val(*pmd) = _PMD_ENTRY + __pa(pte);
-	pmd_val1(*pmd) = _PMD_ENTRY + __pa(pte+256);
+	pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
+	pmd_val1(*pmd) = _SEGMENT_ENTRY + __pa(pte+256);
 #endif /* __s390x__ */
 }
@@ -153,7 +123,7 @@ static inline void
 pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
 {
 	pte_t *pte = (pte_t *)page_to_phys(page);
-	pmd_t *shadow_pmd = get_shadow_pmd(pmd);
+	pmd_t *shadow_pmd = get_shadow_table(pmd);
 	pte_t *shadow_pte = get_shadow_pte(pte);
 
 	pmd_populate_kernel(mm, pmd, pte);
@@ -164,57 +134,14 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
 /*
  * page table entry allocation/free routines.
  */
-static inline pte_t *
-pte_alloc_one_kernel(struct mm_struct *mm, unsigned long vmaddr)
-{
-	pte_t *pte = (pte_t *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
-	int i;
-
-	if (!pte)
-		return NULL;
-	if (s390_noexec) {
-		pte_t *shadow_pte = (pte_t *)
-			__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-		struct page *page = virt_to_page(pte);
-
-		if (!shadow_pte) {
-			free_page((unsigned long) pte);
-			return NULL;
-		}
-		page->lru.next = (void *) shadow_pte;
-	}
-	for (i=0; i < PTRS_PER_PTE; i++) {
-		pte_clear(mm, vmaddr, pte + i);
-		vmaddr += PAGE_SIZE;
-	}
-	return pte;
-}
-
-static inline struct page *
-pte_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
-{
-	pte_t *pte = pte_alloc_one_kernel(mm, vmaddr);
-	if (pte)
-		return virt_to_page(pte);
-	return NULL;
-}
-
-static inline void pte_free_kernel(pte_t *pte)
-{
-	pte_t *shadow_pte = get_shadow_pte(pte);
-
-	if (shadow_pte)
-		free_page((unsigned long) shadow_pte);
-	free_page((unsigned long) pte);
-}
-
-static inline void pte_free(struct page *pte)
-{
-	struct page *shadow_page = get_shadow_page(pte);
-
-	if (shadow_page)
-		__free_page(shadow_page);
-	__free_page(pte);
-}
+#define pte_alloc_one_kernel(mm, vmaddr) \
+	((pte_t *) page_table_alloc(s390_noexec))
+#define pte_alloc_one(mm, vmaddr) \
+	virt_to_page(page_table_alloc(s390_noexec))
+
+#define pte_free_kernel(pte) \
+	page_table_free((unsigned long *) pte)
+#define pte_free(pte) \
+	page_table_free((unsigned long *) page_to_phys((struct page *) pte))
 
 #endif /* _S390_PGALLOC_H */
(One further file's diff is collapsed in the original commit view and not shown here.)
include/asm-s390/processor.h
@@ -127,12 +127,6 @@ struct stack_frame {
 
 #define ARCH_MIN_TASKALIGN	8
 
-#ifndef __s390x__
-# define __SWAPPER_PG_DIR	__pa(&swapper_pg_dir[0]) + _SEGMENT_TABLE
-#else /* __s390x__ */
-# define __SWAPPER_PG_DIR	__pa(&swapper_pg_dir[0]) + _REGION_TABLE
-#endif /* __s390x__ */
-
 #define INIT_THREAD { \
 	.ksp = sizeof(init_stack) + (unsigned long) &init_stack, \
 }
include/asm-s390/tlbflush.h
@@ -61,7 +61,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
 	 * only ran on the local cpu.
 	 */
 	if (MACHINE_HAS_IDTE) {
-		pgd_t *shadow_pgd = get_shadow_pgd(mm->pgd);
+		pgd_t *shadow_pgd = get_shadow_table(mm->pgd);
 		if (shadow_pgd)
 			__tlb_flush_idte(shadow_pgd);