Commit dee4f8ff authored by Roman Zippel's avatar Roman Zippel Committed by Linus Torvalds

[PATCH] m68k mmu update

This patch removes the quicklist support and updates m68k to the recent
page table and tlb interface changes.
It also includes some smaller cleanups:
- turn "extern inline" into "static inline"
- convert some macros into inline functions
parent e6d19c6a
......@@ -35,20 +35,6 @@
mmu_gather_t mmu_gathers[NR_CPUS];
/*
 * Trim the page-table quicklists: when the cache has grown past
 * 'high' cached pages, free pmd/pte pages back to the allocator
 * until it drops to 'low'.  Returns the number of pages freed.
 * (Removed by this patch along with the rest of quicklist support.)
 */
int do_check_pgt_cache(int low, int high)
{
int freed = 0;
if(pgtable_cache_size > high) {
do {
if(pmd_quicklist)
freed += free_pmd_slow(get_pmd_fast());
if(pte_quicklist)
free_pte_slow(get_pte_fast()), freed++;
} while(pgtable_cache_size > low);
}
return freed;
}
/*
* BAD_PAGE is the page that is used for page faults when linux
* is out-of-memory. Older versions of linux just did a
......@@ -56,27 +42,9 @@ int do_check_pgt_cache(int low, int high)
* for a process dying in kernel mode, possibly leaving an inode
* unused etc..
*
* BAD_PAGETABLE is the accompanying page-table: it is initialized
* to point to BAD_PAGE entries.
*
* ZERO_PAGE is a special page that is used for zero-initialized
* data and COW.
*/
/* Backing page for BAD_PAGETABLE; allocated in paging_init(). */
unsigned long empty_bad_page_table;
/* Return the shared "bad" page table, zeroed afresh on every use. */
pte_t *__bad_pagetable(void)
{
memset((void *)empty_bad_page_table, 0, PAGE_SIZE);
return (pte_t *)empty_bad_page_table;
}
/* Backing page for BAD_PAGE; allocated in paging_init(). */
unsigned long empty_bad_page;
/* Zero the bad page and return a dirty, shared pte mapping it. */
pte_t __bad_page(void)
{
memset ((void *)empty_bad_page, 0, PAGE_SIZE);
return pte_mkdirty(__mk_pte(empty_bad_page, PAGE_SHARED));
}
unsigned long empty_zero_page;
......@@ -106,7 +74,6 @@ void show_mem(void)
printk("%d reserved pages\n",reserved);
printk("%d pages shared\n",shared);
printk("%d pages swap cached\n",cached);
printk("%ld pages in page table cache\n",pgtable_cache_size);
printk("%ld buffermem pages\n", nr_buffermem_pages());
}
......@@ -127,7 +94,7 @@ void __init mem_init(void)
unsigned long tmp;
int i;
max_mapnr = num_physpages = MAP_NR(high_memory);
max_mapnr = num_physpages = (((unsigned long)high_memory - PAGE_OFFSET) >> PAGE_SHIFT);
#ifdef CONFIG_ATARI
if (MACH_IS_ATARI)
......@@ -138,12 +105,6 @@ void __init mem_init(void)
totalram_pages = free_all_bootmem();
for (tmp = PAGE_OFFSET ; tmp < (unsigned long)high_memory; tmp += PAGE_SIZE) {
#if 0
#ifndef CONFIG_SUN3
if (virt_to_phys ((void *)tmp) >= mach_max_dma_address)
clear_bit(PG_DMA, &virt_to_page(tmp)->flags);
#endif
#endif
if (PageReserved(virt_to_page(tmp))) {
if (tmp >= (unsigned long)&_text
&& tmp < (unsigned long)&_etext)
......@@ -155,14 +116,6 @@ void __init mem_init(void)
datapages++;
continue;
}
#if 0
set_page_count(virt_to_page(tmp), 1);
#ifdef CONFIG_BLK_DEV_INITRD
if (!initrd_start ||
(tmp < (initrd_start & PAGE_MASK) || tmp >= initrd_end))
#endif
free_page(tmp);
#endif
}
#ifndef CONFIG_SUN3
......
......@@ -189,7 +189,7 @@ void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
printk ("\npa=%#lx va=%#lx ", physaddr, virtaddr);
#endif
pgd_dir = pgd_offset_k(virtaddr);
pmd_dir = pmd_alloc_kernel(pgd_dir, virtaddr);
pmd_dir = pmd_alloc(&init_mm, pgd_dir, virtaddr);
if (!pmd_dir) {
printk("ioremap: no mem for pmd_dir\n");
return NULL;
......@@ -201,7 +201,7 @@ void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
virtaddr += PTRTREESIZE;
size -= PTRTREESIZE;
} else {
pte_dir = pte_alloc_kernel(pmd_dir, virtaddr);
pte_dir = pte_alloc_kernel(&init_mm, pmd_dir, virtaddr);
if (!pte_dir) {
printk("ioremap: no mem for pte_dir\n");
return NULL;
......@@ -273,7 +273,7 @@ void __iounmap(void *addr, unsigned long size)
pmd_clear(pmd_dir);
return;
}
pte_dir = pte_offset(pmd_dir, virtaddr);
pte_dir = pte_offset_kernel(pmd_dir, virtaddr);
pte_val(*pte_dir) = 0;
virtaddr += PAGE_SIZE;
......@@ -350,7 +350,7 @@ void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
pmd_clear(pmd_dir);
return;
}
pte_dir = pte_offset(pmd_dir, virtaddr);
pte_dir = pte_offset_kernel(pmd_dir, virtaddr);
pte_val(*pte_dir) = (pte_val(*pte_dir) & _CACHEMASK040) | cmode;
virtaddr += PAGE_SIZE;
......
......@@ -25,69 +25,6 @@
#include <asm/amigahw.h>
#endif
struct pgtable_cache_struct quicklists;
/* Report a corrupt pmd entry and repoint it at BAD_PAGETABLE. */
void __bad_pte(pmd_t *pmd)
{
printk("Bad pmd in pte_alloc: %08lx\n", pmd_val(*pmd));
pmd_set(pmd, BAD_PAGETABLE);
}
/* Report a corrupt pgd entry and repoint it at BAD_PAGETABLE. */
void __bad_pmd(pgd_t *pgd)
{
printk("Bad pgd in pmd_alloc: %08lx\n", pgd_val(*pgd));
pgd_set(pgd, (pmd_t *)BAD_PAGETABLE);
}
#if 0
/*
 * Slow-path pte allocation (dead code: wrapped in #if 0 above).
 * Allocates a fresh page, prepares it for use as a page table
 * (clear, push to RAM, flush the kernel TLB entry, mark the page
 * non-cacheable) and installs it in *pmd.  On allocation failure
 * the pmd is pointed at BAD_PAGETABLE and NULL is returned.
 */
pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset)
{
pte_t *pte;
pte = (pte_t *) __get_free_page(GFP_KERNEL);
/* Only install if the pmd is still empty. */
if (pmd_none(*pmd)) {
if (pte) {
clear_page(pte);
__flush_page_to_ram((unsigned long)pte);
flush_tlb_kernel_page((unsigned long)pte);
nocache_page((unsigned long)pte);
pmd_set(pmd, pte);
return pte + offset;
}
pmd_set(pmd, BAD_PAGETABLE);
return NULL;
}
/* pmd already populated: drop our page and use the existing table. */
free_page((unsigned long)pte);
if (pmd_bad(*pmd)) {
__bad_pte(pmd);
return NULL;
}
return (pte_t *)__pmd_page(*pmd) + offset;
}
#endif
#if 0
/*
 * Slow-path pmd allocation (dead code: wrapped in #if 0 above).
 * Takes a pointer table from the pool and installs it in *pgd; on
 * allocation failure the pgd is pointed at BAD_PAGETABLE.
 */
pmd_t *get_pmd_slow(pgd_t *pgd, unsigned long offset)
{
pmd_t *pmd;
pmd = get_pointer_table();
/* Only install if the pgd is still empty. */
if (pgd_none(*pgd)) {
if (pmd) {
pgd_set(pgd, pmd);
return pmd + offset;
}
pgd_set(pgd, (pmd_t *)BAD_PAGETABLE);
return NULL;
}
/* pgd already populated: return the table to the pool. */
free_pointer_table(pmd);
if (pgd_bad(*pgd)) {
__bad_pmd(pgd);
return NULL;
}
return (pmd_t *)__pgd_page(*pgd) + offset;
}
#endif
/* ++andreas: {get,free}_pointer_table rewritten to use unused fields from
struct page instead of separately kmalloced struct. Stolen from
......
......@@ -176,7 +176,7 @@ map_chunk (unsigned long addr, long size)
pte_dir = kernel_page_table();
pmd_set(pmd_dir, pte_dir);
}
pte_dir = pte_offset(pmd_dir, virtaddr);
pte_dir = pte_offset_kernel(pmd_dir, virtaddr);
if (virtaddr) {
if (!pte_present(*pte_dir))
......@@ -262,15 +262,13 @@ void __init paging_init(void)
* initialize the bad page table and bad page to point
* to a couple of allocated pages
*/
empty_bad_page_table = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
empty_bad_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
memset((void *)empty_zero_page, 0, PAGE_SIZE);
/*
* Set up SFC/DFC registers (user data space)
* Set up SFC/DFC registers
*/
set_fs (USER_DS);
set_fs(KERNEL_DS);
#ifdef DEBUG
printk ("before free_area_init\n");
......
......@@ -59,8 +59,6 @@ void __init paging_init(void)
#ifdef TEST_VERIFY_AREA
wp_works_ok = 0;
#endif
empty_bad_page_table = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
empty_bad_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
memset((void *)empty_zero_page, 0, PAGE_SIZE);
......
#ifndef _MOTOROLA_PGALLOC_H
#define _MOTOROLA_PGALLOC_H
#include <asm/tlbflush.h>
extern struct pgtable_cache_struct {
unsigned long *pmd_cache;
unsigned long *pte_cache;
/* This counts in units of pointer tables, of which can be eight per page. */
unsigned long pgtable_cache_sz;
} quicklists;
#define pgd_quicklist ((unsigned long *)0)
#define pmd_quicklist (quicklists.pmd_cache)
#define pte_quicklist (quicklists.pte_cache)
/* This isn't accurate because of fragmentation of allocated pages for
pointer tables, but that should not be a problem. */
#define pgtable_cache_size ((quicklists.pgtable_cache_sz+7)/8)
extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset);
extern pmd_t *get_pmd_slow(pgd_t *pgd, unsigned long offset);
#include <asm/tlb.h>
extern pmd_t *get_pointer_table(void);
extern int free_pointer_table(pmd_t *);
/*
 * Pop a cached pte page off the pte quicklist, or return NULL when
 * the list is empty.  Each cached page links to the next through its
 * first word; pgtable_cache_sz counts pointer tables (8 per page,
 * per the struct comment above).
 */
extern inline pte_t *get_pte_fast(void)
{
unsigned long *ret;
ret = pte_quicklist;
if (ret) {
/* Unlink and clear the link word before handing the page out. */
pte_quicklist = (unsigned long *)*ret;
ret[0] = 0;
quicklists.pgtable_cache_sz -= 8;
}
return (pte_t *)ret;
}
#define pte_alloc_one_fast(mm,addr) get_pte_fast()
static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
pte_t *pte;
......@@ -53,123 +22,90 @@ static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
return pte;
}
/* Allocate a pmd table from the pointer-table pool ('mm'/'address' unused). */
extern __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
return get_pointer_table();
}
/* Push a pte page back onto the quicklist, linking through its first word. */
extern inline void free_pte_fast(pte_t *pte)
{
*(unsigned long *)pte = (unsigned long)pte_quicklist;
pte_quicklist = (unsigned long *)pte;
quicklists.pgtable_cache_sz += 8;
}
extern inline void free_pte_slow(pte_t *pte)
static inline void pte_free_kernel(pte_t *pte)
{
cache_page((unsigned long)pte);
free_page((unsigned long) pte);
}
extern inline pmd_t *get_pmd_fast(void)
static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
unsigned long *ret;
struct page *page = alloc_pages(GFP_KERNEL, 0);
pte_t *pte;
if(!page)
return NULL;
ret = pmd_quicklist;
if (ret) {
pmd_quicklist = (unsigned long *)*ret;
ret[0] = 0;
quicklists.pgtable_cache_sz--;
pte = kmap(page);
if (pte) {
clear_page(pte);
__flush_page_to_ram((unsigned long)pte);
flush_tlb_kernel_page((unsigned long)pte);
nocache_page((unsigned long)pte);
}
return (pmd_t *)ret;
}
#define pmd_alloc_one_fast(mm,addr) get_pmd_fast()
kunmap(pte);
extern inline void free_pmd_fast(pmd_t *pmd)
{
*(unsigned long *)pmd = (unsigned long)pmd_quicklist;
pmd_quicklist = (unsigned long *) pmd;
quicklists.pgtable_cache_sz++;
return page;
}
extern inline int free_pmd_slow(pmd_t *pmd)
static inline void pte_free(struct page *page)
{
return free_pointer_table(pmd);
cache_page((unsigned long)kmap(page));
kunmap(page);
__free_page(page);
}
/* The pgd cache is folded into the pmd cache, so these are dummy routines. */
extern inline pgd_t *get_pgd_fast(void)
static inline void pte_free_tlb(mmu_gather_t *tlb, struct page *page)
{
return (pgd_t *)0;
cache_page((unsigned long)kmap(page));
kunmap(page);
__free_page(page);
}
extern inline void free_pgd_fast(pgd_t *pgd)
{
}
extern inline void free_pgd_slow(pgd_t *pgd)
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
return get_pointer_table();
}
extern void __bad_pte(pmd_t *pmd);
extern void __bad_pmd(pgd_t *pgd);
extern inline void pte_free(pte_t *pte)
static inline int pmd_free(pmd_t *pmd)
{
free_pte_fast(pte);
return free_pointer_table(pmd);
}
extern inline void pmd_free(pmd_t *pmd)
static inline int pmd_free_tlb(mmu_gather_t *tlb, pmd_t *pmd)
{
free_pmd_fast(pmd);
return free_pointer_table(pmd);
}
extern inline void pte_free_kernel(pte_t *pte)
static inline void pgd_free(pgd_t *pgd)
{
free_pte_fast(pte);
pmd_free((pmd_t *)pgd);
}
extern inline pte_t *pte_alloc_kernel(pmd_t *pmd, unsigned long address)
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
return pte_alloc(&init_mm,pmd, address);
return (pgd_t *)get_pointer_table();
}
extern inline void pmd_free_kernel(pmd_t *pmd)
{
free_pmd_fast(pmd);
}
extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
return pmd_alloc(&init_mm,pgd, address);
pmd_set(pmd, pte);
}
extern inline void pgd_free(pgd_t *pgd)
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
{
free_pmd_fast((pmd_t *)pgd);
pmd_set(pmd, page_address(page));
}
extern inline pgd_t *pgd_alloc(struct mm_struct *mm)
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
pgd_t *pgd = (pgd_t *)get_pmd_fast();
if (!pgd)
pgd = (pgd_t *)get_pointer_table();
return pgd;
pgd_set(pgd, pmd);
}
#define pmd_populate(MM, PMD, PTE) pmd_set(PMD, PTE)
#define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
extern int do_check_pgt_cache(int, int);
/* Intentionally empty on m68k; removed by this patch. */
extern inline void set_pgdir(unsigned long address, pgd_t entry)
{
}
#define check_pgt_cache() do { } while (0)
#endif /* _MOTOROLA_PGALLOC_H */
/* sun3_pgalloc.h --
* reorganization around 2.3.39, routines moved from sun3_pgtable.h
*
*
* 02/27/2002 -- Modified to support "highpte" implementation in 2.5.5 (Sam)
*
* moved 1/26/2000 Sam Creasey
*/
#ifndef _SUN3_PGALLOC_H
#define _SUN3_PGALLOC_H
/* Pagetable caches. */
//todo: should implement for at least ptes. --m
#define pgd_quicklist ((unsigned long *) 0)
#define pmd_quicklist ((unsigned long *) 0)
#define pte_quicklist ((unsigned long *) 0)
#define pgtable_cache_size (0L)
/* Allocation and deallocation of various flavours of pagetables. */
/* Quicklist stubs: sun3 keeps no pagetable cache (see empty defines above). */
extern inline int free_pmd_fast (pmd_t *pmdp) { return 0; }
extern inline int free_pmd_slow (pmd_t *pmdp) { return 0; }
extern inline pmd_t *get_pmd_fast (void) { return (pmd_t *) 0; }
//todo: implement the following properly.
#define get_pte_fast() ((pte_t *) 0)
#define get_pte_slow pte_alloc
#define free_pte_fast(pte)
#define free_pte_slow pte_free
#include <asm/tlb.h>
/* FIXME - when we get this compiling */
/* erm, now that it's compiling, what do we do with it? */
#define _KERNPG_TABLE 0
/* Free a kernel pte page (allocated page-at-a-time by pte_alloc_kernel). */
extern inline void pte_free_kernel(pte_t * pte)
{
free_page((unsigned long) pte);
}
extern const char bad_pmd_string[];
/*
 * Return (allocating if needed) the kernel pte for 'address' under
 * *pmd.  A fresh page is installed with _KERNPG_TABLE; on allocation
 * failure, or on a corrupt pmd, the entry is pointed at BAD_PAGETABLE
 * and NULL is returned.
 */
extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
{
/* Reduce the virtual address to an index into the pte table. */
address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
if (pmd_none(*pmd)) {
pte_t * page = (pte_t *) get_free_page(GFP_KERNEL);
/* Re-check: the pmd may have been populated during allocation. */
if (pmd_none(*pmd)) {
if (page) {
pmd_val(*pmd) = _KERNPG_TABLE + __pa(page);
return page + address;
}
pmd_val(*pmd) = _KERNPG_TABLE + __pa((unsigned long)BAD_PAGETABLE);
return NULL;
}
free_page((unsigned long) page);
}
if (pmd_bad(*pmd)) {
printk(bad_pmd_string, pmd_val(*pmd));
printk("at kernel pgd off %08x\n", (unsigned int)pmd);
pmd_val(*pmd) = _KERNPG_TABLE + __pa((unsigned long)BAD_PAGETABLE);
return NULL;
}
return (pte_t *) __pmd_page(*pmd) + address;
}
#define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); })
/*
* allocating and freeing a pmd is trivial: the 1-entry pmd is
* inside the pgd, so has no extra memory associated with it.
*/
extern inline void pmd_free_kernel(pmd_t * pmd)
static inline void pte_free_kernel(pte_t * pte)
{
// pmd_val(*pmd) = 0;
free_page((unsigned long) pte);
}
extern inline pmd_t * pmd_alloc_kernel(pgd_t * pgd, unsigned long address)
static inline void pte_free(struct page *page)
{
return (pmd_t *) pgd;
__free_page(page);
}
#define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); })
#define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); })
extern inline void pte_free(pte_t * pte)
static inline void pte_free_tlb(mmu_gather_t *tlb, struct page *page)
{
free_page((unsigned long) pte);
tlb_remove_page(tlb, page);
}
static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
unsigned long page = __get_free_page(GFP_KERNEL);
......@@ -90,30 +45,45 @@ static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
return NULL;
memset((void *)page, 0, PAGE_SIZE);
// pmd_val(*pmd) = SUN3_PMD_MAGIC + __pa(page);
/* pmd_val(*pmd) = __pa(page); */
return (pte_t *) (page);
}
#define pte_alloc_one_fast(mm,addr) pte_alloc_one(mm,addr)
/* Allocate one zeroed page to back a user pte table (highpte-style,
 * returning the struct page rather than a kernel pointer). */
static inline struct page *pte_alloc_one(struct mm_struct *mm,
unsigned long address)
{
struct page *page = alloc_pages(GFP_KERNEL, 0);
if (page == NULL)
return NULL;
clear_highpage(page);
return page;
}
#define pmd_populate(mm, pmd, pte) (pmd_val(*pmd) = __pa((unsigned long)pte))
/* Point the pmd entry at a kernel pte table (stored as a physical address). */
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
pmd_val(*pmd) = __pa((unsigned long)pte);
}
/* Point the pmd entry at a user pte page (physical address of the page). */
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
{
pmd_val(*pmd) = __pa((unsigned long)page_address(page));
}
/*
* allocating and freeing a pmd is trivial: the 1-entry pmd is
* inside the pgd, so has no extra memory associated with it.
*/
/* The 1-entry pmd is embedded in the pgd, so freeing just clears the entry. */
extern inline void pmd_free(pmd_t * pmd)
{
pmd_val(*pmd) = 0;
}
#define pmd_free(x) do { } while (0)
#define pmd_free_tlb(tlb, x) do { } while (0)
extern inline void pgd_free(pgd_t * pgd)
static inline void pgd_free(pgd_t * pgd)
{
free_page((unsigned long) pgd);
}
extern inline pgd_t * pgd_alloc(struct mm_struct *mm)
static inline pgd_t * pgd_alloc(struct mm_struct *mm)
{
pgd_t *new_pgd;
......@@ -125,14 +95,6 @@ extern inline pgd_t * pgd_alloc(struct mm_struct *mm)
#define pgd_populate(mm, pmd, pte) BUG()
/* FIXME: the sun3 doesn't have a page table cache!
(but the motorola routine should just return 0) */
extern int do_check_pgt_cache(int, int);
/* Intentionally empty on sun3; removed by this patch. */
extern inline void set_pgdir(unsigned long address, pgd_t entry)
{
}
/* Reserved PMEGs. */
extern char sun3_reserved_pmeg[SUN3_PMEGS_NUM];
......@@ -141,5 +103,6 @@ extern unsigned char pmeg_alloc[SUN3_PMEGS_NUM];
extern unsigned char pmeg_ctx[SUN3_PMEGS_NUM];
#define check_pgt_cache() do { } while (0)
#endif /* SUN3_PGALLOC_H */
#ifndef _M68K_TLB_H
#define _M68K_TLB_H
/*
 * m68k doesn't need any special per-pte or
 * per-vma handling..
 */
#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
/*
 * .. because we flush the whole mm when it
 * fills up.
 */
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
/* Everything else comes from the generic mmu_gather implementation. */
#include <asm-generic/tlb.h>
#endif /* _M68K_TLB_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment