Commit 57cb5f38 authored by Geert Uytterhoeven, committed by Linus Torvalds

[PATCH] M68k update (part 30)

M68k core mm updates
  - Update fault handling
  - Remove superfluous flush_tlb_page()
  - Remove obsolete comment
  - empty_zero_page becomes a void *
  - Type and cast clean ups
  - Remove duplicated test for voff == 0
  - Move check_pgt_cache() to pgtable.h
  - Pte and pmd updates
  - Add additional parameter to {clear,copy}_user_page()
  - pfn updates
  - BUG() updates
  - Move check_pgt_cache() from motorola_pgalloc.h
  - Misc updates
  - Add __page_address() and page_to_phys()
parent 208b7d1e
@@ -152,22 +152,25 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
+ survive:
 	fault = handle_mm_fault(mm, vma, address, write);
 #ifdef DEBUG
 	printk("handle_mm_fault returns %d\n",fault);
 #endif
-	if (fault < 0)
-		goto out_of_memory;
-	if (!fault)
-		goto bus_err;
+	switch (fault) {
+	case 1:
+		current->min_flt++;
+		break;
+	case 2:
+		current->maj_flt++;
+		break;
+	case 0:
+		goto bus_err;
+	default:
+		goto out_of_memory;
+	}
 
-	/* There seems to be a missing invalidate somewhere in do_no_page.
-	 * Until I found it, this one cures the problem and makes
-	 * 1.2 run on the 68040 (Martin Apel).
-	 */
-#warning should be obsolete now...
-	if (CPU_IS_040_OR_060)
-		flush_tlb_page(vma, address);
 	up_read(&mm->mmap_sem);
 	return 0;
@@ -176,6 +179,13 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
 	 * us unable to handle the page fault gracefully.
 	 */
 out_of_memory:
+	up_read(&mm->mmap_sem);
+	if (current->pid == 1) {
+		yield();
+		down_read(&mm->mmap_sem);
+		goto survive;
+	}
+
 	printk("VM: killing process %s\n", current->comm);
 	if (user_mode(regs))
 		do_exit(SIGKILL);
......
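The new switch makes the return contract of handle_mm_fault() in this kernel generation explicit: 1 means a minor fault (the page was already resident), 2 a major fault (I/O was needed), 0 an access that should be reported as a bus error, and a negative value that memory is exhausted. Below is a minimal user-space sketch of the same dispatch; the task struct and outcome enum are hypothetical stand-ins (only min_flt/maj_flt are real kernel field names), so this is illustrative, not kernel code.

#include <stdio.h>

/* Stand-ins for the kernel's per-task counters and fault outcomes. */
struct task { unsigned long min_flt, maj_flt; };
enum outcome { OK, BUS_ERROR, OOM };

static enum outcome account_fault(struct task *t, int fault)
{
	switch (fault) {
	case 1:			/* minor fault: page was already in memory */
		t->min_flt++;
		return OK;
	case 2:			/* major fault: page had to be read in */
		t->maj_flt++;
		return OK;
	case 0:			/* access error: reported as a bus error */
		return BUS_ERROR;
	default:		/* negative: allocation failed */
		return OOM;
	}
}

int main(void)
{
	struct task t = { 0, 0 };
	account_fault(&t, 1);
	account_fault(&t, 2);
	printf("min_flt=%lu maj_flt=%lu\n", t.min_flt, t.maj_flt);
	return 0;
}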
@@ -36,17 +36,11 @@
 mmu_gather_t mmu_gathers[NR_CPUS];
 
 /*
- * BAD_PAGE is the page that is used for page faults when linux
- * is out-of-memory. Older versions of linux just did a
- * do_exit(), but using this instead means there is less risk
- * for a process dying in kernel mode, possibly leaving an inode
- * unused etc..
- *
  * ZERO_PAGE is a special page that is used for zero-initialized
  * data and COW.
  */
-unsigned long empty_zero_page;
+void *empty_zero_page;
 
 void show_mem(void)
 {
......
@@ -77,14 +77,14 @@ pmd_t *get_pointer_table (void)
 	 * virtual address space to be noncacheable.
 	 */
 	if (mask == 0) {
-		unsigned long page;
+		void *page;
 		ptable_desc *new;
 
-		if (!(page = get_free_page (GFP_KERNEL)))
+		if (!(page = (void *)get_free_page(GFP_KERNEL)))
 			return 0;
 
 		flush_tlb_kernel_page(page);
-		nocache_page (page);
+		nocache_page(page);
 
 		new = PD_PTABLE(page);
 		PD_MARKBITS(new) = 0xfe;
@@ -119,7 +119,7 @@ int free_pointer_table (pmd_t *ptable)
 	if (PD_MARKBITS(dp) == 0xff) {
 		/* all tables in page are free, free page */
 		list_del(dp);
-		cache_page (page);
+		cache_page((void *)page);
 		free_page (page);
 		return 1;
 	} else if (ptable_list.next != dp) {
@@ -186,10 +186,6 @@ unsigned long mm_vtop(unsigned long vaddr)
 			voff -= m68k_memory[i].size;
 		} while (++i < m68k_num_memory);
 
 		/* As a special case allow `__pa(high_memory)'. */
 		if (voff == 0)
 			return m68k_memory[i-1].addr + m68k_memory[i-1].size;
-
-		/* As a special case allow `__pa(high_memory)'. */
-		if (voff == 0)
-			return m68k_memory[i-1].addr + m68k_memory[i-1].size;
......
@@ -52,9 +52,9 @@ static pte_t * __init kernel_page_table(void)
 	ptablep = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
 
 	clear_page(ptablep);
-	__flush_page_to_ram((unsigned long) ptablep);
-	flush_tlb_kernel_page((unsigned long) ptablep);
-	nocache_page ((unsigned long)ptablep);
+	__flush_page_to_ram(ptablep);
+	flush_tlb_kernel_page(ptablep);
+	nocache_page(ptablep);
 
 	return ptablep;
 }
@@ -87,15 +87,15 @@ static pmd_t * __init kernel_ptr_table(void)
 #endif
 	}
 
-	if (((unsigned long)(last_pgtable + PTRS_PER_PMD) & ~PAGE_MASK) == 0) {
+	last_pgtable += PTRS_PER_PMD;
+	if (((unsigned long)last_pgtable & ~PAGE_MASK) == 0) {
 		last_pgtable = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
 
 		clear_page(last_pgtable);
-		__flush_page_to_ram((unsigned long)last_pgtable);
-		flush_tlb_kernel_page((unsigned long)last_pgtable);
-		nocache_page((unsigned long)last_pgtable);
-	} else
-		last_pgtable += PTRS_PER_PMD;
+		__flush_page_to_ram(last_pgtable);
+		flush_tlb_kernel_page(last_pgtable);
+		nocache_page(last_pgtable);
+	}
 
 	return last_pgtable;
 }
@@ -262,8 +262,8 @@ void __init paging_init(void)
 	 * initialize the bad page table and bad page to point
 	 * to a couple of allocated pages
 	 */
-	empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
-	memset((void *)empty_zero_page, 0, PAGE_SIZE);
+	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
+	memset(empty_zero_page, 0, PAGE_SIZE);
 
 	/*
 	 * Set up SFC/DFC registers
......
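The kernel_ptr_table() rewrite above replaces the old "advance or allocate" if/else with an unconditional advance followed by a wrap check: bump the cursor by one table, and only when it lands exactly on a page boundary start a fresh page. A user-space sketch of that carving pattern follows, assuming 4 KB pages and eight 512-byte pointer tables per page; aligned_alloc() stands in for alloc_bootmem_low_pages(), and the cache/TLB maintenance is omitted.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE	4096u
#define PAGE_MASK	(~(uintptr_t)(PAGE_SIZE - 1))
#define TABLE_BYTES	(PAGE_SIZE / 8)	/* one table, as in PTRS_PER_PMD * sizeof(pmd_t) */

static char *last_table;

static char *next_table(void)
{
	last_table += TABLE_BYTES;
	/* Crossing onto a page boundary means the previous page is used up. */
	if (((uintptr_t)last_table & ~PAGE_MASK) == 0)
		last_table = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
	return last_table;
}

int main(void)
{
	last_table = aligned_alloc(PAGE_SIZE, PAGE_SIZE);	/* prime the cursor */
	for (int i = 0; i < 9; i++)
		printf("table %d at %p\n", i, (void *)next_table());
	return 0;
}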
@@ -14,9 +14,9 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long ad
 	pte = (pte_t *) __get_free_page(GFP_KERNEL);
 
 	if (pte) {
 		clear_page(pte);
-		__flush_page_to_ram((unsigned long)pte);
-		flush_tlb_kernel_page((unsigned long)pte);
-		nocache_page((unsigned long)pte);
+		__flush_page_to_ram(pte);
+		flush_tlb_kernel_page(pte);
+		nocache_page(pte);
 	}
 
 	return pte;
@@ -24,7 +24,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long ad
 
 static inline void pte_free_kernel(pte_t *pte)
 {
-	cache_page((unsigned long)pte);
+	cache_page(pte);
 	free_page((unsigned long) pte);
 }
@@ -39,9 +39,9 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long add
 	pte = kmap(page);
 	if (pte) {
 		clear_page(pte);
-		__flush_page_to_ram((unsigned long)pte);
-		flush_tlb_kernel_page((unsigned long)pte);
-		nocache_page((unsigned long)pte);
+		__flush_page_to_ram(pte);
+		flush_tlb_kernel_page(pte);
+		nocache_page(pte);
 	}
 	kunmap(pte);
@@ -50,14 +50,14 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long add
 
 static inline void pte_free(struct page *page)
 {
-	cache_page((unsigned long)kmap(page));
+	cache_page(kmap(page));
 	kunmap(page);
 	__free_page(page);
 }
 
 static inline void pte_free_tlb(mmu_gather_t *tlb, struct page *page)
 {
-	cache_page((unsigned long)kmap(page));
+	cache_page(kmap(page));
 	kunmap(page);
 	__free_page(page);
 }
@@ -105,7 +105,4 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
 	pgd_set(pgd, pmd);
 }
 
-#define check_pgt_cache()	do { } while (0)
-
 #endif /* _MOTOROLA_PGALLOC_H */
@@ -93,21 +93,7 @@ extern unsigned long mm_cachebits;
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
  */
-#define __mk_pte(page, pgprot) \
-({									\
-	pte_t __pte;							\
-									\
-	pte_val(__pte) = __pa(page) + pgprot_val(pgprot);		\
-	__pte;								\
-})
-#define mk_pte(page, pgprot) __mk_pte(page_address(page), (pgprot))
-#define mk_pte_phys(physpage, pgprot) \
-({									\
-	pte_t __pte;							\
-									\
-	pte_val(__pte) = (physpage) + pgprot_val(pgprot);		\
-	__pte;								\
-})
+#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
 
 extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 { pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }
@@ -134,7 +120,10 @@ extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
 #define pte_none(pte)		(!pte_val(pte))
 #define pte_present(pte)	(pte_val(pte) & (_PAGE_PRESENT | _PAGE_FAKE_SUPER))
 #define pte_clear(ptep)		({ pte_val(*(ptep)) = 0; })
-#define pte_pagenr(pte)		((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT)
+
+#define pte_page(pte)		(mem_map + ((unsigned long)(__va(pte_val(pte)) - PAGE_OFFSET) >> PAGE_SHIFT))
+#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
+#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
 
 #define pmd_none(pmd)		(!pmd_val(pmd))
 #define pmd_bad(pmd)		((pmd_val(pmd) & _DESCTYPE_MASK) != _PAGE_TABLE)
@@ -145,16 +134,13 @@ extern inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
 	while (--__i >= 0)	\
 		*__ptr++ = 0;	\
 })
+#define pmd_page(pmd)		(mem_map + ((unsigned long)(__va(pmd_val(pmd)) - PAGE_OFFSET) >> PAGE_SHIFT))
 
 #define pgd_none(pgd)		(!pgd_val(pgd))
 #define pgd_bad(pgd)		((pgd_val(pgd) & _DESCTYPE_MASK) != _PAGE_TABLE)
 #define pgd_present(pgd)	(pgd_val(pgd) & _PAGE_TABLE)
 #define pgd_clear(pgdp)		({ pgd_val(*pgdp) = 0; })
 
-/* Permanent address of a page. */
-#define page_address(page)	({ if (!(page)->virtual) BUG(); (page)->virtual; })
-#define __page_address(page)	(PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
-#define pte_page(pte)		(mem_map+pte_pagenr(pte))
-
 #define pte_ERROR(e) \
 	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
@@ -217,11 +203,15 @@ extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
 }
 
 /* Find an entry in the third-level page table.. */
-extern inline pte_t * pte_offset(pmd_t * pmdp, unsigned long address)
+extern inline pte_t * pte_offset_kernel(pmd_t * pmdp, unsigned long address)
 {
 	return (pte_t *)__pmd_page(*pmdp) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
 }
 
+#define pte_offset_map(pmdp,address) ((pte_t *)kmap(pmd_page(*pmdp)) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
+#define pte_offset_map_nested(pmdp, address) pte_offset_map(pmdp, address)
+#define pte_unmap(pte) kunmap(pte)
+#define pte_unmap_nested(pte) kunmap(pte)
+
 /*
  * Allocate and free page tables. The xxx_kernel() versions are
@@ -233,30 +223,34 @@ extern inline pte_t * pte_offset(pmd_t * pmdp, unsigned long address)
  * from both the cache and ATC, or the CPU might not notice that the
  * cache setting for the page has been changed. -jskov
  */
-static inline void nocache_page (unsigned long vaddr)
+static inline void nocache_page(void *vaddr)
 {
+	unsigned long addr = (unsigned long)vaddr;
+
 	if (CPU_IS_040_OR_060) {
 		pgd_t *dir;
 		pmd_t *pmdp;
 		pte_t *ptep;
 
-		dir = pgd_offset_k(vaddr);
-		pmdp = pmd_offset(dir,vaddr);
-		ptep = pte_offset(pmdp,vaddr);
+		dir = pgd_offset_k(addr);
+		pmdp = pmd_offset(dir, addr);
+		ptep = pte_offset_kernel(pmdp, addr);
 		*ptep = pte_mknocache(*ptep);
 	}
 }
 
-static inline void cache_page (unsigned long vaddr)
+static inline void cache_page(void *vaddr)
 {
+	unsigned long addr = (unsigned long)vaddr;
+
 	if (CPU_IS_040_OR_060) {
 		pgd_t *dir;
 		pmd_t *pmdp;
 		pte_t *ptep;
 
-		dir = pgd_offset_k(vaddr);
-		pmdp = pmd_offset(dir,vaddr);
-		ptep = pte_offset(pmdp,vaddr);
+		dir = pgd_offset_k(addr);
+		pmdp = pmd_offset(dir, addr);
+		ptep = pte_offset_kernel(pmdp, addr);
 		*ptep = pte_mkcache(*ptep);
 	}
 }
......
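Both pte_offset_kernel() and the new pte_offset_map() locate an entry with the same arithmetic: the bits just above the page offset index into the pte table. With m68k's PAGE_SHIFT of 12 and PTRS_PER_PTE of 1024, one pte table spans 4 MB. The index computation can be checked in isolation; this is a standalone sketch, not kernel code.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PTRS_PER_PTE	1024

/* Same expression as in pte_offset_kernel() above. */
static unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

int main(void)
{
	printf("%lu\n", pte_index(0x00000000));	/* 0 */
	printf("%lu\n", pte_index(0x00001000));	/* 1 */
	printf("%lu\n", pte_index(0x003ff000));	/* 1023: last entry of the table */
	printf("%lu\n", pte_index(0x00400000));	/* 0: wraps into the next table */
	return 0;
}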
@@ -76,8 +76,8 @@ static inline void clear_page(void *page)
 #define copy_page(to,from)	memcpy((to), (from), PAGE_SIZE)
 #endif
 
-#define clear_user_page(page, vaddr)	clear_page(page)
-#define copy_user_page(to, from, vaddr)	copy_page(to, from)
+#define clear_user_page(page, vaddr, pg)	clear_page(page)
+#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
 
 /*
  * These are used to make use of C type-checking..
@@ -159,10 +159,25 @@ static inline void *__va(unsigned long x)
 }
 #endif	/* CONFIG_SUN3 */
 
 #define MAP_NR(addr)		(((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT)
+
+/*
+ * NOTE: virtual isn't really correct, actually it should be the offset into the
+ * memory node, but we have no highmem, so that works for now.
+ * TODO: implement (fast) pfn<->pgdat_idx conversion functions, this makes lots
+ * of the shifts unneccessary.
+ */
+#define virt_to_pfn(kaddr)	(__pa(kaddr) >> PAGE_SHIFT)
+#define pfn_to_virt(pfn)	__va((pfn) << PAGE_SHIFT)
+
 #define virt_to_page(kaddr)	(mem_map + (((unsigned long)(kaddr)-PAGE_OFFSET) >> PAGE_SHIFT))
 #define VALID_PAGE(page)	((page - mem_map) < max_mapnr)
 #define page_to_virt(page)	((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)
+
+#define pfn_to_page(pfn)	virt_to_page(pfn_to_virt(pfn))
+#define page_to_pfn(page)	virt_to_pfn(page_to_virt(page))
+
+#define virt_addr_valid(kaddr)	((void *)(kaddr) >= (void *)PAGE_OFFSET && (void *)(kaddr) < high_memory)
+#define pfn_valid(pfn)		virt_addr_valid(pfn_to_virt(pfn))
 
 #ifdef CONFIG_DEBUG_BUGVERBOSE
+#ifndef CONFIG_SUN3
 #define BUG() do { \
 	printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
@@ -174,6 +189,11 @@ static inline void *__va(unsigned long x)
 	panic("BUG!"); \
 } while (0)
 #endif
+#else
+#define BUG() do { \
+	asm volatile("illegal"); \
+} while (0)
+#endif
 
 #define PAGE_BUG(page) do { \
 	BUG(); \
......
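The new page.h macros build everything from two primitives, virt_to_pfn() and pfn_to_virt(): pfn_to_page(), page_to_pfn() and pfn_valid() are just compositions of those with the existing virt/page conversions. The round trip can be modeled in user space; the sketch below treats __pa()/__va() as a single linear offset, which only holds for one contiguous memory chunk, and the PAGE_OFFSET value is an arbitrary illustrative choice.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_OFFSET	0x10000000UL	/* illustrative, not a real m68k value */

/* Linear-offset model of __pa()/__va() for a single memory chunk. */
#define __pa(kaddr)	((unsigned long)(kaddr) - PAGE_OFFSET)
#define __va(paddr)	((unsigned long)(paddr) + PAGE_OFFSET)

/* The two primitives from the hunk above; the rest are compositions. */
#define virt_to_pfn(kaddr)	(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_virt(pfn)	__va((pfn) << PAGE_SHIFT)

int main(void)
{
	unsigned long kaddr = PAGE_OFFSET + (5UL << PAGE_SHIFT);
	unsigned long pfn = virt_to_pfn(kaddr);

	/* Round trip: pfn 5 maps back to the same kernel address. */
	printf("kaddr=%#lx -> pfn=%lu -> back=%#lx\n",
	       kaddr, pfn, pfn_to_virt(pfn));
	return 0;
}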
@@ -6,6 +6,7 @@
 #ifndef __ASSEMBLY__
 #include <asm/processor.h>
 #include <linux/sched.h>
+#include <linux/threads.h>
 
 /*
@@ -88,20 +89,12 @@ extern unsigned long vmalloc_end;
 #endif /* CONFIG_SUN3 */
 
 /* zero page used for uninitialized stuff */
-extern unsigned long empty_zero_page;
+extern void *empty_zero_page;
 
 /*
- * BAD_PAGETABLE is used when we need a bogus page-table, while
- * BAD_PAGE is used for a bogus page.
- *
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
  */
-extern pte_t __bad_page(void);
-extern pte_t * __bad_pagetable(void);
-
-#define BAD_PAGETABLE __bad_pagetable()
-#define BAD_PAGE __bad_page()
 #define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))
 
 /* number of bits that fit into a memory pointer */
@@ -185,4 +178,6 @@ extern inline void update_mmu_cache(struct vm_area_struct * vma,
  */
 #define pgtable_cache_init()	do { } while (0)
 
+#define check_pgt_cache()	do { } while (0)
+
 #endif /* _M68K_PGTABLE_H */
@@ -99,11 +99,8 @@
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
  */
-#define __mk_pte(page, pgprot) \
-({ pte_t __pte; pte_val(__pte) = (__pa(page) >> PAGE_SHIFT) | pgprot_val(pgprot); __pte; })
-#define mk_pte(page, pgprot) __mk_pte(page_address(page), (pgprot))
-#define mk_pte_phys(physpage, pgprot) \
-({ pte_t __pte; pte_val(__pte) = ((physpage) >> PAGE_SHIFT) | pgprot_val(pgprot); __pte; })
+#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
 
 extern inline pte_t pte_modify (pte_t pte, pgprot_t newprot)
 { pte_val(pte) = (pte_val(pte) & SUN3_PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }
@@ -121,12 +118,12 @@ extern inline int pte_none (pte_t pte) { return !pte_val (pte); }
 extern inline int pte_present (pte_t pte)	{ return pte_val (pte) & SUN3_PAGE_VALID; }
 extern inline void pte_clear (pte_t *ptep)	{ pte_val (*ptep) = 0; }
 
-/* FIXME: this is only a guess */
-#define pte_pagenr(pte)		((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT)
-
-/* Permanent address of a page. */
-#define page_address(page)	({ if (!(page)->virtual) BUG(); (page)->virtual; })
-#define __page_address(page)	(PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
-#define pte_page(pte)		(mem_map+pte_pagenr(pte))
+#define pte_pfn(pte)		(pte_val(pte) & SUN3_PAGE_PGNUM_MASK)
+#define pfn_pte(pfn, pgprot) \
+({ pte_t __pte; pte_val(__pte) = pfn | pgprot_val(pgprot); __pte; })
+
+#define pte_page(pte)		(mem_map+((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT))
+#define pmd_page(pmd)		(mem_map+((__pmd_page(pmd) - PAGE_OFFSET) >> PAGE_SHIFT))
 
 extern inline int pmd_none2 (pmd_t *pmd) { return !pmd_val (*pmd); }
@@ -199,21 +196,13 @@ extern inline pmd_t *pmd_offset (pgd_t *pgd, unsigned long address)
 }
 
 /* Find an entry in the third-level pagetable. */
-#define pte_offset(pmd, address) \
-((pte_t *) __pmd_page (*pmd) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE-1)))
-
-/* Disable caching for page at given kernel virtual address. */
-static inline void nocache_page (unsigned long vaddr)
-{
-	/* Don't think this is required on sun3. --m */
-}
-
-/* Enable caching for page at given kernel virtual address. */
-static inline void cache_page (unsigned long vaddr)
-{
-	/* Don't think this is required on sun3. --m */
-}
+#define __pte_offset(address) ((address >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
+#define pte_offset_kernel(pmd, address) ((pte_t *) __pmd_page(*pmd) + __pte_offset(address))
+/* FIXME: should we bother with kmap() here? */
+#define pte_offset_map(pmd, address) ((pte_t *)kmap(pmd_page(*pmd)) + __pte_offset(address))
+#define pte_offset_map_nested(pmd, address) pte_offset_map(pmd, address)
+#define pte_unmap(pte) kunmap(pte)
+#define pte_unmap_nested(pte) kunmap(pte)
 
 #endif	/* !__ASSEMBLY__ */
......
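Worth noting when comparing the two pfn_pte() definitions: the Motorola variant stores a physical address in the pte, so the pfn is shifted left by PAGE_SHIFT before the protection bits are or'ed in, while the Sun-3 MMU stores a raw page number, so the pfn goes in unshifted and pte_pfn() just masks with SUN3_PAGE_PGNUM_MASK. A side-by-side sketch; the protection values here are placeholders, not the real bit layouts.

#include <stdio.h>

#define PAGE_SHIFT	12

static unsigned long motorola_pfn_pte(unsigned long pfn, unsigned long prot)
{
	return (pfn << PAGE_SHIFT) | prot;	/* pte holds a physical address */
}

static unsigned long sun3_pfn_pte(unsigned long pfn, unsigned long prot)
{
	return pfn | prot;			/* pte holds a page number */
}

int main(void)
{
	printf("motorola: %#lx\n", motorola_pfn_pte(5, 0x3));
	printf("sun3:     %#lx\n", sun3_pfn_pte(5, 0x80000000UL));
	return 0;
}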
@@ -73,6 +73,10 @@ extern inline void * phys_to_virt(unsigned long address)
 }
 #endif
 
+/* Permanent address of a page. */
+#define __page_address(page)	(PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
+#define page_to_phys(page)	virt_to_phys((void *)__page_address(page))
+
 /*
  * IO bus memory addresses are 1:1 with the physical address,
  * except on the PCI bus of the Hades.
......
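The io.h addition composes two steps: __page_address() recovers a page's kernel virtual address from its index in mem_map, and virt_to_phys() finishes the translation. A self-contained model with a fake 16-entry mem_map follows; the identity virt_to_phys() and the zero PAGE_OFFSET are simplifications for the sketch, not the kernel's behavior.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_OFFSET	0x0UL		/* illustrative */

/* Stand-ins for the kernel's struct page array. */
struct page { int dummy; };
static struct page mem_map[16];

#define __page_address(page) \
	(PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
#define virt_to_phys(v)		((unsigned long)(v))	/* identity in this model */
#define page_to_phys(page)	virt_to_phys((void *)__page_address(page))

int main(void)
{
	/* Page 3 of the array sits 3 << PAGE_SHIFT bytes above the base. */
	printf("phys of page 3 = %#lx\n", page_to_phys(&mem_map[3]));
	return 0;
}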