Commit e1dccf46 authored by David S. Miller

Get Sparc64 building again, both UP and SMP.

parent 4d7c1a20
@@ -7,6 +7,7 @@
  */
 #include <asm/pgtable.h>
+#include <asm/mmu_context.h>
 #if PAGE_SHIFT == 13
 #define FILL_VALID_SZ_BITS1(r1) \
......
@@ -6,6 +6,9 @@
  * Copyright (C) 1997,1998 Jakub Jelinek (jj@ultra.linux.cz)
  */
 #include <asm/pgtable.h>
+#include <asm/mmu_context.h>
 /* %g1 TLB_SFSR (%g1 + %g1 == TLB_TAG_ACCESS)
  * %g2 (KERN_HIGHBITS | KERN_LOWBITS)
  * %g3 VPTE base (0xfffffffe00000000) Spitfire/Blackbird (44-bit VA space)
......
@@ -40,6 +40,7 @@
 #include <asm/head.h>
 #include <asm/starfire.h>
 #include <asm/hardirq.h>
+#include <asm/mmu_context.h>
 #ifdef CONFIG_IP_PNP
 #include <net/ipconfig.h>
......
@@ -23,6 +23,7 @@
 #include <asm/ptrace.h>
 #include <asm/atomic.h>
 #include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
 #include <asm/irq.h>
 #include <asm/page.h>
......
@@ -30,6 +30,7 @@
 #include <asm/io.h>
 #include <asm/uaccess.h>
+#include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
 #include <asm/dma.h>
 #include <asm/starfire.h>
 #include <asm/tlb.h>
@@ -89,17 +90,15 @@ void check_pgt_cache(void)
 	if (pgd_cache_size > PGT_CACHE_HIGH / 4) {
 		struct page *page, *page2;
 		for (page2 = NULL, page = (struct page *)pgd_quicklist; page;) {
-			if ((unsigned long)page->pprev_hash == 3) {
+			if ((unsigned long)page->lru.prev == 3) {
 				if (page2)
-					page2->next_hash = page->next_hash;
+					page2->lru.next = page->lru.next;
 				else
-					(struct page *)pgd_quicklist = page->next_hash;
-				page->next_hash = NULL;
-				page->pprev_hash = NULL;
+					(struct page *)pgd_quicklist = page->lru.next;
 				pgd_cache_size -= 2;
 				__free_page(page);
 				if (page2)
-					page = page2->next_hash;
+					page = (struct page *)page2->lru.next;
 				else
 					page = (struct page *)pgd_quicklist;
 				if (pgd_cache_size <= PGT_CACHE_LOW / 4)
@@ -107,7 +106,7 @@ void check_pgt_cache(void)
 				continue;
 			}
 			page2 = page;
-			page = page->next_hash;
+			page = (struct page *)page->lru.next;
 		}
 	}
 #endif
......
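Note: the check_pgt_cache() change above is a pure field rename forced by the 2.5 pagecache rework, which dropped next_hash/pprev_hash from struct page; the sparc64 pgd quicklist now threads its next pointer through page->lru.next and keeps its free-half bitmask in page->lru.prev. Each cached page holds two half-page pgds (PAGE_SHIFT is 13 on sparc64), which is why the counter drops by 2 per freed page and why a mask of 3, meaning both halves free, makes a page reclaimable. A minimal standalone sketch of that tagging scheme, with hypothetical names (tag_free_half is not a kernel function):

#include <stdio.h>

#define PAGE_SIZE 8192UL	/* sparc64: PAGE_SHIFT == 13 */

/* Mirrors the bit selection in free_pgd_fast(): bit 0 marks the low
 * half of the page free, bit 1 the high half. */
static unsigned long tag_free_half(unsigned long tag, unsigned long addr)
{
	return tag | ((addr & (PAGE_SIZE / 2)) ? 2 : 1);
}

int main(void)
{
	unsigned long tag = 0;

	tag = tag_free_half(tag, 0x10000UL);			/* low half:  tag == 1 */
	tag = tag_free_half(tag, 0x10000UL + PAGE_SIZE / 2);	/* high half: tag == 3 */
	printf("tag = %lu (3 means the whole page may be freed)\n", tag);
	return 0;
}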
@@ -141,6 +141,8 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 	spin_unlock(&mm->page_table_lock);
 }
+extern void __flush_tlb_mm(unsigned long, unsigned long);
+/* Activate a new MM instance for the current task. */
 static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
 {
......
@@ -35,11 +35,11 @@ static __inline__ void free_pgd_fast(pgd_t *pgd)
 	struct page *page = virt_to_page(pgd);
 	preempt_disable();
-	if (!page->pprev_hash) {
-		(unsigned long *)page->next_hash = pgd_quicklist;
+	if (!page->lru.prev) {
+		(unsigned long *)page->lru.next = pgd_quicklist;
 		pgd_quicklist = (unsigned long *)page;
 	}
-	(unsigned long)page->pprev_hash |=
+	(unsigned long)page->lru.prev |=
 		(((unsigned long)pgd & (PAGE_SIZE / 2)) ? 2 : 1);
 	pgd_cache_size++;
 	preempt_enable();
@@ -51,7 +51,7 @@ static __inline__ pgd_t *get_pgd_fast(void)
 	preempt_disable();
 	if ((ret = (struct page *)pgd_quicklist) != NULL) {
-		unsigned long mask = (unsigned long)ret->pprev_hash;
+		unsigned long mask = (unsigned long)ret->lru.prev;
 		unsigned long off = 0;
 		if (mask & 1)
@@ -60,9 +60,9 @@ static __inline__ pgd_t *get_pgd_fast(void)
 			off = PAGE_SIZE / 2;
 			mask &= ~2;
 		}
-		(unsigned long)ret->pprev_hash = mask;
+		(unsigned long)ret->lru.prev = mask;
 		if (!mask)
-			pgd_quicklist = (unsigned long *)ret->next_hash;
+			pgd_quicklist = (unsigned long *)ret->lru.next;
 		ret = (struct page *)(__page_address(ret) + off);
 		pgd_cache_size--;
 		preempt_enable();
@@ -74,10 +74,10 @@ static __inline__ pgd_t *get_pgd_fast(void)
 		if (page) {
 			ret = (struct page *)page_address(page);
 			clear_page(ret);
-			(unsigned long)page->pprev_hash = 2;
+			(unsigned long)page->lru.prev = 2;
 			preempt_disable();
-			(unsigned long *)page->next_hash = pgd_quicklist;
+			(unsigned long *)page->lru.next = pgd_quicklist;
 			pgd_quicklist = (unsigned long *)page;
 			pgd_cache_size++;
 			preempt_enable();
......
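Note: get_pgd_fast() consumes the same mask from the other direction: bit 0 selects the low half, bit 1 selects the high half at offset PAGE_SIZE / 2, and once the mask reaches zero the page is unlinked from pgd_quicklist. A hedged sketch of just that selection step (pick_half is a made-up name, not the kernel API):

#define PAGE_SIZE 8192UL	/* sparc64: PAGE_SHIFT == 13 */

/* Clears one free bit and returns the byte offset of the half being
 * handed out; when *mask drops to 0 the caller must unlink the page. */
static unsigned long pick_half(unsigned long *mask)
{
	unsigned long off = 0;

	if (*mask & 1) {
		*mask &= ~1UL;		/* hand out the low half */
	} else {
		off = PAGE_SIZE / 2;	/* hand out the high half */
		*mask &= ~2UL;
	}
	return off;
}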
@@ -14,7 +14,6 @@
 #include <asm/spitfire.h>
 #include <asm/asi.h>
-#include <asm/mmu_context.h>
 #include <asm/system.h>
 #include <asm/page.h>
 #include <asm/processor.h>
@@ -63,6 +62,8 @@
 #ifndef __ASSEMBLY__
+#include <linux/sched.h>
 /* Certain architectures need to do special things when pte's
  * within a page table are directly modified. Thus, the following
  * hook is made available.
@@ -286,6 +287,7 @@ extern pgd_t swapper_pg_dir[1];
 #define mmu_lockarea(vaddr, len)	(vaddr)
 #define mmu_unlockarea(vaddr, len)	do { } while(0)
+struct vm_area_struct;
 extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
 /* Make a non-present pseudo-TTE. */
......
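Note: this hunk adds exactly one line; if that line is the struct vm_area_struct; forward declaration, it is the standard C idiom for keeping a header light: an incomplete type is enough for a prototype that only passes pointers, so pgtable.h need not include the header defining the struct. A tiny illustration with hypothetical names:

struct widget;				/* incomplete (forward) declaration */
extern void poke_widget(struct widget *, unsigned long);
/* Only code that dereferences the pointer needs the full definition;
 * declaring and calling poke_widget() does not. */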
@@ -3,6 +3,7 @@
 #include <linux/config.h>
 #include <linux/mm.h>
+#include <asm/mmu_context.h>
 /* TLB flush operations. */
@@ -22,43 +23,43 @@ extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
 	__flush_tlb_kernel_range(start,end)
 #define flush_tlb_mm(__mm) \
-do { if(CTX_VALID((__mm)->context)) \
+do { if (CTX_VALID((__mm)->context)) \
 	__flush_tlb_mm(CTX_HWBITS((__mm)->context), SECONDARY_CONTEXT); \
-} while(0)
+} while (0)
 #define flush_tlb_range(__vma, start, end) \
-do { if(CTX_VALID((__vma)->vm_mm->context)) { \
+do { if (CTX_VALID((__vma)->vm_mm->context)) { \
 	unsigned long __start = (start)&PAGE_MASK; \
 	unsigned long __end = PAGE_ALIGN(end); \
 	__flush_tlb_range(CTX_HWBITS((__vma)->vm_mm->context), __start, \
 			  SECONDARY_CONTEXT, __end, PAGE_SIZE, \
 			  (__end - __start)); \
 } \
-} while(0)
+} while (0)
 #define flush_tlb_vpte_range(__mm, start, end) \
-do { if(CTX_VALID((__mm)->context)) { \
+do { if (CTX_VALID((__mm)->context)) { \
 	unsigned long __start = (start)&PAGE_MASK; \
 	unsigned long __end = PAGE_ALIGN(end); \
 	__flush_tlb_range(CTX_HWBITS((__mm)->context), __start, \
 			  SECONDARY_CONTEXT, __end, PAGE_SIZE, \
 			  (__end - __start)); \
 } \
-} while(0)
+} while (0)
 #define flush_tlb_page(vma, page) \
 do { struct mm_struct *__mm = (vma)->vm_mm; \
-	if(CTX_VALID(__mm->context)) \
+	if (CTX_VALID(__mm->context)) \
 		__flush_tlb_page(CTX_HWBITS(__mm->context), (page)&PAGE_MASK, \
 				 SECONDARY_CONTEXT); \
-} while(0)
+} while (0)
 #define flush_tlb_vpte_page(mm, addr) \
 do { struct mm_struct *__mm = (mm); \
-	if(CTX_VALID(__mm->context)) \
+	if (CTX_VALID(__mm->context)) \
 		__flush_tlb_page(CTX_HWBITS(__mm->context), (addr)&PAGE_MASK, \
 				 SECONDARY_CONTEXT); \
-} while(0)
+} while (0)
 #else /* CONFIG_SMP */
......
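Note: every UP macro above is guarded by CTX_VALID(), so an address space that never received a hardware context (because it never ran) skips the MMU operation entirely; CTX_VALID() and CTX_HWBITS() live in asm/mmu_context.h, which is why this header now includes it. A hypothetical caller, for illustration only (example_set_pte is not a kernel function; it assumes the usual set_pte() helper and a vma/address pair in scope):

static void example_set_pte(struct vm_area_struct *vma, unsigned long addr,
			    pte_t *ptep, pte_t entry)
{
	set_pte(ptep, entry);		/* install the new translation */
	flush_tlb_page(vma, addr);	/* drop any stale TLB entry; a no-op
					 * when the mm has no valid context */
}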