Commit fecb0199 authored by Linus Torvalds

Merge bk://ppc.bkbits.net/for-linus-ppc

into penguin.transmeta.com:/home/penguin/torvalds/repositories/kernel/linux
parents a291da91 2fc01c94
@@ -44,6 +44,7 @@ SECTIONS
{ {
*(.data) *(.data)
*(.data1) *(.data1)
*(.data.boot)
*(.sdata) *(.sdata)
*(.sdata2) *(.sdata2)
*(.got.plt) *(.got) *(.got.plt) *(.got)
......
@@ -59,7 +59,9 @@ char netroot_string[] = "root=/dev/nfs rw ip=auto";
/* Serial port to use. */ /* Serial port to use. */
unsigned long com_port; unsigned long com_port;
bd_t hold_resid_buf; /* We need to make sure that this is before the images to ensure
* that it's in a mapped location. - Tom */
bd_t hold_resid_buf __attribute__ ((__section__ (".data.boot")));
bd_t *hold_residual = &hold_resid_buf; bd_t *hold_residual = &hold_resid_buf;
extern unsigned long serial_init(int chan, bd_t *bp); extern unsigned long serial_init(int chan, bd_t *bp);
......
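Taken together with the new *(.data.boot) rule added to the linker script in the first hunk, the comment above describes a simple pattern: tag early-boot data with a named section so the linker keeps it ahead of the appended images, i.e. inside the region that is already mapped when the boot code starts running. A minimal sketch of the C side of that pairing (illustrative only; the variable name here is hypothetical, the real instance is hold_resid_buf above):

        /* Keep this buffer in .data.boot so the "*(.data.boot)" linker rule
         * places it before the appended images, where it is guaranteed to be
         * mapped early in boot. */
        static bd_t early_board_info __attribute__ ((__section__ (".data.boot")));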
@@ -258,8 +258,9 @@ endmenu
mainmenu_option next_comment mainmenu_option next_comment
comment 'General setup' comment 'General setup'
bool 'Prompt for advanced kernel configuration options' CONFIG_ADVANCED_OPTIONS
bool 'High memory support' CONFIG_HIGHMEM bool 'High memory support' CONFIG_HIGHMEM
dep_bool ' Support for PTEs in high memory' CONFIG_HIGHPTE $CONFIG_HIGHMEM
bool 'Prompt for advanced kernel configuration options' CONFIG_ADVANCED_OPTIONS
if [ "$CONFIG_ADVANCED_OPTIONS" = "y" ]; then if [ "$CONFIG_ADVANCED_OPTIONS" = "y" ]; then
if [ "$CONFIG_HIGHMEM" = "y" ]; then if [ "$CONFIG_HIGHMEM" = "y" ]; then
bool " Set high memory pool address" CONFIG_HIGHMEM_START_BOOL bool " Set high memory pool address" CONFIG_HIGHMEM_START_BOOL
......
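The new CONFIG_HIGHPTE option (dependent on CONFIG_HIGHMEM) allows user page-table pages to be allocated from high memory. A minimal sketch of the kind of compile-time gate it drives, mirroring the pte_alloc_one() change to include/asm-ppc/pgalloc.h later in this diff (the PTE_PAGE_GFP name is hypothetical, used only for illustration):

        #ifdef CONFIG_HIGHPTE
        #define PTE_PAGE_GFP    (GFP_KERNEL | __GFP_HIGHMEM)    /* pte pages may live in highmem */
        #else
        #define PTE_PAGE_GFP    GFP_KERNEL                      /* pte pages stay in lowmem */
        #endif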
@@ -288,15 +288,12 @@ ret_from_syscall_2:
*/ */
_GLOBAL(_switch) _GLOBAL(_switch)
stwu r1,-INT_FRAME_SIZE(r1) stwu r1,-INT_FRAME_SIZE(r1)
stw r0,GPR0(r1) mflr r0
lwz r0,0(r1) stw r0,INT_FRAME_SIZE+4(r1)
stw r0,GPR1(r1)
/* r3-r13 are caller saved -- Cort */ /* r3-r13 are caller saved -- Cort */
SAVE_GPR(2, r1)
SAVE_8GPRS(14, r1) SAVE_8GPRS(14, r1)
SAVE_10GPRS(22, r1) SAVE_10GPRS(22, r1)
mflr r20 /* Return to switch caller */ stw r0,_NIP(r1) /* Return to switch caller */
stw r20,INT_FRAME_SIZE+4(r1)
mfmsr r22 mfmsr r22
li r0,MSR_FP /* Disable floating-point */ li r0,MSR_FP /* Disable floating-point */
#ifdef CONFIG_ALTIVEC #ifdef CONFIG_ALTIVEC
@@ -309,17 +306,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
andc r22,r22,r0 andc r22,r22,r0
mtmsr r22 mtmsr r22
isync isync
1: stw r20,_NIP(r1) 1: stw r22,_MSR(r1)
stw r22,_MSR(r1)
stw r20,_LINK(r1)
mfcr r20 mfcr r20
mfctr r22
mfspr r23,XER
stw r20,_CCR(r1) stw r20,_CCR(r1)
stw r22,_CTR(r1)
stw r23,_XER(r1)
li r0,0x0ff0
stw r0,TRAP(r1)
stw r1,KSP(r3) /* Set old stack pointer */ stw r1,KSP(r3) /* Set old stack pointer */
tophys(r0,r4) tophys(r0,r4)
......
@@ -839,7 +839,7 @@ hash_page:
/* /*
* We hard enable here (but first soft disable) so that the hash_page * We hard enable here (but first soft disable) so that the hash_page
* code can spin on the hash_table_lock without problem on a shared * code can spin on the mmu_hash_lock without problem on a shared
* processor * processor
*/ */
li r0,0 li r0,0
......
@@ -29,7 +29,7 @@
.align 5 .align 5
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
.comm hash_table_lock,4 .comm mmu_hash_lock,4
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
_GLOBAL(is_msr_enabled) _GLOBAL(is_msr_enabled)
......
@@ -47,17 +47,10 @@ extern unsigned long yield_count;
#define run_light_on(x) do { } while (0) #define run_light_on(x) do { } while (0)
#endif /* CONFIG_PPC_ISERIES */ #endif /* CONFIG_PPC_ISERIES */
void zero_paged(void);
void power_save(void); void power_save(void);
unsigned long zero_paged_on = 0; unsigned long zero_paged_on;
unsigned long powersave_nap = 0; unsigned long powersave_nap;
unsigned long *zero_cache; /* head linked list of pre-zero'd pages */
atomic_t zerototal; /* # pages zero'd over time */
atomic_t zeropage_hits; /* # zero'd pages request that we've done */
atomic_t zero_sz; /* # currently pre-zero'd pages */
atomic_t zeropage_calls; /* # zero'd pages request that've been made */
int idled(void) int idled(void)
{ {
@@ -92,7 +85,6 @@ int idled(void)
if (need_resched()) { if (need_resched()) {
run_light_on(1); run_light_on(1);
schedule(); schedule();
check_pgt_cache();
} }
#ifdef CONFIG_PPC_ISERIES #ifdef CONFIG_PPC_ISERIES
else { else {
@@ -115,141 +107,6 @@ int cpu_idle(void)
return 0; return 0;
} }
#if 0
/*
* Returns a pre-zero'd page from the list otherwise returns
* NULL.
*/
unsigned long get_zero_page_fast(void)
{
unsigned long page = 0;
atomic_inc(&zero_cache_calls);
if ( zero_quicklist )
{
/* atomically remove this page from the list */
register unsigned long tmp;
asm ( "101:lwarx %1,0,%3\n" /* reserve zero_cache */
" lwz %0,0(%1)\n" /* get next -- new zero_cache */
PPC405_ERR77(0,%3)
" stwcx. %0,0,%3\n" /* update zero_cache */
" bne- 101b\n" /* if lost reservation try again */
: "=&r" (tmp), "=&r" (page), "+m" (zero_cache)
: "r" (&zero_quicklist)
: "cc" );
#ifdef CONFIG_SMP
/* if another cpu beat us above this can happen -- Cort */
if ( page == 0 )
return 0;
#endif /* CONFIG_SMP */
/* we can update zerocount after the fact since it is not
* used for anything but control of a loop which doesn't
* matter since it won't affect anything if it zeros one
* less page -- Cort
*/
atomic_inc((atomic_t *)&zero_cache_hits);
atomic_dec((atomic_t *)&zero_cache_sz);
/* zero out the pointer to next in the page */
*(unsigned long *)page = 0;
return page;
}
return 0;
}
/*
* Experimental stuff to zero out pages in the idle task
* to speed up get_free_pages(). Zero's out pages until
* we've reached the limit of zero'd pages. We handle
* reschedule()'s in here so when we return we know we've
* zero'd all we need to for now.
*/
int zero_cache_water[2] = { 25, 96 }; /* high and low water marks for zero cache */
void zero_paged(void)
{
unsigned long pageptr = 0; /* current page being zero'd */
unsigned long bytecount = 0;
register unsigned long tmp;
pte_t *pte;
if ( atomic_read(&zero_cache_sz) >= zero_cache_water[0] )
return;
while ( (atomic_read(&zero_cache_sz) < zero_cache_water[1]) && !need_resched() )
{
/*
* Mark a page as reserved so we can mess with it
* If we're interrupted we keep this page and our place in it
* since we validly hold it and it's reserved for us.
*/
pageptr = __get_free_pages(GFP_ATOMIC, 0);
if ( !pageptr )
return;
cond_resched();
/*
* Make the page no cache so we don't blow our cache with 0's
*/
pte = find_pte(&init_mm, pageptr);
if ( !pte )
{
printk("pte NULL in zero_paged()\n");
return;
}
pte_uncache(*pte);
flush_tlb_page(find_vma(&init_mm,pageptr),pageptr);
/*
* Important here to not take time away from real processes.
*/
for ( bytecount = 0; bytecount < PAGE_SIZE ; bytecount += 4 )
{
cond_resched();
*(unsigned long *)(bytecount + pageptr) = 0;
}
/*
* If we finished zero-ing out a page add this page to
* the zero_cache atomically -- we can't use
* down/up since we can't sleep in idle.
* Disabling interrupts is also a bad idea since we would
* steal time away from real processes.
* We can also have several zero_paged's running
* on different processors so we can't interfere with them.
* So we update the list atomically without locking it.
* -- Cort
*/
/* turn cache on for this page */
pte_cache(*pte);
flush_tlb_page(find_vma(&init_mm,pageptr),pageptr);
/* atomically add this page to the list */
asm ( "101:lwarx %0,0,%2\n" /* reserve zero_cache */
" stw %0,0(%3)\n" /* update *pageptr */
#ifdef CONFIG_SMP
" sync\n" /* let store settle */
#endif
PPC405_ERR77(0,%2)
" stwcx. %3,0,%2\n" /* update zero_cache in mem */
" bne- 101b\n" /* if lost reservation try again */
: "=&r" (tmp), "+m" (zero_quicklist)
: "r" (&zero_quicklist), "r" (pageptr)
: "cc" );
/*
* This variable is used in the above loop and nowhere
* else so the worst that could happen is we would
* zero out one more or one less page than we want
* per processor on the machine. This is because
* we could add our page to the list but not have
* zerocount updated yet when another processor
* reads it. -- Cort
*/
atomic_inc((atomic_t *)&zero_cache_sz);
atomic_inc((atomic_t *)&zero_cache_total);
}
}
#endif /* 0 */
void power_save(void) void power_save(void)
{ {
unsigned long hid0; unsigned long hid0;
......
@@ -369,34 +369,39 @@ _GLOBAL(_tlbia)
isync isync
#else #else
#if defined(CONFIG_SMP) #if defined(CONFIG_SMP)
rlwinm r8,r1,0,0,18
lwz r8,TI_CPU(r8)
oris r8,r8,10
mfmsr r10 mfmsr r10
SYNC SYNC
rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */ rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
rlwinm r0,r0,0,28,26 /* clear DR */
mtmsr r0 mtmsr r0
SYNC SYNC_601
lis r9,hash_table_lock@h isync
ori r9,r9,hash_table_lock@l lis r9,mmu_hash_lock@h
rlwinm r8,r1,0,0,18 ori r9,r9,mmu_hash_lock@l
lwz r8,TI_CPU(r8) tophys(r9,r9)
oris r8,r8,10
10: lwarx r7,0,r9 10: lwarx r7,0,r9
cmpi 0,r7,0 cmpi 0,r7,0
bne- 10b bne- 10b
/* No 405 Erratum 77 fix needed here, because 4xx can't do SMP */
stwcx. r8,0,r9 stwcx. r8,0,r9
bne- 10b bne- 10b
#endif /* CONFIG_SMP */
sync sync
tlbia tlbia
sync sync
#ifdef CONFIG_SMP
TLBSYNC TLBSYNC
li r0,0 li r0,0
stw r0,0(r9) /* clear hash_table_lock */ stw r0,0(r9) /* clear mmu_hash_lock */
mtmsr r10 mtmsr r10
SYNC SYNC_601
#endif isync
#endif #else /* CONFIG_SMP */
sync
tlbia
sync
#endif /* CONFIG_SMP */
#endif /* CONFIG_4xx */
blr blr
/* /*
@@ -415,33 +420,37 @@ _GLOBAL(_tlbie)
10: 10:
#else #else
#if defined(CONFIG_SMP) #if defined(CONFIG_SMP)
rlwinm r8,r1,0,0,18
lwz r8,TI_CPU(r8)
oris r8,r8,11
mfmsr r10 mfmsr r10
SYNC SYNC
rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */ rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
rlwinm r0,r0,0,28,26 /* clear DR */
mtmsr r0 mtmsr r0
SYNC SYNC_601
lis r9,hash_table_lock@h isync
ori r9,r9,hash_table_lock@l lis r9,mmu_hash_lock@h
rlwinm r8,r1,0,0,18 ori r9,r9,mmu_hash_lock@l
lwz r8,TI_CPU(r8) tophys(r9,r9)
oris r8,r8,11
10: lwarx r7,0,r9 10: lwarx r7,0,r9
cmpi 0,r7,0 cmpi 0,r7,0
bne- 10b bne- 10b
PPC405_ERR77(0,r9)
stwcx. r8,0,r9 stwcx. r8,0,r9
bne- 10b bne- 10b
eieio eieio
#endif /* CONFIG_SMP */
tlbie r3 tlbie r3
sync sync
#ifdef CONFIG_SMP
TLBSYNC TLBSYNC
li r0,0 li r0,0
stw r0,0(r9) /* clear hash_table_lock */ stw r0,0(r9) /* clear mmu_hash_lock */
mtmsr r10 mtmsr r10
SYNC SYNC_601
#endif isync
#else /* CONFIG_SMP */
tlbie r3
sync
#endif /* CONFIG_SMP */
#endif /* CONFIG_4xx */ #endif /* CONFIG_4xx */
blr blr
@@ -630,6 +639,40 @@ _GLOBAL(__flush_dcache_icache)
isync isync
blr blr
/*
* Flush a particular page from the data cache to RAM, identified
* by its physical address. We turn off the MMU so we can just use
* the physical address (this may be a highmem page without a kernel
* mapping).
*
* void __flush_dcache_icache_phys(unsigned long physaddr)
*/
_GLOBAL(__flush_dcache_icache_phys)
mfspr r5,PVR
rlwinm r5,r5,16,16,31
cmpi 0,r5,1
beqlr /* for 601, do nothing */
mfmsr r10
rlwinm r0,r10,0,28,26 /* clear DR */
mtmsr r0
isync
rlwinm r3,r3,0,0,19 /* Get page base address */
li r4,4096/L1_CACHE_LINE_SIZE /* Number of lines in a page */
mtctr r4
mr r6,r3
0: dcbst 0,r3 /* Write line to ram */
addi r3,r3,L1_CACHE_LINE_SIZE
bdnz 0b
sync
mtctr r4
1: icbi 0,r6
addi r6,r6,L1_CACHE_LINE_SIZE
bdnz 1b
sync
mtmsr r10 /* restore DR */
isync
blr
/* /*
* Clear a page using the dcbz instruction, which doesn't cause any * Clear a page using the dcbz instruction, which doesn't cause any
* memory traffic (except to write out any cache lines which get * memory traffic (except to write out any cache lines which get
......
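The comment above explains the point of the new routine: a highmem page may have no kernel virtual mapping, so the flush is done with data translation off, directly on the physical address. A minimal sketch of how a C caller is expected to derive that address, mirroring the flush_icache_page() change in arch/ppc/mm/init.c further down (the helper name is hypothetical):

        /* Flush a page's data cache to RAM and invalidate its icache lines,
         * identified only by physical address (works for highmem pages). */
        static void flush_page_by_phys(struct page *page)
        {
                unsigned long phys = ((page - mem_map) << PAGE_SHIFT) + PPC_MEMSTART;

                __flush_dcache_icache_phys(phys);
        }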
@@ -218,13 +218,13 @@ EXPORT_SYMBOL(__global_sti);
EXPORT_SYMBOL(__global_save_flags); EXPORT_SYMBOL(__global_save_flags);
EXPORT_SYMBOL(__global_restore_flags); EXPORT_SYMBOL(__global_restore_flags);
#ifdef SPINLOCK_DEBUG #ifdef SPINLOCK_DEBUG
EXPORT_SYMBOL(_spin_lock); EXPORT_SYMBOL(_raw_spin_lock);
EXPORT_SYMBOL(_spin_unlock); EXPORT_SYMBOL(_raw_spin_unlock);
EXPORT_SYMBOL(spin_trylock); EXPORT_SYMBOL(_raw_spin_trylock);
EXPORT_SYMBOL(_read_lock); EXPORT_SYMBOL(_raw_read_lock);
EXPORT_SYMBOL(_read_unlock); EXPORT_SYMBOL(_raw_read_unlock);
EXPORT_SYMBOL(_write_lock); EXPORT_SYMBOL(_raw_write_lock);
EXPORT_SYMBOL(_write_unlock); EXPORT_SYMBOL(_raw_write_unlock);
#endif #endif
EXPORT_SYMBOL(smp_call_function); EXPORT_SYMBOL(smp_call_function);
EXPORT_SYMBOL(smp_hw_index); EXPORT_SYMBOL(smp_hw_index);
@@ -361,7 +361,7 @@ EXPORT_SYMBOL(set_context);
EXPORT_SYMBOL(handle_mm_fault); /* For MOL */ EXPORT_SYMBOL(handle_mm_fault); /* For MOL */
EXPORT_SYMBOL_NOVERS(disarm_decr); EXPORT_SYMBOL_NOVERS(disarm_decr);
#ifdef CONFIG_PPC_STD_MMU #ifdef CONFIG_PPC_STD_MMU
EXPORT_SYMBOL(flush_hash_page); /* For MOL */ EXPORT_SYMBOL(flush_hash_pages); /* For MOL */
extern long *intercept_table; extern long *intercept_table;
EXPORT_SYMBOL(intercept_table); EXPORT_SYMBOL(intercept_table);
#endif #endif
......
@@ -291,26 +291,6 @@ void smp_call_function_interrupt(void)
atomic_inc(&call_data->finished); atomic_inc(&call_data->finished);
} }
/*
* Task migration callback.
*/
void smp_task_migration_interrupt(void *new_task)
{
task_t *p;
p = new_task;
sched_task_migrated(p);
}
/*
* This function sends a 'task migration' IPI to another CPU.
* Must be called from syscall contexts, with interrupts *enabled*.
*/
void smp_migrate_task(int cpu, task_t *p)
{
__smp_call_function(smp_task_migration_interrupt, p, 0, cpu);
}
void __init smp_boot_cpus(void) void __init smp_boot_cpus(void)
{ {
int i, cpu_nr; int i, cpu_nr;
......
@@ -28,7 +28,7 @@
* since they may inhibit forward progress by other CPUs in getting * since they may inhibit forward progress by other CPUs in getting
* a lock. * a lock.
*/ */
static unsigned long __spin_trylock(volatile unsigned long *lock) unsigned long __spin_trylock(volatile unsigned long *lock)
{ {
unsigned long ret; unsigned long ret;
......
@@ -50,8 +50,6 @@
#include <asm/smp.h> #include <asm/smp.h>
#include <asm/machdep.h> #include <asm/machdep.h>
extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep);
/* This function will allocate the requested contiguous pages and /* This function will allocate the requested contiguous pages and
* map them into the kernel's vmalloc() space. This is done so we * map them into the kernel's vmalloc() space. This is done so we
* get unique mapping for these pages, outside of the kernel's 1:1 * get unique mapping for these pages, outside of the kernel's 1:1
@@ -157,6 +155,6 @@ size_t size, int direction)
{ {
unsigned long start; unsigned long start;
start = (unsigned long)(page->virtual) + offset; start = page_address(page) + offset;
consistent_sync(start, size, direction); consistent_sync(start, size, direction);
} }
@@ -154,6 +154,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
/* Since 4xx supports per-page execute permission, /* Since 4xx supports per-page execute permission,
* we lazily flush dcache to icache. */ * we lazily flush dcache to icache. */
ptep = NULL;
if (get_pteptr(mm, address, &ptep) && pte_present(*ptep)) { if (get_pteptr(mm, address, &ptep) && pte_present(*ptep)) {
struct page *page = pte_page(*ptep); struct page *page = pte_page(*ptep);
@@ -164,9 +165,12 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
} }
pte_update(ptep, 0, _PAGE_HWEXEC); pte_update(ptep, 0, _PAGE_HWEXEC);
_tlbie(address); _tlbie(address);
pte_unmap(ptep);
up_read(&mm->mmap_sem); up_read(&mm->mmap_sem);
return; return;
} }
if (ptep != NULL)
pte_unmap(ptep);
#endif #endif
/* a read */ /* a read */
} else { } else {
@@ -289,27 +293,18 @@ pte_t *va_to_pte(unsigned long address)
struct mm_struct *mm; struct mm_struct *mm;
if (address < TASK_SIZE) if (address < TASK_SIZE)
mm = current->mm; return NULL;
else
mm = &init_mm;
dir = pgd_offset(mm, address & PAGE_MASK); dir = pgd_offset(&init_mm, address);
if (dir) { if (dir) {
pmd = pmd_offset(dir, address & PAGE_MASK); pmd = pmd_offset(dir, address & PAGE_MASK);
if (pmd && pmd_present(*pmd)) { if (pmd && pmd_present(*pmd)) {
pte = pte_offset(pmd, address & PAGE_MASK); pte = pte_offset_kernel(pmd, address & PAGE_MASK);
if (pte && pte_present(*pte)) { if (pte && pte_present(*pte))
return(pte); return(pte);
} }
} }
else { return NULL;
return (0);
}
}
else {
return (0);
}
return (0);
} }
unsigned long va_to_phys(unsigned long address) unsigned long va_to_phys(unsigned long address)
@@ -334,7 +329,7 @@ print_8xx_pte(struct mm_struct *mm, unsigned long addr)
if (pgd) { if (pgd) {
pmd = pmd_offset(pgd, addr & PAGE_MASK); pmd = pmd_offset(pgd, addr & PAGE_MASK);
if (pmd && pmd_present(*pmd)) { if (pmd && pmd_present(*pmd)) {
pte = pte_offset(pmd, addr & PAGE_MASK); pte = pte_offset_kernel(pmd, addr & PAGE_MASK);
if (pte) { if (pte) {
printk(" (0x%08lx)->(0x%08lx)->0x%08lx\n", printk(" (0x%08lx)->(0x%08lx)->0x%08lx\n",
(long)pgd, (long)pte, (long)pte_val(*pte)); (long)pgd, (long)pte, (long)pte_val(*pte));
@@ -375,7 +370,7 @@ get_8xx_pte(struct mm_struct *mm, unsigned long addr)
if (pgd) { if (pgd) {
pmd = pmd_offset(pgd, addr & PAGE_MASK); pmd = pmd_offset(pgd, addr & PAGE_MASK);
if (pmd && pmd_present(*pmd)) { if (pmd && pmd_present(*pmd)) {
pte = pte_offset(pmd, addr & PAGE_MASK); pte = pte_offset_kernel(pmd, addr & PAGE_MASK);
if (pte) { if (pte) {
retval = (int)pte_val(*pte); retval = (int)pte_val(*pte);
} }
......
@@ -36,7 +36,7 @@
#include <kernel/ppc_defs.h> #include <kernel/ppc_defs.h>
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
.comm hash_table_lock,4 .comm mmu_hash_lock,4
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
/* /*
@@ -62,8 +62,8 @@ hash_page:
#endif #endif
tophys(r7,0) /* gets -KERNELBASE into r7 */ tophys(r7,0) /* gets -KERNELBASE into r7 */
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
addis r2,r7,hash_table_lock@h addis r2,r7,mmu_hash_lock@h
ori r2,r2,hash_table_lock@l ori r2,r2,mmu_hash_lock@l
lis r0,0x0fff lis r0,0x0fff
b 10f b 10f
11: lwz r6,0(r2) 11: lwz r6,0(r2)
@@ -88,8 +88,8 @@ hash_page:
rlwimi r3,r23,32-12,29,29 /* MSR_PR -> _PAGE_USER */ rlwimi r3,r23,32-12,29,29 /* MSR_PR -> _PAGE_USER */
112: add r5,r5,r7 /* convert to phys addr */ 112: add r5,r5,r7 /* convert to phys addr */
rlwimi r5,r4,12,20,29 /* insert top 10 bits of address */ rlwimi r5,r4,12,20,29 /* insert top 10 bits of address */
lwz r5,0(r5) /* get pmd entry */ lwz r2,0(r5) /* get pmd entry */
rlwinm. r5,r5,0,0,19 /* extract address of pte page */ rlwinm. r2,r2,0,0,19 /* extract address of pte page */
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
beq- hash_page_out /* return if no mapping */ beq- hash_page_out /* return if no mapping */
#else #else
@@ -99,7 +99,6 @@ hash_page:
to the address following the rfi. */ to the address following the rfi. */
beqlr- beqlr-
#endif #endif
add r2,r5,r7 /* convert to phys addr */
rlwimi r2,r4,22,20,29 /* insert next 10 bits of address */ rlwimi r2,r4,22,20,29 /* insert next 10 bits of address */
rlwinm r0,r3,32-3,24,24 /* _PAGE_RW access -> _PAGE_DIRTY */ rlwinm r0,r3,32-3,24,24 /* _PAGE_RW access -> _PAGE_DIRTY */
ori r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE ori r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE
@@ -142,9 +141,9 @@ retry:
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
eieio eieio
addis r2,r7,hash_table_lock@ha addis r2,r7,mmu_hash_lock@ha
li r0,0 li r0,0
stw r0,hash_table_lock@l(r2) stw r0,mmu_hash_lock@l(r2)
#endif #endif
/* Return from the exception */ /* Return from the exception */
@@ -174,16 +173,16 @@ retry:
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
hash_page_out: hash_page_out:
eieio eieio
addis r2,r7,hash_table_lock@ha addis r2,r7,mmu_hash_lock@ha
li r0,0 li r0,0
stw r0,hash_table_lock@l(r2) stw r0,mmu_hash_lock@l(r2)
blr blr
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
/* /*
* Add an entry for a particular page to the hash table. * Add an entry for a particular page to the hash table.
* *
* add_hash_page(unsigned context, unsigned long va, pte_t pte) * add_hash_page(unsigned context, unsigned long va, unsigned long pmdval)
* *
* We assume any necessary modifications to the pte (e.g. setting * We assume any necessary modifications to the pte (e.g. setting
* the accessed bit) have already been done and that there is actually * the accessed bit) have already been done and that there is actually
@@ -199,31 +198,41 @@ _GLOBAL(add_hash_page)
mulli r0,r0,0x111 /* multiply by ESID skew */ mulli r0,r0,0x111 /* multiply by ESID skew */
add r3,r3,r0 /* note create_hpte trims to 24 bits */ add r3,r3,r0 /* note create_hpte trims to 24 bits */
#ifdef CONFIG_SMP
rlwinm r8,r1,0,0,18 /* use cpu number to make tag */
lwz r8,TI_CPU(r8) /* to go in mmu_hash_lock */
oris r8,r8,12
#endif /* CONFIG_SMP */
/* /*
* We disable interrupts here, even on UP, because we don't * We disable interrupts here, even on UP, because we don't
* want to race with hash_page, and because we want the * want to race with hash_page, and because we want the
* _PAGE_HASHPTE bit to be a reliable indication of whether * _PAGE_HASHPTE bit to be a reliable indication of whether
* the HPTE exists (or at least whether one did once). -- paulus * the HPTE exists (or at least whether one did once).
* We also turn off the MMU for data accesses so that we
* we can't take a hash table miss (assuming the code is
* covered by a BAT). -- paulus
*/ */
mfmsr r10 mfmsr r10
SYNC SYNC
rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */ rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
rlwinm r0,r0,0,28,26 /* clear MSR_DR */
mtmsr r0 mtmsr r0
SYNC SYNC_601
isync
tophys(r7,0)
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
lis r9,hash_table_lock@h addis r9,r7,mmu_hash_lock@ha
ori r9,r9,hash_table_lock@l addi r9,r9,mmu_hash_lock@l
rlwinm r8,r1,0,0,18 10: lwarx r0,0,r9 /* take the mmu_hash_lock */
lwz r8,TI_CPU(r8) cmpi 0,r0,0
oris r8,r8,12
10: lwarx r7,0,r9
cmpi 0,r7,0
bne- 11f bne- 11f
stwcx. r8,0,r9 stwcx. r8,0,r9
beq+ 12f beq+ 12f
11: lwz r7,0(r9) 11: lwz r0,0(r9)
cmpi 0,r7,0 cmpi 0,r0,0
beq 10b beq 10b
b 11b b 11b
12: isync 12: isync
@@ -234,18 +243,18 @@ _GLOBAL(add_hash_page)
* If _PAGE_HASHPTE was already set, we don't replace the existing * If _PAGE_HASHPTE was already set, we don't replace the existing
* HPTE, so we just unlock and return. * HPTE, so we just unlock and return.
*/ */
mr r7,r5 mr r8,r5
1: lwarx r6,0,r7 rlwimi r8,r4,22,20,29
1: lwarx r6,0,r8
andi. r0,r6,_PAGE_HASHPTE andi. r0,r6,_PAGE_HASHPTE
bne 9f /* if HASHPTE already set, done */ bne 9f /* if HASHPTE already set, done */
ori r5,r6,_PAGE_ACCESSED|_PAGE_HASHPTE ori r5,r6,_PAGE_HASHPTE
stwcx. r5,0,r7 stwcx. r5,0,r8
bne- 1b bne- 1b
li r7,0 /* no address offset needed */
bl create_hpte bl create_hpte
lis r8,htab_preloads@ha addis r8,r7,htab_preloads@ha
lwz r3,htab_preloads@l(r8) lwz r3,htab_preloads@l(r8)
addi r3,r3,1 addi r3,r3,1
stw r3,htab_preloads@l(r8) stw r3,htab_preloads@l(r8)
@@ -254,15 +263,16 @@ _GLOBAL(add_hash_page)
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
eieio eieio
li r0,0 li r0,0
stw r0,0(r9) /* clear hash_table_lock */ stw r0,0(r9) /* clear mmu_hash_lock */
#endif #endif
/* reenable interrupts and DR */
mtmsr r10
SYNC_601
isync
lwz r0,4(r1) lwz r0,4(r1)
mtlr r0 mtlr r0
/* reenable interrupts */
mtmsr r10
SYNC
blr blr
/* /*
@@ -273,7 +283,7 @@ _GLOBAL(add_hash_page)
* linux PTE (before setting _PAGE_HASHPTE) and r7 contains the * linux PTE (before setting _PAGE_HASHPTE) and r7 contains the
* offset to be added to addresses (0 if the MMU is on, * offset to be added to addresses (0 if the MMU is on,
* -KERNELBASE if it is off). * -KERNELBASE if it is off).
* On SMP, the caller should have the hash_table_lock held. * On SMP, the caller should have the mmu_hash_lock held.
* We assume that the caller has (or will) set the _PAGE_HASHPTE * We assume that the caller has (or will) set the _PAGE_HASHPTE
* bit in the linux PTE in memory. The value passed in r6 should * bit in the linux PTE in memory. The value passed in r6 should
* be the old linux PTE value; if it doesn't have _PAGE_HASHPTE set * be the old linux PTE value; if it doesn't have _PAGE_HASHPTE set
@@ -486,41 +496,73 @@ found_slot:
/* /*
* Flush the entry for a particular page from the hash table. * Flush the entry for a particular page from the hash table.
* *
* flush_hash_page(unsigned context, unsigned long va, pte_t *ptep) * flush_hash_pages(unsigned context, unsigned long va, unsigned long pmdval,
* int count)
* *
* We assume that there is a hash table in use (Hash != 0). * We assume that there is a hash table in use (Hash != 0).
*/ */
_GLOBAL(flush_hash_page) _GLOBAL(flush_hash_pages)
/* Convert context and va to VSID */ tophys(r7,0)
mulli r3,r3,897*16 /* multiply context by context skew */
rlwinm r0,r4,4,28,31 /* get ESID (top 4 bits of va) */
mulli r0,r0,0x111 /* multiply by ESID skew */
add r3,r3,r0 /* note code below trims to 24 bits */
/* /*
* We disable interrupts here, even on UP, because we want * We disable interrupts here, even on UP, because we want
* the _PAGE_HASHPTE bit to be a reliable indication of * the _PAGE_HASHPTE bit to be a reliable indication of
* whether the HPTE exists. -- paulus * whether the HPTE exists (or at least whether one did once).
* We also turn off the MMU for data accesses so that we
* we can't take a hash table miss (assuming the code is
* covered by a BAT). -- paulus
*/ */
mfmsr r10 mfmsr r10
rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
SYNC SYNC
rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
rlwinm r0,r0,0,28,26 /* clear MSR_DR */
mtmsr r0 mtmsr r0
SYNC SYNC_601
isync
/* First find a PTE in the range that has _PAGE_HASHPTE set */
rlwimi r5,r4,22,20,29
1: lwz r0,0(r5)
cmpwi cr1,r6,1
andi. r0,r0,_PAGE_HASHPTE
bne 2f
ble cr1,19f
addi r4,r4,0x1000
addi r5,r5,4
addi r6,r6,-1
b 1b
/* Convert context and va to VSID */
2: mulli r3,r3,897*16 /* multiply context by context skew */
rlwinm r0,r4,4,28,31 /* get ESID (top 4 bits of va) */
mulli r0,r0,0x111 /* multiply by ESID skew */
add r3,r3,r0 /* note code below trims to 24 bits */
/* Construct the high word of the PPC-style PTE (r11) */
#ifndef CONFIG_PPC64BRIDGE
rlwinm r11,r3,7,1,24 /* put VSID in 0x7fffff80 bits */
rlwimi r11,r4,10,26,31 /* put in API (abbrev page index) */
#else /* CONFIG_PPC64BRIDGE */
clrlwi r3,r3,8 /* reduce vsid to 24 bits */
sldi r11,r3,12 /* shift vsid into position */
rlwimi r11,r4,16,20,24 /* put in API (abbrev page index) */
#endif /* CONFIG_PPC64BRIDGE */
SET_V(r11) /* set V (valid) bit */
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
lis r9,hash_table_lock@h addis r9,r7,mmu_hash_lock@ha
ori r9,r9,hash_table_lock@l addi r9,r9,mmu_hash_lock@l
rlwinm r8,r1,0,0,18 rlwinm r8,r1,0,0,18
add r8,r8,r7
lwz r8,TI_CPU(r8) lwz r8,TI_CPU(r8)
oris r8,r8,9 oris r8,r8,9
10: lwarx r7,0,r9 10: lwarx r0,0,r9
cmpi 0,r7,0 cmpi 0,r0,0
bne- 11f bne- 11f
stwcx. r8,0,r9 stwcx. r8,0,r9
beq+ 12f beq+ 12f
11: lwz r7,0(r9) 11: lwz r0,0(r9)
cmpi 0,r7,0 cmpi 0,r0,0
beq 10b beq 10b
b 11b b 11b
12: isync 12: isync
@@ -528,69 +570,72 @@ _GLOBAL(flush_hash_page)
/* /*
* Check the _PAGE_HASHPTE bit in the linux PTE. If it is * Check the _PAGE_HASHPTE bit in the linux PTE. If it is
* already clear, we're done. If not, clear it (atomically) * already clear, we're done (for this pte). If not,
* and proceed. -- paulus. * clear it (atomically) and proceed. -- paulus.
*/ */
1: lwarx r6,0,r5 /* fetch the pte */ 33: lwarx r8,0,r5 /* fetch the pte */
andi. r0,r6,_PAGE_HASHPTE andi. r0,r8,_PAGE_HASHPTE
beq 9f /* done if HASHPTE is already clear */ beq 8f /* done if HASHPTE is already clear */
rlwinm r6,r6,0,31,29 /* clear HASHPTE bit */ rlwinm r8,r8,0,31,29 /* clear HASHPTE bit */
stwcx. r6,0,r5 /* update the pte */ stwcx. r8,0,r5 /* update the pte */
bne- 1b bne- 33b
/* Construct the high word of the PPC-style PTE (r5) */
#ifndef CONFIG_PPC64BRIDGE
rlwinm r5,r3,7,1,24 /* put VSID in 0x7fffff80 bits */
rlwimi r5,r4,10,26,31 /* put in API (abbrev page index) */
#else /* CONFIG_PPC64BRIDGE */
clrlwi r3,r3,8 /* reduce vsid to 24 bits */
sldi r5,r3,12 /* shift vsid into position */
rlwimi r5,r4,16,20,24 /* put in API (abbrev page index) */
#endif /* CONFIG_PPC64BRIDGE */
SET_V(r5) /* set V (valid) bit */
/* Get the address of the primary PTE group in the hash table (r3) */ /* Get the address of the primary PTE group in the hash table (r3) */
.globl flush_hash_patch_A .globl flush_hash_patch_A
flush_hash_patch_A: flush_hash_patch_A:
lis r8,Hash_base@h /* base address of hash table */ addis r8,r7,Hash_base@h /* base address of hash table */
rlwimi r8,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* VSID -> hash */ rlwimi r8,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* VSID -> hash */
rlwinm r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */ rlwinm r0,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
xor r3,r3,r8 /* make primary hash */ xor r8,r0,r8 /* make primary hash */
li r8,8 /* PTEs/group */
/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */ /* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
mtctr r8 li r0,8 /* PTEs/group */
addi r7,r3,-PTE_SIZE mtctr r0
1: LDPTEu r0,PTE_SIZE(r7) /* get next PTE */ addi r12,r8,-PTE_SIZE
CMPPTE 0,r0,r5 1: LDPTEu r0,PTE_SIZE(r12) /* get next PTE */
CMPPTE 0,r0,r11
bdnzf 2,1b /* loop while ctr != 0 && !cr0.eq */ bdnzf 2,1b /* loop while ctr != 0 && !cr0.eq */
beq+ 3f beq+ 3f
/* Search the secondary PTEG for a matching PTE */ /* Search the secondary PTEG for a matching PTE */
ori r5,r5,PTE_H /* set H (secondary hash) bit */ ori r11,r11,PTE_H /* set H (secondary hash) bit */
li r0,8 /* PTEs/group */
.globl flush_hash_patch_B .globl flush_hash_patch_B
flush_hash_patch_B: flush_hash_patch_B:
xoris r7,r3,Hash_msk>>16 /* compute secondary hash */ xoris r12,r8,Hash_msk>>16 /* compute secondary hash */
xori r7,r7,(-PTEG_SIZE & 0xffff) xori r12,r12,(-PTEG_SIZE & 0xffff)
addi r7,r7,-PTE_SIZE addi r12,r12,-PTE_SIZE
mtctr r8 mtctr r0
2: LDPTEu r0,PTE_SIZE(r7) 2: LDPTEu r0,PTE_SIZE(r12)
CMPPTE 0,r0,r5 CMPPTE 0,r0,r11
bdnzf 2,2b bdnzf 2,2b
bne- 4f /* should never fail to find it */ xori r11,r11,PTE_H /* clear H again */
bne- 4f /* should rarely fail to find it */
3: li r0,0 3: li r0,0
STPTE r0,0(r7) /* invalidate entry */ STPTE r0,0(r12) /* invalidate entry */
4: sync 4: sync
tlbie r4 /* in hw tlb too */ tlbie r4 /* in hw tlb too */
sync sync
8: ble cr1,9f /* if all ptes checked */
81: addi r6,r6,-1
addi r5,r5,4 /* advance to next pte */
addi r4,r4,0x1000
lwz r0,0(r5) /* check next pte */
cmpwi cr1,r6,1
andi. r0,r0,_PAGE_HASHPTE
bne 33b
bgt cr1,81b
9:
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
TLBSYNC TLBSYNC
9: li r0,0 li r0,0
stw r0,0(r9) /* clear hash_table_lock */ stw r0,0(r9) /* clear mmu_hash_lock */
#endif #endif
9: mtmsr r10 19: mtmsr r10
SYNC SYNC_601
isync
blr blr
@@ -39,7 +39,7 @@
int iSeries_hpt_loaded; int iSeries_hpt_loaded;
static spinlock_t hash_table_lock = SPIN_LOCK_UNLOCKED; static spinlock_t mmu_hash_lock = SPIN_LOCK_UNLOCKED;
extern unsigned long htab_reloads; // Defined in ppc/kernel/ppc_htab.c extern unsigned long htab_reloads; // Defined in ppc/kernel/ppc_htab.c
extern unsigned long htab_evicts; extern unsigned long htab_evicts;
@@ -159,10 +159,10 @@ int iSeries_create_hpte( unsigned long access, unsigned long va )
access |= _PAGE_PRESENT; // _PAGE_PRESENT also needed access |= _PAGE_PRESENT; // _PAGE_PRESENT also needed
spin_lock( &hash_table_lock ); spin_lock( &mmu_hash_lock );
// check if pte is in the required state // check if pte is in the required state
if ( ( access & ~(pte_val(*pt)) ) ) { if ( ( access & ~(pte_val(*pt)) ) ) {
spin_unlock( &hash_table_lock ); spin_unlock( &mmu_hash_lock );
return 1; return 1;
} }
@@ -177,18 +177,18 @@ int iSeries_create_hpte( unsigned long access, unsigned long va )
va, va,
pte_val(*pt)); pte_val(*pt));
spin_unlock( &hash_table_lock ); spin_unlock( &mmu_hash_lock );
return 0; return 0;
} }
void add_hash_page(unsigned context, unsigned long va, pte_t *ptep) void add_hash_page(unsigned context, unsigned long va, pte_t *ptep)
{ {
spin_lock( &hash_table_lock ); spin_lock( &mmu_hash_lock );
pte_update(ptep,0,_PAGE_HASHPTE); pte_update(ptep,0,_PAGE_HASHPTE);
__create_hpte(CTX_TO_VSID(context, va), __create_hpte(CTX_TO_VSID(context, va),
va, va,
pte_val(*ptep)); pte_val(*ptep));
spin_unlock( &hash_table_lock ); spin_unlock( &mmu_hash_lock );
} }
int flush_hash_page(unsigned context, unsigned long va, pte_t *ptep) int flush_hash_page(unsigned context, unsigned long va, pte_t *ptep)
@@ -208,7 +208,7 @@ int flush_hash_page(unsigned context, unsigned long va, pte_t *ptep)
hpte1Ptr = hpte0Ptr + 1; hpte1Ptr = hpte0Ptr + 1;
*hpte0Ptr = *hpte1Ptr = 0; *hpte0Ptr = *hpte1Ptr = 0;
spin_lock( &hash_table_lock ); spin_lock( &mmu_hash_lock );
rtnIndex = HvCallHpt_findValid( &hpte, vpn ); rtnIndex = HvCallHpt_findValid( &hpte, vpn );
if ( hpte.v ) { if ( hpte.v ) {
@@ -217,7 +217,7 @@ int flush_hash_page(unsigned context, unsigned long va, pte_t *ptep)
rc = 0; rc = 0;
} else } else
rc = 1; rc = 1;
spin_unlock( &hash_table_lock ); spin_unlock( &mmu_hash_lock );
return rc; return rc;
} }
@@ -175,12 +175,13 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
pte_t *ptep; pte_t *ptep;
static int nopreload; static int nopreload;
if (nopreload) if (nopreload || address >= TASK_SIZE)
return; return;
mm = (address < TASK_SIZE)? vma->vm_mm: &init_mm; mm = vma->vm_mm;
pmd = pmd_offset(pgd_offset(mm, address), address); pmd = pmd_offset(pgd_offset(mm, address), address);
if (!pmd_none(*pmd)) { if (!pmd_none(*pmd)) {
ptep = pte_offset(pmd, address); ptep = pte_offset_map(pmd, address);
add_hash_page(mm->context, address, ptep); add_hash_page(mm->context, address, ptep);
pte_unmap(ptep);
} }
} }
@@ -113,24 +113,6 @@ unsigned long __max_memory;
/* max amount of low RAM to map in */ /* max amount of low RAM to map in */
unsigned long __max_low_memory = MAX_LOW_MEM; unsigned long __max_low_memory = MAX_LOW_MEM;
int do_check_pgt_cache(int low, int high)
{
int freed = 0;
if (pgtable_cache_size > high) {
do {
if (pgd_quicklist) {
free_pgd_slow(get_pgd_fast());
freed++;
}
if (pte_quicklist) {
pte_free_slow(pte_alloc_one_fast(NULL, 0));
freed++;
}
} while (pgtable_cache_size > low);
}
return freed;
}
void show_mem(void) void show_mem(void)
{ {
int i,free = 0,total = 0,reserved = 0; int i,free = 0,total = 0,reserved = 0;
@@ -160,7 +142,6 @@ void show_mem(void)
printk("%d reserved pages\n",reserved); printk("%d reserved pages\n",reserved);
printk("%d pages shared\n",shared); printk("%d pages shared\n",shared);
printk("%d pages swap cached\n",cached); printk("%d pages swap cached\n",cached);
printk("%d pages in page table cache\n",(int)pgtable_cache_size);
show_buffers(); show_buffers();
} }
@@ -396,9 +377,11 @@ void __init paging_init(void)
#ifdef CONFIG_HIGHMEM #ifdef CONFIG_HIGHMEM
map_page(PKMAP_BASE, 0, 0); /* XXX gross */ map_page(PKMAP_BASE, 0, 0); /* XXX gross */
pkmap_page_table = pte_offset(pmd_offset(pgd_offset_k(PKMAP_BASE), PKMAP_BASE), PKMAP_BASE); pkmap_page_table = pte_offset_kernel(pmd_offset(pgd_offset_k
(PKMAP_BASE), PKMAP_BASE), PKMAP_BASE);
map_page(KMAP_FIX_BEGIN, 0, 0); /* XXX gross */ map_page(KMAP_FIX_BEGIN, 0, 0); /* XXX gross */
kmap_pte = pte_offset(pmd_offset(pgd_offset_k(KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN); kmap_pte = pte_offset_kernel(pmd_offset(pgd_offset_k
(KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN);
kmap_prot = PAGE_KERNEL; kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */ #endif /* CONFIG_HIGHMEM */
@@ -588,10 +571,12 @@ void flush_dcache_page(struct page *page)
void flush_icache_page(struct vm_area_struct *vma, struct page *page) void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{ {
unsigned long phys;
if (page->mapping && !PageReserved(page) if (page->mapping && !PageReserved(page)
&& !test_bit(PG_arch_1, &page->flags)) { && !test_bit(PG_arch_1, &page->flags)) {
__flush_dcache_icache(kmap(page)); phys = ((page - mem_map) << PAGE_SHIFT) + PPC_MEMSTART;
kunmap(page); __flush_dcache_icache_phys(phys);
set_bit(PG_arch_1, &page->flags); set_bit(PG_arch_1, &page->flags);
} }
} }
......
@@ -61,11 +61,12 @@ extern void MMU_init_hw(void);
* which includes all new 82xx processors. We need tlbie/tlbsync here * which includes all new 82xx processors. We need tlbie/tlbsync here
* in that case (I think). -- Dan. * in that case (I think). -- Dan.
*/ */
static inline void flush_HPTE(unsigned context, unsigned long va, pte_t *pg) static inline void flush_HPTE(unsigned context, unsigned long va,
unsigned long pdval)
{ {
if ((Hash != 0) && if ((Hash != 0) &&
(cur_cpu_spec[0]->cpu_features & CPU_FTR_HPTE_TABLE)) (cur_cpu_spec[0]->cpu_features & CPU_FTR_HPTE_TABLE))
flush_hash_page(0, va, pg); flush_hash_pages(0, va, pdval, 1);
else else
_tlbie(va); _tlbie(va);
} }
......
@@ -39,10 +39,6 @@ unsigned long ioremap_base;
unsigned long ioremap_bot; unsigned long ioremap_bot;
int io_bat_index; int io_bat_index;
#ifndef CONFIG_SMP
struct pgtable_cache_struct quicklists;
#endif
#if defined(CONFIG_6xx) || defined(CONFIG_POWER3) #if defined(CONFIG_6xx) || defined(CONFIG_POWER3)
#define HAVE_BATS 1 #define HAVE_BATS 1
#endif #endif
@@ -173,12 +169,12 @@ map_page(unsigned long va, unsigned long pa, int flags)
/* Use upper 10 bits of VA to index the first level map */ /* Use upper 10 bits of VA to index the first level map */
pd = pmd_offset(pgd_offset_k(va), va); pd = pmd_offset(pgd_offset_k(va), va);
/* Use middle 10 bits of VA to index the second-level map */ /* Use middle 10 bits of VA to index the second-level map */
pg = pte_alloc(&init_mm, pd, va); pg = pte_alloc_kernel(&init_mm, pd, va);
if (pg != 0) { if (pg != 0) {
err = 0; err = 0;
set_pte(pg, mk_pte_phys(pa & PAGE_MASK, __pgprot(flags))); set_pte(pg, mk_pte_phys(pa & PAGE_MASK, __pgprot(flags)));
if (mem_init_done) if (mem_init_done)
flush_HPTE(0, va, pg); flush_HPTE(0, va, pmd_val(*pd));
} }
spin_unlock(&init_mm.page_table_lock); spin_unlock(&init_mm.page_table_lock);
return err; return err;
@@ -272,10 +268,11 @@ get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
if (pgd) { if (pgd) {
pmd = pmd_offset(pgd, addr & PAGE_MASK); pmd = pmd_offset(pgd, addr & PAGE_MASK);
if (pmd_present(*pmd)) { if (pmd_present(*pmd)) {
pte = pte_offset(pmd, addr & PAGE_MASK); pte = pte_offset_map(pmd, addr & PAGE_MASK);
if (pte) { if (pte) {
retval = 1; retval = 1;
*ptep = pte; *ptep = pte;
/* XXX caller needs to do pte_unmap, yuck */
} }
} }
} }
@@ -312,8 +309,10 @@ unsigned long iopa(unsigned long addr)
mm = &init_mm; mm = &init_mm;
pa = 0; pa = 0;
if (get_pteptr(mm, addr, &pte)) if (get_pteptr(mm, addr, &pte)) {
pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK); pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);
pte_unmap(pte);
}
return(pa); return(pa);
} }
......
@@ -33,6 +33,7 @@
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/highmem.h>
#include <asm/prom.h> #include <asm/prom.h>
#include <asm/mmu.h> #include <asm/mmu.h>
@@ -289,7 +290,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
{ {
struct mm_struct *mm; struct mm_struct *mm;
pmd_t *pmd; pmd_t *pmd;
pte_t *ptep;
static int nopreload; static int nopreload;
if (Hash == 0 || nopreload) if (Hash == 0 || nopreload)
@@ -299,8 +299,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
return; return;
mm = (address < TASK_SIZE)? vma->vm_mm: &init_mm; mm = (address < TASK_SIZE)? vma->vm_mm: &init_mm;
pmd = pmd_offset(pgd_offset(mm, address), address); pmd = pmd_offset(pgd_offset(mm, address), address);
if (!pmd_none(*pmd)) { if (!pmd_none(*pmd))
ptep = pte_offset(pmd, address); add_hash_page(mm->context, address, pmd_val(*pmd));
add_hash_page(mm->context, address, ptep);
}
} }
@@ -30,6 +30,7 @@
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/highmem.h>
#include "mmu_decl.h" #include "mmu_decl.h"
@@ -104,7 +105,6 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{ {
struct mm_struct *mm; struct mm_struct *mm;
pmd_t *pmd; pmd_t *pmd;
pte_t *pte;
if (Hash == 0) { if (Hash == 0) {
_tlbie(vmaddr); _tlbie(vmaddr);
@@ -112,11 +112,8 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
} }
mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm; mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm;
pmd = pmd_offset(pgd_offset(mm, vmaddr), vmaddr); pmd = pmd_offset(pgd_offset(mm, vmaddr), vmaddr);
if (!pmd_none(*pmd)) { if (!pmd_none(*pmd))
pte = pte_offset(pmd, vmaddr); flush_hash_pages(mm->context, vmaddr, pmd_val(*pmd), 1);
if (pte_val(*pte) & _PAGE_HASHPTE)
flush_hash_page(mm->context, vmaddr, pte);
}
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
smp_send_tlb_invalidate(0); smp_send_tlb_invalidate(0);
#endif #endif
@@ -133,8 +130,8 @@ local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned
{ {
struct mm_struct *mm = vma->vm_mm; struct mm_struct *mm = vma->vm_mm;
pmd_t *pmd; pmd_t *pmd;
pte_t *pte;
unsigned long pmd_end; unsigned long pmd_end;
int count;
unsigned int ctx = mm->context; unsigned int ctx = mm->context;
if (Hash == 0) { if (Hash == 0) {
@@ -144,24 +141,21 @@ local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned
start &= PAGE_MASK; start &= PAGE_MASK;
if (start >= end) if (start >= end)
return; return;
end = (end - 1) | ~PAGE_MASK;
pmd = pmd_offset(pgd_offset(mm, start), start); pmd = pmd_offset(pgd_offset(mm, start), start);
do { for (;;) {
pmd_end = (start + PGDIR_SIZE) & PGDIR_MASK; pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1;
if (!pmd_none(*pmd)) { if (pmd_end > end)
if (!pmd_end || pmd_end > end)
pmd_end = end; pmd_end = end;
pte = pte_offset(pmd, start); if (!pmd_none(*pmd)) {
do { count = ((pmd_end - start) >> PAGE_SHIFT) + 1;
if ((pte_val(*pte) & _PAGE_HASHPTE) != 0) flush_hash_pages(ctx, start, pmd_val(*pmd), count);
flush_hash_page(ctx, start, pte);
start += PAGE_SIZE;
++pte;
} while (start && start < pmd_end);
} else {
start = pmd_end;
} }
if (pmd_end == end)
break;
start = pmd_end + 1;
++pmd; ++pmd;
} while (start && start < end); }
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
smp_send_tlb_invalidate(0); smp_send_tlb_invalidate(0);
......
@@ -106,7 +106,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
static inline void kunmap_atomic(void *kvaddr, enum km_type type) static inline void kunmap_atomic(void *kvaddr, enum km_type type)
{ {
#if HIGHMEM_DEBUG #if HIGHMEM_DEBUG
unsigned long vaddr = (unsigned long) kvaddr; unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
unsigned int idx = type + KM_TYPE_NR*smp_processor_id(); unsigned int idx = type + KM_TYPE_NR*smp_processor_id();
if (vaddr < KMAP_FIX_BEGIN) // FIXME if (vaddr < KMAP_FIX_BEGIN) // FIXME
......
/* /*
* BK Id: SCCS/s.kmap_types.h 1.9 08/29/01 14:03:05 paulus * BK Id: %F% %I% %G% %U% %#%
*/ */
#ifdef __KERNEL__ #ifdef __KERNEL__
#ifndef _ASM_KMAP_TYPES_H #ifndef _ASM_KMAP_TYPES_H
@@ -12,6 +12,8 @@ enum km_type {
KM_USER0, KM_USER0,
KM_USER1, KM_USER1,
KM_BIO_IRQ, KM_BIO_IRQ,
KM_PTE0,
KM_PTE1,
KM_TYPE_NR KM_TYPE_NR
}; };
......
/* /*
* BK Id: SCCS/s.pgalloc.h 1.9 05/17/01 18:14:25 cort * BK Id: %F% %I% %G% %U% %#%
*/ */
#ifdef __KERNEL__ #ifdef __KERNEL__
#ifndef _PPC_PGALLOC_H #ifndef _PPC_PGALLOC_H
@@ -7,55 +7,12 @@
#include <linux/config.h> #include <linux/config.h>
#include <linux/threads.h> #include <linux/threads.h>
#include <linux/highmem.h>
#include <asm/processor.h> #include <asm/processor.h>
/*
* This is handled very differently on the PPC since out page tables
* are all 0's and I want to be able to use these zero'd pages elsewhere
* as well - it gives us quite a speedup.
*
* Note that the SMP/UP versions are the same but we don't need a
* per cpu list of zero pages because we do the zero-ing with the cache
* off and the access routines are lock-free but the pgt cache stuff
* is per-cpu since it isn't done with any lock-free access routines
* (although I think we need arch-specific routines so I can do lock-free).
*
* I need to generalize this so we can use it for other arch's as well.
* -- Cort
*/
#ifdef CONFIG_SMP
#define quicklists cpu_data[smp_processor_id()]
#else
extern struct pgtable_cache_struct {
unsigned long *pgd_cache;
unsigned long *pte_cache;
unsigned long pgtable_cache_sz;
} quicklists;
#endif
#define pgd_quicklist (quicklists.pgd_cache)
#define pmd_quicklist ((unsigned long *)0)
#define pte_quicklist (quicklists.pte_cache)
#define pgtable_cache_size (quicklists.pgtable_cache_sz)
extern unsigned long *zero_cache; /* head linked list of pre-zero'd pages */
extern atomic_t zero_sz; /* # currently pre-zero'd pages */
extern atomic_t zeropage_hits; /* # zero'd pages request that we've done */
extern atomic_t zeropage_calls; /* # zero'd pages request that've been made */
extern atomic_t zerototal; /* # pages zero'd over time */
#define zero_quicklist (zero_cache)
#define zero_cache_sz (zero_sz)
#define zero_cache_calls (zeropage_calls)
#define zero_cache_hits (zeropage_hits)
#define zero_cache_total (zerototal)
/* return a pre-zero'd page from the list, return NULL if none available -- Cort */
extern unsigned long get_zero_page_fast(void);
extern void __bad_pte(pmd_t *pmd); extern void __bad_pte(pmd_t *pmd);
extern __inline__ pgd_t *get_pgd_slow(void) static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{ {
pgd_t *ret; pgd_t *ret;
@@ -64,85 +21,75 @@ extern __inline__ pgd_t *get_pgd_slow(void)
return ret; return ret;
} }
extern __inline__ pgd_t *get_pgd_fast(void) extern __inline__ void pgd_free(pgd_t *pgd)
{
unsigned long *ret;
if ((ret = pgd_quicklist) != NULL) {
pgd_quicklist = (unsigned long *)(*ret);
ret[0] = 0;
pgtable_cache_size--;
} else
ret = (unsigned long *)get_pgd_slow();
return (pgd_t *)ret;
}
extern __inline__ void free_pgd_fast(pgd_t *pgd)
{
*(unsigned long **)pgd = pgd_quicklist;
pgd_quicklist = (unsigned long *) pgd;
pgtable_cache_size++;
}
extern __inline__ void free_pgd_slow(pgd_t *pgd)
{ {
free_page((unsigned long)pgd); free_page((unsigned long)pgd);
} }
#define pgd_free(pgd) free_pgd_fast(pgd)
#define pgd_alloc(mm) get_pgd_fast()
/* /*
* We don't have any real pmd's, and this code never triggers because * We don't have any real pmd's, and this code never triggers because
* the pgd will always be present.. * the pgd will always be present..
*/ */
#define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); })
#define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); }) #define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); })
#define pmd_free(x) do { } while (0) #define pmd_free(x) do { } while (0)
#define pgd_populate(mm, pmd, pte) BUG() #define pgd_populate(mm, pmd, pte) BUG()
static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address) static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{ {
pte_t *pte; pte_t *pte;
extern int mem_init_done; extern int mem_init_done;
extern void *early_get_page(void); extern void *early_get_page(void);
int timeout = 0;
if (mem_init_done) if (mem_init_done) {
pte = (pte_t *) __get_free_page(GFP_KERNEL); while ((pte = (pte_t *) __get_free_page(GFP_KERNEL)) == NULL
else && ++timeout < 10) {
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(HZ);
}
} else
pte = (pte_t *) early_get_page(); pte = (pte_t *) early_get_page();
if (pte != NULL) if (pte != NULL)
clear_page(pte); clear_page(pte);
return pte; return pte;
} }
static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address) static inline struct page *
pte_alloc_one(struct mm_struct *mm, unsigned long address)
{ {
unsigned long *ret; struct page *pte;
int timeout = 0;
#ifdef CONFIG_HIGHPTE
int flags = GFP_KERNEL | __GFP_HIGHMEM;
#else
int flags = GFP_KERNEL;
#endif
if ((ret = pte_quicklist) != NULL) { while ((pte = alloc_pages(flags, 0)) == NULL) {
pte_quicklist = (unsigned long *)(*ret); if (++timeout >= 10)
ret[0] = 0; return NULL;
pgtable_cache_size--; set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(HZ);
} }
return (pte_t *)ret; clear_highpage(pte);
return pte;
} }
extern __inline__ void pte_free_fast(pte_t *pte) static inline void pte_free_kernel(pte_t *pte)
{ {
*(unsigned long **)pte = pte_quicklist; free_page((unsigned long)pte);
pte_quicklist = (unsigned long *) pte;
pgtable_cache_size++;
} }
extern __inline__ void pte_free_slow(pte_t *pte) static inline void pte_free(struct page *pte)
{ {
free_page((unsigned long)pte); __free_page(pte);
} }
#define pte_free(pte) pte_free_slow(pte) #define pmd_populate_kernel(mm, pmd, pte) \
(pmd_val(*(pmd)) = __pa(pte))
#define pmd_populate(mm, pmd, pte) (pmd_val(*(pmd)) = (unsigned long) (pte)) #define pmd_populate(mm, pmd, pte) \
(pmd_val(*(pmd)) = ((pte) - mem_map) << PAGE_SHIFT)
extern int do_check_pgt_cache(int, int); extern int do_check_pgt_cache(int, int);
......
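With the per-CPU quicklists removed, the allocation/populate pairing defined above splits cleanly: kernel page tables come from pte_alloc_one_kernel() and are linked in by physical address, while user page tables come from pte_alloc_one() as a struct page and are linked in by page frame number, which is what lets them live in highmem under CONFIG_HIGHPTE. A minimal usage sketch under those definitions (illustrative only, not code from this commit):

        /* Kernel mapping: the pte page is always lowmem; the pmd gets __pa(pte). */
        pte_t *kpte = pte_alloc_one_kernel(&init_mm, addr);
        if (kpte != NULL)
                pmd_populate_kernel(&init_mm, pmd, kpte);

        /* User mapping: the pte page may be highmem; the pmd gets pfn << PAGE_SHIFT. */
        struct page *ptepage = pte_alloc_one(mm, addr);
        if (ptepage != NULL)
                pmd_populate(mm, pmd, ptepage);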
@@ -13,6 +13,7 @@
#include <asm/processor.h> /* For TASK_SIZE */ #include <asm/processor.h> /* For TASK_SIZE */
#include <asm/mmu.h> #include <asm/mmu.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/kmap_types.h>
extern void _tlbie(unsigned long address); extern void _tlbie(unsigned long address);
extern void _tlbia(void); extern void _tlbia(void);
@@ -98,6 +99,7 @@ extern void flush_icache_user_range(struct vm_area_struct *vma,
struct page *page, unsigned long addr, int len); struct page *page, unsigned long addr, int len);
extern void flush_icache_range(unsigned long, unsigned long); extern void flush_icache_range(unsigned long, unsigned long);
extern void __flush_dcache_icache(void *page_va); extern void __flush_dcache_icache(void *page_va);
extern void __flush_dcache_icache_phys(unsigned long physaddr);
extern void flush_dcache_page(struct page *page); extern void flush_dcache_page(struct page *page);
extern void flush_icache_page(struct vm_area_struct *vma, struct page *page); extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
@@ -274,7 +276,6 @@ extern unsigned long ioremap_bot, ioremap_base;
#define _PAGE_HWWRITE 0x0100 /* h/w write enable: never set in Linux PTE */ #define _PAGE_HWWRITE 0x0100 /* h/w write enable: never set in Linux PTE */
#define _PAGE_USER 0x0800 /* One of the PP bits, the other is USER&~RW */ #define _PAGE_USER 0x0800 /* One of the PP bits, the other is USER&~RW */
#define _PMD_PRESENT 0x0001
#define _PMD_PAGE_MASK 0x000c #define _PMD_PAGE_MASK 0x000c
#define _PMD_PAGE_8M 0x000c #define _PMD_PAGE_8M 0x000c
@@ -385,8 +386,8 @@ extern unsigned long empty_zero_page[1024];
#define pte_clear(ptep) do { set_pte((ptep), __pte(0)); } while (0) #define pte_clear(ptep) do { set_pte((ptep), __pte(0)); } while (0)
#define pmd_none(pmd) (!pmd_val(pmd)) #define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_bad(pmd) ((pmd_val(pmd) & ~PAGE_MASK) != 0) #define pmd_bad(pmd) (0)
#define pmd_present(pmd) ((pmd_val(pmd) & PAGE_MASK) != 0) #define pmd_present(pmd) (pmd_val(pmd) != 0)
#define pmd_clear(pmdp) do { pmd_val(*(pmdp)) = 0; } while (0) #define pmd_clear(pmdp) do { pmd_val(*(pmdp)) = 0; } while (0)
#define pte_page(x) (mem_map+(unsigned long)((pte_val(x)-PPC_MEMSTART) >> PAGE_SHIFT)) #define pte_page(x) (mem_map+(unsigned long)((pte_val(x)-PPC_MEMSTART) >> PAGE_SHIFT))
@@ -530,7 +531,10 @@ static inline void ptep_mkdirty(pte_t *ptep)
#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0) #define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
#define pmd_page(pmd) (pmd_val(pmd) & PAGE_MASK) #define pmd_page_kernel(pmd) \
((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd) \
(mem_map + (pmd_val(pmd) >> PAGE_SHIFT))
/* to find an entry in a kernel page-table-directory */ /* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address) #define pgd_offset_k(address) pgd_offset(&init_mm, address)
@@ -546,10 +550,17 @@ static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
} }
/* Find an entry in the third-level page table.. */ /* Find an entry in the third-level page table.. */
static inline pte_t * pte_offset(pmd_t * dir, unsigned long address) #define __pte_offset(address) \
{ ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
return (pte_t *) pmd_page(*dir) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)); #define pte_offset_kernel(dir, addr) \
} ((pte_t *) pmd_page_kernel(*(dir)) + __pte_offset(addr))
#define pte_offset_map(dir, addr) \
((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + __pte_offset(addr))
#define pte_offset_map_nested(dir, addr) \
((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + __pte_offset(addr))
#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
extern pgd_t swapper_pg_dir[1024]; extern pgd_t swapper_pg_dir[1024];
extern void paging_init(void); extern void paging_init(void);
@@ -558,10 +569,12 @@ extern void paging_init(void);
* When flushing the tlb entry for a page, we also need to flush the hash * When flushing the tlb entry for a page, we also need to flush the hash
* table entry. flush_hash_page is assembler (for speed) in hashtable.S. * table entry. flush_hash_page is assembler (for speed) in hashtable.S.
*/ */
extern int flush_hash_page(unsigned context, unsigned long va, pte_t *ptep); extern int flush_hash_pages(unsigned context, unsigned long va,
unsigned long pmdval, int count);
/* Add an HPTE to the hash table */ /* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va, pte_t *ptep); extern void add_hash_page(unsigned context, unsigned long va,
unsigned long pmdval);
/* /*
* Encode and decode a swap entry. * Encode and decode a swap entry.
......
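Because a pte page can now sit in highmem, any code that dereferences pte entries must bracket the access with pte_offset_map()/pte_unmap() (which use the KM_PTE0/KM_PTE1 kmap_atomic slots added earlier in this diff); pte_offset_kernel() remains valid only for kernel page tables, whose pte pages are always lowmem. A minimal walker sketch under the macros above, following the same pattern as the update_mmu_cache() change (the function name is hypothetical):

        /* Read a pte value from a user address space, keeping the pte page
         * mapped (via kmap_atomic) only while it is being dereferenced. */
        static pte_t read_user_pte(struct mm_struct *mm, unsigned long addr)
        {
                pmd_t *pmd = pmd_offset(pgd_offset(mm, addr), addr);
                pte_t pte = __pte(0);
                pte_t *ptep;

                if (!pmd_none(*pmd)) {
                        ptep = pte_offset_map(pmd, addr);       /* kmap_atomic, KM_PTE0 */
                        pte = *ptep;
                        pte_unmap(ptep);                        /* kunmap_atomic, KM_PTE0 */
                }
                return pte;
        }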