Commit 043baaa2 authored by Rob Radez, committed by David S. Miller

[SPARC]: Backport of 2.4.x dynamic-nocache.

parent e6c0c5e8
@@ -38,7 +38,7 @@ void *kmap_atomic(struct page *page, enum km_type type)
 		return page_address(page);
 
 	idx = type + KM_TYPE_NR*smp_processor_id();
-	vaddr = FIX_KMAP_BEGIN + idx * PAGE_SIZE;
+	vaddr = fix_kmap_begin + idx * PAGE_SIZE;
 
 	/* XXX Fix - Anton */
 #if 0
@@ -67,12 +67,12 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 	unsigned long vaddr = (unsigned long) kvaddr;
 	unsigned long idx = type + KM_TYPE_NR*smp_processor_id();
 
-	if (vaddr < FIX_KMAP_BEGIN) { // FIXME
+	if (vaddr < fix_kmap_begin) { // FIXME
 		dec_preempt_count();
 		return;
 	}
 
-	if (vaddr != FIX_KMAP_BEGIN + idx * PAGE_SIZE)
+	if (vaddr != fix_kmap_begin + idx * PAGE_SIZE)
 		BUG();
 
 	/* XXX Fix - Anton */
...
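The two hunks above contain one functional change: the base of the per-CPU atomic-kmap window is now the runtime variable fix_kmap_begin instead of the compile-time constant FIX_KMAP_BEGIN, so the window can sit wherever the nocache pool happens to end. A minimal userspace sketch of the slot arithmetic, assuming 4 KB pages and illustrative values for KM_TYPE_NR and fix_kmap_begin (the real values are set in {srmmu,sun4c}_paging_init()):

#include <stdio.h>

#define PAGE_SIZE  4096UL
#define KM_TYPE_NR 8                    /* illustrative slot count per CPU */

static unsigned long fix_kmap_begin = 0xfc100000UL;  /* example value only */

/* Same address computation as kmap_atomic()/kunmap_atomic() above. */
static unsigned long kmap_slot_vaddr(int type, int cpu)
{
        unsigned long idx = type + KM_TYPE_NR * cpu;
        return fix_kmap_begin + idx * PAGE_SIZE;
}

int main(void)
{
        /* slot 2 on CPU 1 sits 10 pages above the window base */
        printf("0x%lx\n", kmap_slot_vaddr(2, 1));    /* prints 0xfc10a000 */
        return 0;
}

kunmap_atomic() inverts the same formula, which is why it can BUG() when the vaddr it is handed does not equal fix_kmap_begin + idx * PAGE_SIZE.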
@@ -59,13 +59,17 @@ unsigned long highstart_pfn, highend_pfn;
 pte_t *kmap_pte;
 pgprot_t kmap_prot;
 
+/* These are set in {srmmu,sun4c}_paging_init() */
+unsigned long fix_kmap_begin;
+unsigned long fix_kmap_end;
+
 #define kmap_get_fixed_pte(vaddr) \
 	pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr))
 
 void __init kmap_init(void)
 {
 	/* cache the first kmap pte */
-	kmap_pte = kmap_get_fixed_pte(FIX_KMAP_BEGIN);
+	kmap_pte = kmap_get_fixed_pte(fix_kmap_begin);
 	kmap_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV | SRMMU_CACHE);
 }
...
@@ -136,8 +136,16 @@ static inline int srmmu_device_memory(unsigned long x)
 
 int srmmu_cache_pagetables;
 
-/* XXX Make this dynamic based on ram size - Anton */
-#define SRMMU_NOCACHE_BITMAP_SIZE (SRMMU_NOCACHE_NPAGES * 16)
+/* these will be initialized in srmmu_nocache_calcsize() */
+int srmmu_nocache_npages;
+unsigned long srmmu_nocache_size;
+unsigned long srmmu_nocache_end;
+unsigned long pkmap_base;
+unsigned long pkmap_base_end;
+unsigned long srmmu_nocache_bitmap_size;
+extern unsigned long fix_kmap_begin;
+extern unsigned long fix_kmap_end;
+
 #define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4)
 
 void *srmmu_nocache_pool;
@@ -331,7 +339,7 @@ static unsigned long __srmmu_get_nocache(int size, int align)
 	spin_lock(&srmmu_nocache_spinlock);
 
 repeat:
-	offset = find_next_zero_bit(srmmu_nocache_bitmap, SRMMU_NOCACHE_BITMAP_SIZE, offset);
+	offset = find_next_zero_bit(srmmu_nocache_bitmap, srmmu_nocache_bitmap_size, offset);
 
 	/* we align on physical address */
 	if (align) {
@@ -341,7 +349,7 @@ static unsigned long __srmmu_get_nocache(int size, int align)
 		offset = (va_tmp - SRMMU_NOCACHE_VADDR) >> SRMMU_NOCACHE_BITMAP_SHIFT;
 	}
 
-	if ((SRMMU_NOCACHE_BITMAP_SIZE - offset) < size) {
+	if ((srmmu_nocache_bitmap_size - offset) < size) {
 		printk("Run out of nocached RAM!\n");
 		spin_unlock(&srmmu_nocache_spinlock);
 		return 0;
@@ -393,9 +401,9 @@ void srmmu_free_nocache(unsigned long vaddr, int size)
 			vaddr, (unsigned long)SRMMU_NOCACHE_VADDR);
 		BUG();
 	}
-	if (vaddr >= SRMMU_NOCACHE_END) {
+	if (vaddr >= srmmu_nocache_end) {
 		printk("Vaddr %lx is bigger than nocache end 0x%lx\n",
-			vaddr, (unsigned long)SRMMU_NOCACHE_END);
+			vaddr, srmmu_nocache_end);
 		BUG();
 	}
 	if (size & (size-1)) {
@@ -429,6 +437,35 @@ void srmmu_free_nocache(unsigned long vaddr, int size)
 
 void srmmu_early_allocate_ptable_skeleton(unsigned long start, unsigned long end);
 
+extern unsigned long probe_memory(void);	/* in fault.c */
+
+/* Reserve nocache dynamically proportionally to the amount of
+ * system RAM. -- Tomas Szepe <szepe@pinerecords.com>, June 2002
+ */
+void srmmu_nocache_calcsize(void)
+{
+	unsigned long sysmemavail = probe_memory() / 1024;
+
+	srmmu_nocache_npages =
+		sysmemavail / SRMMU_NOCACHE_ALCRATIO / 1024 * 256;
+	if (sysmemavail % (SRMMU_NOCACHE_ALCRATIO * 1024))
+		srmmu_nocache_npages += 256;
+
+	/* anything above 1280 blows up */
+	if (srmmu_nocache_npages > 1280) srmmu_nocache_npages = 1280;
+
+	srmmu_nocache_size = srmmu_nocache_npages * PAGE_SIZE;
+	srmmu_nocache_bitmap_size = srmmu_nocache_npages * 16;
+	srmmu_nocache_end = SRMMU_NOCACHE_VADDR + srmmu_nocache_size;
+	fix_kmap_begin = srmmu_nocache_end;
+	fix_kmap_end = fix_kmap_begin + (KM_TYPE_NR * NR_CPUS - 1) * PAGE_SIZE;
+	pkmap_base = SRMMU_NOCACHE_VADDR + srmmu_nocache_size + 0x40000;
+	pkmap_base_end = pkmap_base + LAST_PKMAP * PAGE_SIZE;
+
+	/* printk("system memory available = %luk\nnocache ram size = %luk\n",
+		sysmemavail, srmmu_nocache_size / 1024); */
+}
+
 void srmmu_nocache_init(void)
 {
 	pgd_t *pgd;
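To see what srmmu_nocache_calcsize() (in mm/srmmu.c) actually reserves, here is the same arithmetic pulled into a standalone sketch; the probed memory size is passed in as a parameter instead of coming from probe_memory(), and 4 KB pages plus the patch's SRMMU_NOCACHE_ALCRATIO of 64 are assumed:

#include <stdio.h>

#define PAGE_SIZE              4096UL
#define SRMMU_NOCACHE_ALCRATIO 64      /* 256 pages per 64MB of system RAM */

/* Same arithmetic as srmmu_nocache_calcsize(), with the probed memory
 * size (in KB) passed in so the calculation can be checked in isolation. */
static unsigned long nocache_npages(unsigned long sysmemavail_kb)
{
        unsigned long npages;

        npages = sysmemavail_kb / SRMMU_NOCACHE_ALCRATIO / 1024 * 256;
        if (sysmemavail_kb % (SRMMU_NOCACHE_ALCRATIO * 1024))
                npages += 256;          /* round partial 64MB chunks up */
        if (npages > 1280)              /* "anything above 1280 blows up" */
                npages = 1280;
        return npages;
}

int main(void)
{
        /* 32MB -> 256 pages (1MB), 128MB -> 512 pages (2MB),
         * 512MB -> 1280 pages (5MB, clamped at the maximum) */
        unsigned long mb[] = { 32, 128, 512 };
        for (int i = 0; i < 3; i++)
                printf("%4luMB RAM -> %lu nocache pages (%luKB)\n", mb[i],
                       nocache_npages(mb[i] * 1024),
                       nocache_npages(mb[i] * 1024) * PAGE_SIZE / 1024);
        return 0;
}

So the pool grows by 256 pages (1 MB) per 64 MB of RAM, rounding partial chunks up, and is clamped at 5 MB. Note also the 0x40000 (256 KB) gap left between srmmu_nocache_end and pkmap_base: that window is where the fix_kmap pages placed at fix_kmap_begin live.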
@@ -437,24 +474,24 @@ void srmmu_nocache_init(void)
 	unsigned long paddr, vaddr;
 	unsigned long pteval;
 
-	srmmu_nocache_pool = __alloc_bootmem(SRMMU_NOCACHE_SIZE, PAGE_SIZE, 0UL);
-	memset(srmmu_nocache_pool, 0, SRMMU_NOCACHE_SIZE);
+	srmmu_nocache_pool = __alloc_bootmem(srmmu_nocache_size, PAGE_SIZE, 0UL);
+	memset(srmmu_nocache_pool, 0, srmmu_nocache_size);
 
-	srmmu_nocache_bitmap = __alloc_bootmem(SRMMU_NOCACHE_BITMAP_SIZE, SMP_CACHE_BYTES, 0UL);
-	memset(srmmu_nocache_bitmap, 0, SRMMU_NOCACHE_BITMAP_SIZE);
+	srmmu_nocache_bitmap = __alloc_bootmem(srmmu_nocache_bitmap_size, SMP_CACHE_BYTES, 0UL);
+	memset(srmmu_nocache_bitmap, 0, srmmu_nocache_bitmap_size);
 
 	srmmu_swapper_pg_dir = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
 	memset(__nocache_fix(srmmu_swapper_pg_dir), 0, SRMMU_PGD_TABLE_SIZE);
 	init_mm.pgd = srmmu_swapper_pg_dir;
 
-	srmmu_early_allocate_ptable_skeleton(SRMMU_NOCACHE_VADDR, SRMMU_NOCACHE_END);
+	srmmu_early_allocate_ptable_skeleton(SRMMU_NOCACHE_VADDR, srmmu_nocache_end);
 
 	spin_lock_init(&srmmu_nocache_spinlock);
 
 	paddr = __pa((unsigned long)srmmu_nocache_pool);
 	vaddr = SRMMU_NOCACHE_VADDR;
 
-	while (vaddr < SRMMU_NOCACHE_END) {
+	while (vaddr < srmmu_nocache_end) {
 		pgd = pgd_offset_k(vaddr);
 		pmd = srmmu_pmd_offset(__nocache_fix(pgd), vaddr);
 		pte = srmmu_pte_offset(__nocache_fix(pmd), vaddr);
@@ -1286,6 +1323,7 @@ void __init srmmu_paging_init(void)
 	pages_avail = 0;
 	last_valid_pfn = bootmem_init(&pages_avail);
 
+	srmmu_nocache_calcsize();
 	srmmu_nocache_init();
 	srmmu_inherit_prom_mappings(0xfe400000,(LINUX_OPPROM_ENDVM-PAGE_SIZE));
 	map_kernel();
@@ -1307,12 +1345,12 @@ void __init srmmu_paging_init(void)
 	srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);
 #endif
 
-	srmmu_allocate_ptable_skeleton(FIX_KMAP_BEGIN, FIX_KMAP_END);
-	srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_BASE_END);
+	srmmu_allocate_ptable_skeleton(fix_kmap_begin, fix_kmap_end);
+	srmmu_allocate_ptable_skeleton(pkmap_base, pkmap_base_end);
 
-	pgd = pgd_offset_k(PKMAP_BASE);
-	pmd = srmmu_pmd_offset(pgd, PKMAP_BASE);
-	pte = srmmu_pte_offset(pmd, PKMAP_BASE);
+	pgd = pgd_offset_k(pkmap_base);
+	pmd = srmmu_pmd_offset(pgd, pkmap_base);
+	pte = srmmu_pte_offset(pmd, pkmap_base);
 	pkmap_page_table = pte;
 
 	flush_cache_all();
@@ -1359,7 +1397,7 @@ static void srmmu_mmu_info(struct seq_file *m)
 		   "nocache used\t: %d\n",
 		   srmmu_name,
 		   num_contexts,
-		   SRMMU_NOCACHE_SIZE,
+		   srmmu_nocache_size,
 		   (srmmu_nocache_used << SRMMU_NOCACHE_BITMAP_SHIFT));
 }
...
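One detail worth spelling out in the allocator hunks: the bitmap is sized as srmmu_nocache_npages * 16 bits, and SRMMU_NOCACHE_BITMAP_SHIFT is PAGE_SHIFT - 4, so each bit tracks a PAGE_SIZE/16 chunk of the pool — 256 bytes with 4 KB pages, conveniently the size of one SRMMU page table. A quick consistency check of that correspondence, assuming 4 KB pages:

#include <assert.h>

#define PAGE_SHIFT                 12
#define PAGE_SIZE                  (1UL << PAGE_SHIFT)
#define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4)   /* one bit per 256B */

int main(void)
{
        unsigned long npages = 512;              /* e.g. a 128MB machine */
        unsigned long bitmap_bits = npages * 16; /* srmmu_nocache_bitmap_size */
        unsigned long bytes_per_bit = 1UL << SRMMU_NOCACHE_BITMAP_SHIFT;

        /* the pool is exactly covered: bits * granularity == pool size */
        assert(bitmap_bits * bytes_per_bit == npages * PAGE_SIZE);
        return 0;
}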
@@ -2002,6 +2002,9 @@ extern unsigned long end;
 extern unsigned long bootmem_init(unsigned long *pages_avail);
 extern unsigned long last_valid_pfn;
 
+extern unsigned long fix_kmap_begin;
+extern unsigned long fix_kmap_end;
+
 void __init sun4c_paging_init(void)
 {
 	int i, cnt;
@@ -2009,6 +2012,9 @@ void __init sun4c_paging_init(void)
 	extern struct resource sparc_iomap;
 	unsigned long end_pfn, pages_avail;
 
+	fix_kmap_begin = KERNBASE + SRMMU_MAXMEM; /* Why bother with SRMMU_MAXMEM? */
+	fix_kmap_end = fix_kmap_begin + ((KM_TYPE_NR*NR_CPUS)-1)*PAGE_SIZE;
+
 	kernel_end = (unsigned long) &end;
 	kernel_end += (SUN4C_REAL_PGDIR_SIZE * 4);
 	kernel_end = SUN4C_REAL_PGDIR_ALIGN(kernel_end);
...
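sun4c has no dynamic nocache pool, so it pins the kmap window at the first address past the largest possible kernel mapping, KERNBASE + SRMMU_MAXMEM. A sketch of the resulting addresses, assuming KERNBASE = 0xf0000000 (implied by the "= 0x0fc000000" comment in the vaddrs.h hunk below) and, purely for illustration, KM_TYPE_NR = 8 on a uniprocessor build:

#include <stdio.h>

#define PAGE_SIZE    4096UL
#define KERNBASE     0xf0000000UL
#define SRMMU_MAXMEM 0x0c000000UL
#define KM_TYPE_NR   8   /* assumed slot count, for illustration */
#define NR_CPUS      1   /* assumed UP configuration */

int main(void)
{
        unsigned long begin = KERNBASE + SRMMU_MAXMEM;
        unsigned long end = begin + ((KM_TYPE_NR * NR_CPUS) - 1) * PAGE_SIZE;

        /* prints fix_kmap: 0xfc000000 - 0xfc007000 */
        printf("fix_kmap: 0x%08lx - 0x%08lx\n", begin, end);
        return 0;
}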
@@ -33,6 +33,12 @@ extern pte_t *kmap_pte;
 extern pgprot_t kmap_prot;
 extern pte_t *pkmap_page_table;
 
+/* This gets set in {srmmu,sun4c}_paging_init() */
+extern unsigned long fix_kmap_begin;
+
+/* Only used and set with srmmu? */
+extern unsigned long pkmap_base;
+
 extern void kmap_init(void) __init;
 
 /*
@@ -42,9 +48,9 @@ extern void kmap_init(void) __init;
 */
 #define LAST_PKMAP 1024
-#define LAST_PKMAP_MASK (LAST_PKMAP-1)
-#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
-#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
+#define LAST_PKMAP_MASK (LAST_PKMAP - 1)
+#define PKMAP_NR(virt) ((virt - pkmap_base) >> PAGE_SHIFT)
+#define PKMAP_ADDR(nr) (pkmap_base + ((nr) << PAGE_SHIFT))
 
 extern void *kmap_high(struct page *page);
 extern void kunmap_high(struct page *page);
@@ -75,10 +81,10 @@ static inline struct page *kmap_atomic_to_page(void *ptr)
 	unsigned long idx, vaddr = (unsigned long)ptr;
 	pte_t *pte;
 
-	if (vaddr < FIX_KMAP_BEGIN)
+	if (vaddr < fix_kmap_begin)
 		return virt_to_page(ptr);
 
-	idx = ((vaddr - FIX_KMAP_BEGIN) >> PAGE_SHIFT);
+	idx = ((vaddr - fix_kmap_begin) >> PAGE_SHIFT);
 	pte = kmap_pte + idx;
 	return pte_page(*pte);
 }
...
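With PKMAP_BASE replaced by the variable pkmap_base, the PKMAP_NR()/PKMAP_ADDR() pair stays a bijection between highmem slot numbers and virtual addresses no matter where the window ends up. A self-contained check of that round trip, using an illustrative pkmap_base (the real one is computed in srmmu_nocache_calcsize()):

#include <assert.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define LAST_PKMAP 1024

static unsigned long pkmap_base = 0xfc140000UL;  /* example value only */

/* Same macros as the header above, built on the runtime base. */
#define PKMAP_NR(virt) ((virt - pkmap_base) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr) (pkmap_base + ((nr) << PAGE_SHIFT))

int main(void)
{
        /* round-trip through the macros for every slot in the window */
        for (unsigned long nr = 0; nr < LAST_PKMAP; nr++)
                assert(PKMAP_NR(PKMAP_ADDR(nr)) == nr);
        return 0;
}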
@@ -14,23 +14,26 @@
 #define SRMMU_MAXMEM		0x0c000000
 
-#define SRMMU_NOCACHE_VADDR	0xfc000000	/* KERNBASE + SRMMU_MAXMEM */
-/* XXX Make this dynamic based on ram size - Anton */
-#define SRMMU_NOCACHE_NPAGES	256
-#define SRMMU_NOCACHE_SIZE	(SRMMU_NOCACHE_NPAGES * PAGE_SIZE)
-#define SRMMU_NOCACHE_END	(SRMMU_NOCACHE_VADDR + SRMMU_NOCACHE_SIZE)
-
-#define FIX_KMAP_BEGIN		0xfc100000
-#define FIX_KMAP_END		(FIX_KMAP_BEGIN + ((KM_TYPE_NR*NR_CPUS)-1)*PAGE_SIZE)
-
-#define PKMAP_BASE		0xfc140000
-#define PKMAP_BASE_END		(PKMAP_BASE+LAST_PKMAP*PAGE_SIZE)
+#define SRMMU_NOCACHE_VADDR	(KERNBASE + SRMMU_MAXMEM)
+						/* = 0x0fc000000 */
+/* The following constant is used in mm/srmmu.c::srmmu_nocache_calcsize()
+ * to determine the amount of memory that will be reserved as nocache:
+ *
+ * 256 pages will be taken as nocache per each
+ * SRMMU_NOCACHE_ALCRATIO MB of system memory.
+ *
+ * limits enforced:	nocache minimum = 256 pages
+ *			nocache maximum = 1280 pages
+ */
+#define SRMMU_NOCACHE_ALCRATIO	64	/* 256 pages per 64MB of system RAM */
 
 #define SUN4M_IOBASE_VADDR	0xfd000000 /* Base for mapping pages */
 #define IOBASE_VADDR		0xfe000000
 #define IOBASE_END		0xfe300000
 #define VMALLOC_START		0xfe300000
 
 /* XXX Alter this when I get around to fixing sun4c - Anton */
 #define VMALLOC_END		0xffc00000
...
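Taken together, the new vaddrs.h derives the whole layout from the pool size instead of hard-coding 0xfc100000 and 0xfc140000. A sketch of the resulting address map for a 128 MB machine (512 nocache pages per the calcsize() arithmetic), assuming 4 KB pages and KERNBASE = 0xf0000000 as implied by the "= 0x0fc000000" comment:

#include <stdio.h>

#define PAGE_SIZE           4096UL
#define KERNBASE            0xf0000000UL   /* sparc32 kernel base */
#define SRMMU_MAXMEM        0x0c000000UL
#define SRMMU_NOCACHE_VADDR (KERNBASE + SRMMU_MAXMEM)
#define LAST_PKMAP          1024

int main(void)
{
        unsigned long npages = 512;        /* 128MB machine, see calcsize() */
        unsigned long nocache_size = npages * PAGE_SIZE;
        unsigned long nocache_end = SRMMU_NOCACHE_VADDR + nocache_size;
        unsigned long fix_kmap_begin = nocache_end;
        unsigned long pkmap_base = SRMMU_NOCACHE_VADDR + nocache_size + 0x40000;

        /* nocache: 0xfc000000 - 0xfc200000, fixmap at 0xfc200000,
         * pkmap:   0xfc240000 - 0xfc640000 */
        printf("nocache: 0x%08lx - 0x%08lx\n", SRMMU_NOCACHE_VADDR, nocache_end);
        printf("fixmap:  0x%08lx ...\n", fix_kmap_begin);
        printf("pkmap:   0x%08lx - 0x%08lx\n", pkmap_base,
               pkmap_base + LAST_PKMAP * PAGE_SIZE);
        return 0;
}

Everything still fits below SUN4M_IOBASE_VADDR at 0xfd000000 even at the 1280-page maximum, where nocache would end at 0xfc500000 and the pkmap window at 0xfc940000.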