Commit a113994e authored by Linus Torvalds

Merge bk://bk.arm.linux.org.uk/linux-2.5-rmk

into home.transmeta.com:/home/torvalds/v2.5/linux
parents e847679b 27510721
......@@ -31,13 +31,15 @@ apcs-$(CONFIG_CPU_32) :=-mapcs-32
apcs-$(CONFIG_CPU_26) :=-mapcs-26 -mcpu=arm3
# This selects which instruction set is used.
# Note that GCC is lame - it doesn't numerically define an
# architecture version macro, but instead defines a whole
# series of macros.
arch-$(CONFIG_CPU_32v3) :=-D__LINUX_ARM_ARCH__=3 -march=armv3
# Note that GCC does not numerically define an architecture version
# macro, but instead defines a whole series of macros, which makes
# testing for "a specific architecture or later" all but impossible.
#
# Note: GCC does accept -march=armv5te, but someone messed up the assembler or the
# gcc specs file - this needs fixing properly, i.e. in gcc and/or binutils.
arch-$(CONFIG_CPU_32v5) :=-D__LINUX_ARM_ARCH__=5 -march=armv5t
arch-$(CONFIG_CPU_32v4) :=-D__LINUX_ARM_ARCH__=4 -march=armv4
arch-$(CONFIG_CPU_32v5) :=-D__LINUX_ARM_ARCH__=5 -march=armv5te
arch-$(CONFIG_CPU_XSCALE) :=-D__LINUX_ARM_ARCH__=5 -march=armv4 -Wa,-mxscale #-march=armv5te
arch-$(CONFIG_CPU_32v3) :=-D__LINUX_ARM_ARCH__=3 -march=armv3
# This selects how we optimise for the processor.
tune-$(CONFIG_CPU_ARM610) :=-mtune=arm610
......@@ -48,13 +50,13 @@ tune-$(CONFIG_CPU_ARM922T) :=-mtune=arm9tdmi
tune-$(CONFIG_CPU_ARM926T) :=-mtune=arm9tdmi
tune-$(CONFIG_CPU_SA110) :=-mtune=strongarm110
tune-$(CONFIG_CPU_SA1100) :=-mtune=strongarm1100
tune-$(CONFIG_CPU_XSCALE) :=-mtune=strongarm #-mtune=xscale
tune-$(CONFIG_CPU_XSCALE) :=-mtune=strongarm -Wa,-mxscale #-mtune=xscale
# Force -mno-fpu to be passed to the assembler. Some versions of gcc don't
# do this with -msoft-float
CFLAGS_BOOT :=$(apcs-y) $(arch-y) $(tune-y) -mshort-load-bytes -msoft-float -Wa,-mno-fpu -Uarm
CFLAGS +=$(apcs-y) $(arch-y) $(tune-y) -mshort-load-bytes -msoft-float -Wa,-mno-fpu -Uarm
AFLAGS +=$(apcs-y) $(arch-y) -mno-fpu -msoft-float -Wa,-mno-fpu
AFLAGS +=$(apcs-y) $(arch-y) $(tune-y) -mno-fpu -msoft-float -Wa,-mno-fpu
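For illustration (an assumed configuration, not part of this diff): with CONFIG_CPU_32 and CONFIG_CPU_SA1100 selected, the fragments above expand to roughly

CFLAGS += -mapcs-32 -D__LINUX_ARM_ARCH__=4 -march=armv4 -mtune=strongarm1100 -mshort-load-bytes -msoft-float -Wa,-mno-fpu -Uarm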
#Default value
DATAADDR := .
......@@ -208,6 +210,7 @@ zi:; $(Q)$(MAKE) $(build)=$(boot) zinstall
)
arch/$(ARCH)/kernel/asm-offsets.s: include/asm include/linux/version.h \
include/asm-arm/.arch include/asm-arm/.proc \
include/config/MARKER
include/asm-$(ARCH)/constants.h: arch/$(ARCH)/kernel/asm-offsets.s
......
......@@ -50,6 +50,10 @@ __SA1100_start:
10:
#endif
mrc p15, 0, r0, c1, c0, 0 @ read control reg
ands r0, r0, #0x0d
beq 99f
@ Data cache might be active.
@ Be sure to flush kernel binary out of the cache,
@ whatever state it is, before it is turned off.
......@@ -68,11 +72,4 @@ __SA1100_start:
bic r0, r0, #0x0d @ clear WB, DC, MMU
bic r0, r0, #0x1000 @ clear Icache
mcr p15, 0, r0, c1, c0, 0
/*
* Pause for a short time so that we give enough time
* for the host to start a terminal up.
*/
mov r0, #0x00200000
1: subs r0, r0, #1
bne 1b
99:
......@@ -418,7 +418,7 @@ unsigned long get_wchan(struct task_struct *p)
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
stack_page = 4096 + (unsigned long)p;
stack_page = 4096 + (unsigned long)p->thread_info;
fp = thread_saved_fp(p);
do {
if (fp < stack_page || fp > 4092+stack_page)
......
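For context, a minimal sketch of the layout this fix relies on (the struct contents are an illustrative subset, not the exact 2.5 definition): the 8K kernel stack now sits on top of struct thread_info rather than struct task_struct, so the stack base must be computed from p->thread_info:

struct thread_info {
	unsigned long flags;		/* illustrative subset only */
	struct task_struct *task;	/* back-pointer to the task */
};

union thread_union {
	struct thread_info thread_info;	/* at the base of the stack */
	unsigned long stack[8192 / sizeof(unsigned long)];
};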
......@@ -74,6 +74,9 @@ struct cpu_tlb_fns cpu_tlb;
#ifdef MULTI_USER
struct cpu_user_fns cpu_user;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache;
#endif
unsigned char aux_device_present;
char elf_platform[ELF_PLATFORM_SIZE];
......@@ -282,6 +285,9 @@ static void __init setup_processor(void)
#ifdef MULTI_USER
cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
cpu_cache = *list->cache;
#endif
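A sketch of what struct cpu_cache_fns plausibly contains, inferred from the ordering of the v3/v4 cache function vectors later in this diff (the field names are assumptions, not taken from the header):

struct cpu_cache_fns {
	void (*flush_kern_all)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long start, unsigned long end,
				 unsigned int vm_flags);
	void (*coherent_kern_range)(unsigned long start, unsigned long end);
	void (*flush_kern_dcache_page)(void *page);
	void (*dma_inv_range)(unsigned long start, unsigned long end);
	void (*dma_clean_range)(unsigned long start, unsigned long end);
	void (*dma_flush_range)(unsigned long start, unsigned long end);
};

With MULTI_CACHE defined, setup_processor() copies the vector selected from the proc_info entry into this struct so every cache call dispatches through it; single-cache kernels can call the one implementation directly.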
printk("CPU: %s [%08x] revision %d (ARMv%s)\n",
cpu_name, processor_id, (int)processor_id & 15,
......@@ -323,58 +329,77 @@ static struct machine_desc * __init setup_machine(unsigned int nr)
return list;
}
static void __init early_initrd(char **p)
{
unsigned long start, size;
start = memparse(*p, p);
if (**p == ',') {
size = memparse((*p) + 1, p);
phys_initrd_start = start;
phys_initrd_size = size;
}
}
__early_param("initrd=", early_initrd);
/*
* Initial parsing of the command line. We need to pick out the
* memory size. We look for mem=size@start, where start and size
* are "size[KkMm]"
* Pick out the memory size. We look for mem=size@start,
* where start and size are "size[KkMm]"
*/
static void __init
parse_cmdline(struct meminfo *mi, char **cmdline_p, char *from)
static void __init early_mem(char **p)
{
static int usermem __initdata = 0;
unsigned long size, start;
/*
* If the user specifies memory size, we
* blow away any automatically generated
* size.
*/
if (usermem == 0) {
usermem = 1;
meminfo.nr_banks = 0;
}
start = PHYS_OFFSET;
size = memparse(*p, p);
if (**p == '@')
start = memparse(*p + 1, p);
meminfo.bank[meminfo.nr_banks].start = start;
meminfo.bank[meminfo.nr_banks].size = size;
meminfo.bank[meminfo.nr_banks].node = PHYS_TO_NID(start);
meminfo.nr_banks += 1;
}
__early_param("mem=", early_mem);
/*
* Initial parsing of the command line.
*/
static void __init parse_cmdline(char **cmdline_p, char *from)
{
char c = ' ', *to = command_line;
int usermem = 0, len = 0;
int len = 0;
for (;;) {
if (c == ' ' && !memcmp(from, "mem=", 4)) {
unsigned long size, start;
if (to != command_line)
to -= 1;
/*
* If the user specifies memory size, we
* blow away any automatically generated
* size.
*/
if (usermem == 0) {
usermem = 1;
mi->nr_banks = 0;
}
start = PHYS_OFFSET;
size = memparse(from + 4, &from);
if (*from == '@')
start = memparse(from + 1, &from);
mi->bank[mi->nr_banks].start = start;
mi->bank[mi->nr_banks].size = size;
mi->bank[mi->nr_banks].node = PHYS_TO_NID(start);
mi->nr_banks += 1;
} else if (c == ' ' && !memcmp(from, "initrd=", 7)) {
unsigned long start, size;
/*
* Remove space character
*/
if (to != command_line)
to -= 1;
start = memparse(from + 7, &from);
if (*from == ',') {
size = memparse(from + 1, &from);
phys_initrd_start = start;
phys_initrd_size = size;
if (c == ' ') {
extern struct early_params __early_begin, __early_end;
struct early_params *p;
for (p = &__early_begin; p < &__early_end; p++) {
int len = strlen(p->arg);
if (memcmp(from, p->arg, len) == 0) {
if (to != command_line)
to -= 1;
from += len;
p->fn(&from);
while (*from != ' ' && *from != '\0')
from++;
break;
}
}
}
c = *from++;
......@@ -536,6 +561,8 @@ __tagtable(ATAG_RAMDISK, parse_tag_ramdisk);
static int __init parse_tag_initrd(const struct tag *tag)
{
printk(KERN_WARNING "ATAG_INITRD is deprecated; "
"please update your bootloader.\n");
phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
phys_initrd_size = tag->u.initrd.size;
return 0;
......@@ -668,7 +695,7 @@ void __init setup_arch(char **cmdline_p)
memcpy(saved_command_line, from, COMMAND_LINE_SIZE);
saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
parse_cmdline(&meminfo, cmdline_p, from);
parse_cmdline(cmdline_p, from);
bootmem_init(&meminfo);
paging_init(&meminfo, mdesc);
request_standard_resources(&meminfo, mdesc);
......
......@@ -12,6 +12,7 @@
* CPU support functions
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/cpufreq.h>
......@@ -173,9 +174,9 @@ static int integrator_set_target(struct cpufreq_policy *policy,
return 0;
}
static int integrator_cpufreq_init(struct cpufreq *policy)
static int integrator_cpufreq_init(struct cpufreq_policy *policy)
{
unsigned long cus_allowed;
unsigned long cpus_allowed;
unsigned int cpu = policy->cpu;
u_int cm_osc, cm_stat, mem_freq_khz;
struct vco vco;
......
......@@ -18,6 +18,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/list.h>
#include <asm/hardware.h>
#include <asm/irq.h>
......
......@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/tty.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/serial_core.h>
#include <linux/delay.h>
......@@ -25,6 +26,7 @@
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
......
......@@ -20,16 +20,16 @@ obj-$(CONFIG_DISCONTIGMEM) += discontig.o
p-$(CONFIG_CPU_26) += proc-arm2_3.o
# ARMv3
p-$(CONFIG_CPU_ARM610) += proc-arm6_7.o tlb-v3.o copypage-v3.o
p-$(CONFIG_CPU_ARM710) += proc-arm6_7.o tlb-v3.o copypage-v3.o
p-$(CONFIG_CPU_ARM610) += proc-arm6_7.o tlb-v3.o cache-v3.o copypage-v3.o
p-$(CONFIG_CPU_ARM710) += proc-arm6_7.o tlb-v3.o cache-v3.o copypage-v3.o
# ARMv4
p-$(CONFIG_CPU_ARM720T) += proc-arm720.o tlb-v4.o copypage-v4wt.o abort-lv4t.o
p-$(CONFIG_CPU_ARM920T) += proc-arm920.o tlb-v4wbi.o copypage-v4wb.o abort-ev4t.o
p-$(CONFIG_CPU_ARM922T) += proc-arm922.o tlb-v4wbi.o copypage-v4wb.o abort-ev4t.o
p-$(CONFIG_CPU_ARM1020) += proc-arm1020.o tlb-v4wbi.o copypage-v4wb.o abort-ev4t.o
p-$(CONFIG_CPU_SA110) += proc-sa110.o tlb-v4wb.o copypage-v4wb.o abort-ev4.o minicache.o
p-$(CONFIG_CPU_SA1100) += proc-sa110.o tlb-v4wb.o copypage-v4mc.o abort-ev4.o minicache.o
p-$(CONFIG_CPU_ARM720T) += proc-arm720.o tlb-v4.o cache-v4.o copypage-v4wt.o abort-lv4t.o
p-$(CONFIG_CPU_ARM920T) += proc-arm920.o tlb-v4wbi.o cache-v4wt.o copypage-v4wb.o abort-ev4t.o
p-$(CONFIG_CPU_ARM922T) += proc-arm922.o tlb-v4wbi.o cache-v4wt.o copypage-v4wb.o abort-ev4t.o
p-$(CONFIG_CPU_ARM1020) += proc-arm1020.o tlb-v4wbi.o cache-v4wt.o copypage-v4wb.o abort-ev4t.o
p-$(CONFIG_CPU_SA110) += proc-sa110.o tlb-v4wb.o cache-v4wb.o copypage-v4wb.o abort-ev4.o
p-$(CONFIG_CPU_SA1100) += proc-sa1100.o tlb-v4wb.o cache-v4wb.o copypage-v4mc.o abort-ev4.o minicache.o
# ARMv5
p-$(CONFIG_CPU_ARM926T) += proc-arm926.o tlb-v4wbi.o copypage-v4wb.o abort-ev5tej.o
......
/*
* linux/arch/arm/mm/cache-v3.S
*
* Copyright (C) 1997-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/hardware.h>
#include <asm/page.h>
#include "proc-macros.S"
/*
* flush_user_cache_all()
*
* Invalidate all cache entries in a particular address
* space.
*
* - mm - mm_struct describing address space
*/
ENTRY(v3_flush_user_cache_all)
/* FALLTHROUGH */
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
ENTRY(v3_flush_kern_cache_all)
/* FALLTHROUGH */
/*
* flush_user_cache_range(start, end, vm_flags)
*
* Invalidate a range of cache entries in the specified
* address space.
*
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
* - vma - vm_area_struct describing address space
*/
ENTRY(v3_flush_user_cache_range)
mov ip, #0
mcreq p15, 0, ip, c7, c0, 0 @ flush ID cache
mov pc, lr
/*
* coherent_kern_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start and end. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v3_coherent_kern_range)
mov pc, lr
/*
* flush_kern_dcache_page(void *page)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
* - addr - page aligned address
*/
ENTRY(v3_flush_kern_dcache_page)
/* FALLTHROUGH */
/*
* dma_inv_range(start, end)
*
* Invalidate (discard) the specified virtual address range.
* May not write back any entries. If 'start' or 'end'
* are not cache line aligned, those lines must be written
* back.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v3_dma_inv_range)
/* FALLTHROUGH */
/*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v3_dma_flush_range)
mov r0, #0
mcr p15, 0, r0, c7, c0, 0 @ flush ID cache
/* FALLTHROUGH */
/*
* dma_clean_range(start, end)
*
* Clean (write back) the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v3_dma_clean_range)
mov pc, lr
ENTRY(v3_cache_fns)
.long v3_flush_kern_cache_all
.long v3_flush_user_cache_all
.long v3_flush_user_cache_range
.long v3_coherent_kern_range
.long v3_flush_kern_dcache_page
.long v3_dma_inv_range
.long v3_dma_clean_range
.long v3_dma_flush_range
/*
* linux/arch/arm/mm/cache-v4.S
*
* Copyright (C) 1997-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/hardware.h>
#include <asm/page.h>
#include "proc-macros.S"
/*
* flush_user_cache_all()
*
* Invalidate all cache entries in a particular address
* space.
*
* - mm - mm_struct describing address space
*/
ENTRY(v4_flush_user_cache_all)
/* FALLTHROUGH */
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
ENTRY(v4_flush_kern_cache_all)
mov r0, #0
mcr p15, 0, r0, c7, c7, 0 @ flush ID cache
mov pc, lr
/*
* flush_user_cache_range(start, end, vma)
*
* Invalidate a range of cache entries in the specified
* address space.
*
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
* - vma - vm_area_struct describing address space
*/
ENTRY(v4_flush_user_cache_range)
mov ip, #0
mcreq p15, 0, ip, c7, c7, 0 @ flush ID cache
mov pc, lr
/*
* coherent_kern_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start and end. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4_coherent_kern_range)
mov pc, lr
/*
* flush_kern_dcache_page(void *page)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
* - addr - page aligned address
*/
ENTRY(v4_flush_kern_dcache_page)
/* FALLTHROUGH */
/*
* dma_inv_range(start, end)
*
* Invalidate (discard) the specified virtual address range.
* May not write back any entries. If 'start' or 'end'
* are not cache line aligned, those lines must be written
* back.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4_dma_inv_range)
/* FALLTHROUGH */
/*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4_dma_flush_range)
mov r0, #0
mcr p15, 0, r0, c7, c7, 0 @ flush ID cache
/* FALLTHROUGH */
/*
* dma_clean_range(start, end)
*
* Clean (write back) the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4_dma_clean_range)
mov pc, lr
ENTRY(v4_cache_fns)
.long v4_flush_kern_cache_all
.long v4_flush_user_cache_all
.long v4_flush_user_cache_range
.long v4_coherent_kern_range
.long v4_flush_kern_dcache_page
.long v4_dma_inv_range
.long v4_dma_clean_range
.long v4_dma_flush_range
/*
* linux/arch/arm/mm/cache-v4wb.S
*
* Copyright (C) 1997-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/hardware.h>
#include <asm/page.h>
#include "proc-macros.S"
/*
* The size of one data cache line.
*/
#define CACHE_DLINESIZE 32
/*
* The total size of the data cache.
*/
#if defined(CONFIG_CPU_SA110)
# define CACHE_DSIZE 16384
#elif defined(CONFIG_CPU_SA1100)
# define CACHE_DSIZE 8192
#else
# error Unknown cache size
#endif
/*
* This is the size at which it becomes more efficient to
* clean the whole cache, rather than using the individual
* cache line maintenance instructions.
*
* Size Clean (ticks) Dirty (ticks)
* 4096 21 20 21 53 55 54
* 8192 40 41 40 106 100 102
* 16384 77 77 76 140 140 138
* 32768 150 149 150 214 216 212 <---
* 65536 296 297 296 351 358 361
* 131072 591 591 591 656 657 651
* Whole 132 136 132 221 217 207 <---
*/
#define CACHE_DLIMIT (CACHE_DSIZE * 4)
/*
* flush_user_cache_all()
*
* Clean and invalidate all cache entries in a particular address
* space.
*/
ENTRY(v4wb_flush_user_cache_all)
/* FALLTHROUGH */
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
ENTRY(v4wb_flush_kern_cache_all)
mov ip, #0
mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
__flush_whole_cache:
mov r0, #FLUSH_BASE
add r1, r0, #CACHE_DSIZE
1: ldr r2, [r0], #32
cmp r0, r1
blo 1b
mcr p15, 0, ip, c7, c10, 4 @ drain write buffer
mov pc, lr
/*
* flush_user_cache_range(start, end, vm_flags)
*
* Invalidate a range of cache entries in the specified
* address space.
*
* - start - start address (inclusive, page aligned)
* - end - end address (exclusive, page aligned)
* - vma - vm_area_struct describing address space
*/
ENTRY(v4wb_flush_user_cache_range)
sub r3, r1, r0 @ calculate total size
tst r2, #VM_EXEC @ executable region?
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
cmp r3, #CACHE_DLIMIT @ total size >= limit?
bhs __flush_whole_cache @ flush whole D cache
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c10, 4 @ drain write buffer
mov pc, lr
/*
* flush_kern_dcache_page(void *page)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
* - addr - page aligned address
*/
ENTRY(v4wb_flush_kern_dcache_page)
add r1, r0, #PAGE_SZ
/* fall through */
/*
* coherent_kern_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start and end. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4wb_coherent_kern_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mov ip, #0
mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
/*
* dma_inv_range(start, end)
*
* Invalidate (discard) the specified virtual address range.
* May not write back any entries. If 'start' or 'end'
* are not cache line aligned, those lines must be written
* back.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4wb_dma_inv_range)
tst r0, #CACHE_DLINESIZE - 1
bic r0, r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
tst r1, #CACHE_DLINESIZE - 1
mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mov pc, lr
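The edge handling above is subtle enough to deserve a C rendering (a sketch only; the helper functions are hypothetical stand-ins for the mcr instructions):

#define CACHE_DLINESIZE 32

extern void clean_dcache_entry(unsigned long addr);		/* hypothetical */
extern void invalidate_dcache_entry(unsigned long addr);	/* hypothetical */
extern void drain_write_buffer(void);				/* hypothetical */

void dma_inv_range_sketch(unsigned long start, unsigned long end)
{
	/* Partially covered lines at either edge may hold live data
	 * outside the range, so clean them before invalidating. */
	if (start & (CACHE_DLINESIZE - 1))
		clean_dcache_entry(start);
	if (end & (CACHE_DLINESIZE - 1))
		clean_dcache_entry(end);

	for (start &= ~(CACHE_DLINESIZE - 1); start < end;
	     start += CACHE_DLINESIZE)
		invalidate_dcache_entry(start);

	drain_write_buffer();
}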
/*
* dma_clean_range(start, end)
*
* Clean (write back) the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4wb_dma_clean_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mov pc, lr
/*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*
* This is actually the same as v4wb_coherent_kern_range()
*/
.globl v4wb_dma_flush_range
.set v4wb_dma_flush_range, v4wb_coherent_kern_range
ENTRY(v4wb_cache_fns)
.long v4wb_flush_kern_cache_all
.long v4wb_flush_user_cache_all
.long v4wb_flush_user_cache_range
.long v4wb_coherent_kern_range
.long v4wb_flush_kern_dcache_page
.long v4wb_dma_inv_range
.long v4wb_dma_clean_range
.long v4wb_dma_flush_range
/*
* linux/arch/arm/mm/cache-v4wt.S
*
* Copyright (C) 1997-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* ARMv4 write through cache operations support.
*
* We assume that the write buffer is not enabled.
*/
#include <linux/linkage.h>
#include <asm/hardware.h>
#include <asm/page.h>
#include "proc-macros.S"
/*
* The size of one data cache line.
*/
#define CACHE_DLINESIZE 32
/*
* The number of data cache segments.
*/
#define CACHE_DSEGMENTS 8
/*
* The number of lines in a cache segment.
*/
#define CACHE_DENTRIES 64
/*
* This is the size at which it becomes more efficient to
* clean the whole cache, rather than using the individual
* cache line maintenance instructions.
*
* *** This needs benchmarking
*/
#define CACHE_DLIMIT 16384
/*
* flush_user_cache_all()
*
* Invalidate all cache entries in a particular address
* space.
*/
ENTRY(v4wt_flush_user_cache_all)
/* FALLTHROUGH */
/*
* flush_kern_cache_all()
*
* Clean and invalidate the entire cache.
*/
ENTRY(v4wt_flush_kern_cache_all)
mov r2, #VM_EXEC
mov ip, #0
__flush_whole_cache:
tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
mov pc, lr
/*
* flush_user_cache_range(start, end, vm_flags)
*
* Clean and invalidate a range of cache entries in the specified
* address space.
*
* - start - start address (inclusive, page aligned)
* - end - end address (exclusive, page aligned)
* - vma - vm_area_struct describing address space
*/
ENTRY(v4wt_flush_user_cache_range)
sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT
bhs __flush_whole_cache
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
tst r2, #VM_EXEC
mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mov pc, lr
/*
* coherent_kern_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start and end. If you have non-snooping
* Harvard caches, you need to implement this function.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4wt_coherent_kern_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mov pc, lr
/*
* flush_kern_dcache_page(void *page)
*
* Ensure no D cache aliasing occurs, either with itself or
* the I cache
*
* - addr - page aligned address
*/
ENTRY(v4wt_flush_kern_dcache_page)
mov r2, #0
mcr p15, 0, r2, c7, c5, 0 @ invalidate I cache
add r1, r0, #PAGE_SZ
/* fallthrough */
/*
* dma_inv_range(start, end)
*
* Invalidate (discard) the specified virtual address range.
* May not write back any entries. If 'start' or 'end'
* are not cache line aligned, those lines must be written
* back.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4wt_dma_inv_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
/* FALLTHROUGH */
/*
* dma_clean_range(start, end)
*
* Clean the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
ENTRY(v4wt_dma_clean_range)
mov pc, lr
/*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
*
* - start - virtual start address
* - end - virtual end address
*/
.globl v4wt_dma_flush_range
.equ v4wt_dma_flush_range, v4wt_dma_inv_range
ENTRY(v4wt_cache_fns)
.long v4wt_flush_kern_cache_all
.long v4wt_flush_user_cache_all
.long v4wt_flush_user_cache_range
.long v4wt_coherent_kern_range
.long v4wt_flush_kern_dcache_page
.long v4wt_dma_inv_range
.long v4wt_dma_clean_range
.long v4wt_dma_flush_range
......@@ -161,11 +161,11 @@ void *consistent_alloc(int gfp, size_t size, dma_addr_t *handle,
/*
* Invalidate any data that might be lurking in the
* kernel direct-mapped region.
* kernel direct-mapped region for device DMA.
*/
{
unsigned long kaddr = (unsigned long)page_address(page);
invalidate_dcache_range(kaddr, kaddr + size);
dmac_inv_range(kaddr, kaddr + size);
}
/*
......@@ -330,7 +330,7 @@ static int __init consistent_init(void)
core_initcall(consistent_init);
/*
* make an area consistent.
* make an area consistent for devices.
*/
void consistent_sync(void *vaddr, size_t size, int direction)
{
......@@ -339,13 +339,13 @@ void consistent_sync(void *vaddr, size_t size, int direction)
switch (direction) {
case DMA_FROM_DEVICE: /* invalidate only */
invalidate_dcache_range(start, end);
dmac_inv_range(start, end);
break;
case DMA_TO_DEVICE: /* writeback only */
clean_dcache_range(start, end);
dmac_clean_range(start, end);
break;
case DMA_BIDIRECTIONAL: /* writeback and invalidate */
flush_dcache_range(start, end);
dmac_flush_range(start, end);
break;
default:
BUG();
......
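A hypothetical driver-side sketch of how consistent_sync() pairs with the DMA direction (function and buffer names are illustrative, not from this diff):

void example_start_tx(void *buf, size_t len)
{
	/* CPU filled the buffer: clean it so the device sees the data */
	consistent_sync(buf, len, DMA_TO_DEVICE);
	/* ... start the DMA transfer ... */
}

void example_finish_rx(void *buf, size_t len)
{
	/* device filled the buffer: discard stale CPU cache lines */
	consistent_sync(buf, len, DMA_FROM_DEVICE);
	/* ... hand the data up the stack ... */
}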
......@@ -184,9 +184,8 @@ void __flush_dcache_page(struct page *page)
{
struct mm_struct *mm = current->active_mm;
struct list_head *l;
unsigned long kaddr = (unsigned long)page_address(page);
cpu_cache_clean_invalidate_range(kaddr, kaddr + PAGE_SIZE, 0);
__cpuc_flush_dcache_page(page_address(page));
if (!page->mapping)
return;
......@@ -291,10 +290,9 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
page = pfn_to_page(pfn);
if (page->mapping) {
int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
unsigned long kaddr = (unsigned long)page_address(page);
if (dirty)
cpu_cache_clean_invalidate_range(kaddr, kaddr + PAGE_SIZE, 0);
__cpuc_flush_dcache_page(page_address(page));
make_coherent(vma, addr, page, dirty);
}
......
......@@ -12,19 +12,15 @@
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include "fault.h"
......
......@@ -24,30 +24,82 @@
#include <asm/mach/map.h>
static unsigned int cachepolicy __initdata = PMD_SECT_WB;
static unsigned int ecc_mask __initdata = 0;
struct cachepolicy {
char *policy;
unsigned int cr_mask;
unsigned int pmd;
};
static struct cachepolicy cache_policies[] __initdata = {
{ "uncached", CR1_W|CR1_C, PMD_SECT_UNCACHED },
{ "buffered", CR1_C, PMD_SECT_BUFFERED },
{ "writethrough", 0, PMD_SECT_WT },
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
{ "writeback", 0, PMD_SECT_WB },
{ "writealloc", 0, PMD_SECT_WBWA }
#endif
};
/*
* These are useful for identifying cache coherency
* problems by allowing the cache, or the cache and
* write buffer, to be turned off. (Note: the write
* buffer should not be on and the cache off).
*/
static int __init nocache_setup(char *__unused)
static void __init early_cachepolicy(char **p)
{
cr_alignment &= ~CR1_C;
cr_no_alignment &= ~CR1_C;
int i;
for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
int len = strlen(cache_policies[i].policy);
if (memcmp(*p, cache_policies[i].policy, len) == 0) {
cachepolicy = cache_policies[i].pmd;
cr_alignment &= ~cache_policies[i].cr_mask;
cr_no_alignment &= ~cache_policies[i].cr_mask;
*p += len;
break;
}
}
if (i == ARRAY_SIZE(cache_policies))
printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
flush_cache_all();
set_cr(cr_alignment);
return 1;
}
static int __init nowrite_setup(char *__unused)
static void __init early_nocache(char **__unused)
{
cr_alignment &= ~(CR1_W|CR1_C);
cr_no_alignment &= ~(CR1_W|CR1_C);
flush_cache_all();
set_cr(cr_alignment);
return 1;
char *p = "buffered";
printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
early_cachepolicy(&p);
}
static void __init early_nowrite(char **__unused)
{
char *p = "uncached";
printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
early_cachepolicy(&p);
}
static void __init early_ecc(char **p)
{
if (memcmp(*p, "on", 2) == 0) {
ecc_mask = PMD_PROTECTION;
*p += 2;
} else if (memcmp(*p, "off", 3) == 0) {
ecc_mask = 0;
*p += 3;
}
}
__early_param("nocache", early_nocache);
__early_param("nowb", early_nowrite);
__early_param("cachepolicy=", early_cachepolicy);
__early_param("ecc=", early_ecc);
static int __init noalign_setup(char *__unused)
{
cr_alignment &= ~CR1_A;
......@@ -57,8 +109,6 @@ static int __init noalign_setup(char *__unused)
}
__setup("noalign", noalign_setup);
__setup("nocache", nocache_setup);
__setup("nowb", nowrite_setup);
#define FIRST_KERNEL_PGD_NR (FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)
......@@ -197,7 +247,7 @@ alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pg
pmdval = __pa(ptep) | prot_l1;
pmdp[0] = __pmd(pmdval);
pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
cpu_flush_pmd(pmdp);
flush_pmd_entry(pmdp);
}
ptep = pte_offset_kernel(pmdp, virt);
......@@ -231,32 +281,20 @@ static struct mem_types mem_types[] __initdata = {
.domain = DOMAIN_IO,
},
[MT_CACHECLEAN] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_CACHEABLE | L_PTE_BUFFERABLE,
.prot_l1 = PMD_TYPE_TABLE | PMD_BIT4,
.prot_sect = PMD_TYPE_SECT | PMD_BIT4,
.domain = DOMAIN_KERNEL,
},
[MT_MINICLEAN] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_CACHEABLE,
.prot_l1 = PMD_TYPE_TABLE | PMD_BIT4,
.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_MINICACHE,
.domain = DOMAIN_KERNEL,
},
[MT_VECTORS] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_CACHEABLE | L_PTE_BUFFERABLE |
L_PTE_EXEC,
.prot_l1 = PMD_TYPE_TABLE | PMD_BIT4,
.prot_sect = PMD_TYPE_SECT | PMD_BIT4,
.domain = DOMAIN_USER,
},
[MT_MEMORY] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_CACHEABLE | L_PTE_BUFFERABLE |
L_PTE_EXEC | L_PTE_WRITE,
.prot_l1 = PMD_TYPE_TABLE | PMD_BIT4,
.prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_AP_WRITE,
.domain = DOMAIN_KERNEL,
}
......@@ -268,37 +306,50 @@ static struct mem_types mem_types[] __initdata = {
static void __init build_mem_type_table(void)
{
int cpu_arch = cpu_architecture();
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
int writethrough = 1;
#else
int writethrough = 0;
#endif
int writealloc = 0, ecc = 0;
const char *policy;
if (cpu_arch < CPU_ARCH_ARMv5) {
writealloc = 0;
ecc = 0;
/*
* ARMv5 can use ECC memory.
*/
if (cpu_arch == CPU_ARCH_ARMv5) {
mem_types[MT_VECTORS].prot_l1 |= ecc_mask;
mem_types[MT_MEMORY].prot_sect |= ecc_mask;
} else {
mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
if (cachepolicy == PMD_SECT_WBWA)
cachepolicy = PMD_SECT_WB;
ecc_mask = 0;
}
if (writethrough) {
mem_types[MT_MEMORY].prot_sect |= cachepolicy;
switch (cachepolicy) {
default:
case PMD_SECT_UNCACHED:
policy = "uncached";
break;
case PMD_SECT_BUFFERED:
mem_types[MT_VECTORS].prot_pte |= PTE_BUFFERABLE;
policy = "buffered";
break;
case PMD_SECT_WT:
mem_types[MT_VECTORS].prot_pte |= PTE_BUFFERABLE|PTE_CACHEABLE;
mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
mem_types[MT_VECTORS].prot_sect |= PMD_SECT_WT;
mem_types[MT_MEMORY].prot_sect |= PMD_SECT_WT;
} else {
policy = "write through";
break;
case PMD_SECT_WB:
mem_types[MT_VECTORS].prot_pte |= PTE_BUFFERABLE|PTE_CACHEABLE;
mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
mem_types[MT_VECTORS].prot_sect |= PMD_SECT_WB;
if (writealloc)
mem_types[MT_MEMORY].prot_sect |= PMD_SECT_WBWA;
else
mem_types[MT_MEMORY].prot_sect |= PMD_SECT_WB;
}
if (ecc) {
mem_types[MT_VECTORS].prot_sect |= PMD_PROTECTION;
mem_types[MT_MEMORY].prot_sect |= PMD_PROTECTION;
policy = "write back";
break;
case PMD_SECT_WBWA:
mem_types[MT_VECTORS].prot_pte |= PTE_BUFFERABLE|PTE_CACHEABLE;
mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
policy = "write back, write allocate";
break;
}
printk("Memory policy: ECC %sabled, Data cache %s\n",
ecc_mask ? "en" : "dis", policy);
}
/*
......@@ -330,6 +381,14 @@ static void __init create_mapping(struct map_desc *md)
off = md->physical - virt;
length = md->length;
if (mem_types[md->type].prot_l1 == 0 &&
(virt & 0xfffff || (virt + off) & 0xfffff || (virt + length) & 0xfffff)) {
printk(KERN_WARNING "MM: map for 0x%08lx at 0x%08lx can not "
"be mapped using pages, ignoring.\n",
md->physical, md->virtual);
return;
}
while ((virt & 0xfffff || (virt + off) & 0xfffff) && length >= PAGE_SIZE) {
alloc_init_page(virt, virt + off, prot_l1, prot_pte);
......
......@@ -162,7 +162,7 @@ memc_phys_table_32:
* and inaccessible (0x01f00000).
* Params : r0 = page table pointer
*/
clear_tables: ldr r1, _arm3_set_pgd - 4
clear_tables: ldr r1, _arm3_switch_mm - 4
ldr r2, [r1]
sub r1, r0, #256 * 4 @ start of MEMC tables
add r2, r1, r2, lsl #2 @ end of tables
......@@ -186,14 +186,16 @@ clear_tables: ldr r1, _arm3_set_pgd - 4
mov pc, lr
/*
* Function: *_set_pgd(pgd_t *pgd)
* Function: *_switch_mm(pgd_t *pgd)
* Params : pgd New page tables/MEMC mapping
* Purpose : update MEMC hardware with new mapping
*/
.word page_nr
_arm3_set_pgd: mcr p15, 0, r1, c1, c0, 0 @ flush cache
_arm2_set_pgd: stmfd sp!, {lr}
ldr r1, _arm3_set_pgd - 4
_arm3_switch_mm:
mcr p15, 0, r1, c1, c0, 0 @ flush cache
_arm2_switch_mm:
stmfd sp!, {lr}
ldr r1, _arm3_switch_mm - 4
ldr r2, [r1]
sub r0, r0, #256 * 4 @ start of MEMC tables
add r1, r0, r2, lsl #2 @ end of tables
......@@ -273,9 +275,6 @@ _arm2_xchg_4: mov r2, pc
_arm3_xchg_4: swp r0, r0, [r1]
movs pc, lr
_arm2_3_check_bugs:
bics pc, lr, #0x04000000 @ Clear FIQ disable bit
cpu_arm2_name:
.asciz "ARM 2"
cpu_arm250_name:
......@@ -290,28 +289,25 @@ cpu_arm3_name:
*/
.globl arm2_processor_functions
arm2_processor_functions:
.word _arm2_3_check_bugs
.word _arm2_proc_init
.word _arm2_proc_fin
.word _arm2_set_pgd
.word _arm2_switch_mm
.word _arm2_xchg_1
.word _arm2_xchg_4
.globl arm250_processor_functions
arm250_processor_functions:
.word _arm2_3_check_bugs
.word _arm2_proc_init
.word _arm2_proc_fin
.word _arm2_set_pgd
.word _arm2_switch_mm
.word _arm3_xchg_1
.word _arm3_xchg_4
.globl arm3_processor_functions
arm3_processor_functions:
.word _arm2_3_check_bugs
.word _arm3_proc_init
.word _arm3_proc_fin
.word _arm3_set_pgd
.word _arm3_switch_mm
.word _arm3_xchg_1
.word _arm3_xchg_4
......
......@@ -188,20 +188,6 @@ Ldata_lateldrpostreg:
addeq r7, r0, r2
b Ldata_saver7
/*
* Function: arm6_7_check_bugs (void)
* : arm6_7_proc_init (void)
* : arm6_7_proc_fin (void)
*
* Notes : This processor does not require these
*/
ENTRY(cpu_arm6_check_bugs)
ENTRY(cpu_arm7_check_bugs)
mrs ip, cpsr
bic ip, ip, #PSR_F_BIT
msr cpsr, ip
mov pc, lr
ENTRY(cpu_arm6_proc_init)
ENTRY(cpu_arm7_proc_init)
mov pc, lr
......@@ -220,30 +206,19 @@ ENTRY(cpu_arm7_do_idle)
mov pc, lr
/*
* Function: arm6_7_set_pgd(unsigned long pgd_phys)
* Function: arm6_7_switch_mm(unsigned long pgd_phys)
* Params : pgd_phys Physical address of page table
* Purpose : Perform a task switch, saving the old process's state, and restoring
* the new.
*/
ENTRY(cpu_arm6_set_pgd)
ENTRY(cpu_arm7_set_pgd)
ENTRY(cpu_arm6_switch_mm)
ENTRY(cpu_arm7_switch_mm)
mov r1, #0
mcr p15, 0, r1, c7, c0, 0 @ flush cache
mcr p15, 0, r0, c2, c0, 0 @ update page table ptr
mcr p15, 0, r1, c5, c0, 0 @ flush TLBs
mov pc, lr
/*
* Function: arm6_flush_pmd(pmdp)
*
* Params : r0 = Address to set
*
* Purpose : Set a PMD and flush it out of any WB cache
*/
ENTRY(cpu_arm6_flush_pmd)
ENTRY(cpu_arm7_flush_pmd)
mov pc, lr
/*
* Function: arm6_7_set_pte(pte_t *ptep, pte_t pte)
* Params : r0 = Address to set
......@@ -324,7 +299,6 @@ __arm7_setup: mov r0, #0
.type arm6_processor_functions, #object
ENTRY(arm6_processor_functions)
.word cpu_arm6_data_abort
.word cpu_arm6_check_bugs
.word cpu_arm6_proc_init
.word cpu_arm6_proc_fin
.word cpu_arm6_reset
......@@ -345,8 +319,7 @@ ENTRY(arm6_processor_functions)
.word cpu_arm6_icache_invalidate_page
/* pgtable */
.word cpu_arm6_set_pgd
.word cpu_arm6_flush_pmd
.word cpu_arm6_switch_mm
.word cpu_arm6_set_pte
.size arm6_processor_functions, . - arm6_processor_functions
......@@ -358,7 +331,6 @@ ENTRY(arm6_processor_functions)
.type arm7_processor_functions, #object
ENTRY(arm7_processor_functions)
.word cpu_arm7_data_abort
.word cpu_arm7_check_bugs
.word cpu_arm7_proc_init
.word cpu_arm7_proc_fin
.word cpu_arm7_reset
......@@ -379,8 +351,7 @@ ENTRY(arm7_processor_functions)
.word cpu_arm7_icache_invalidate_page
/* pgtable */
.word cpu_arm7_set_pgd
.word cpu_arm7_flush_pmd
.word cpu_arm7_switch_mm
.word cpu_arm7_set_pte
.size arm7_processor_functions, . - arm7_processor_functions
......
......@@ -38,47 +38,12 @@
#include <asm/hardware.h>
/*
* Function: arm720_cache_clean_invalidate_all (void)
* : arm720_cache_clean_invalidate_page (unsigned long address, int size,
* int flags)
*
* Params : address Area start address
* : size size of area
* : flags b0 = I cache as well
*
* Purpose : Flush all cache lines
*/
ENTRY(cpu_arm720_cache_clean_invalidate_all)
ENTRY(cpu_arm720_cache_clean_invalidate_range)
ENTRY(cpu_arm720_icache_invalidate_range)
ENTRY(cpu_arm720_icache_invalidate_page)
ENTRY(cpu_arm720_dcache_invalidate_range)
mov r0, #0
mcr p15, 0, r0, c7, c7, 0 @ flush cache
mov pc, lr
/*
* These just expect cache lines to be cleaned. Since we have a writethrough
* cache, we never have any dirty cachelines to worry about.
*/
ENTRY(cpu_arm720_dcache_clean_range)
ENTRY(cpu_arm720_dcache_clean_page)
ENTRY(cpu_arm720_dcache_clean_entry)
mov pc, lr
/*
* Function: arm720_check_bugs (void)
* : arm720_proc_init (void)
* Function: arm720_proc_init (void)
* : arm720_proc_fin (void)
*
* Notes : This processor does not require these
*/
ENTRY(cpu_arm720_check_bugs)
mrs ip, cpsr
bic ip, ip, #PSR_F_BIT
msr cpsr, ip
mov pc, lr
ENTRY(cpu_arm720_dcache_clean_area)
ENTRY(cpu_arm720_proc_init)
mov pc, lr
......@@ -102,28 +67,18 @@ ENTRY(cpu_arm720_do_idle)
mov pc, lr
/*
* Function: arm720_set_pgd(unsigned long pgd_phys)
* Function: arm720_switch_mm(unsigned long pgd_phys)
* Params : pgd_phys Physical address of page table
* Purpose : Perform a task switch, saving the old process' state and restoring
* the new.
*/
ENTRY(cpu_arm720_set_pgd)
ENTRY(cpu_arm720_switch_mm)
mov r1, #0
mcr p15, 0, r1, c7, c7, 0 @ invalidate cache
mcr p15, 0, r0, c2, c0, 0 @ update page table ptr
mcr p15, 0, r1, c8, c7, 0 @ flush TLB (v4)
mov pc, lr
/*
* Function: arm720_flush_pmd(pmdp)
*
* Params : r0 = Address to set
*
* Purpose : Set a PMD and flush it out of any WB cache
*/
ENTRY(cpu_arm720_flush_pmd)
mov pc, lr
/*
* Function: arm720_set_pte(pte_t *ptep, pte_t pte)
* Params : r0 = Address to set
......@@ -140,7 +95,7 @@ ENTRY(cpu_arm720_set_pte)
bic r2, r2, #3
orr r2, r2, #HPTE_TYPE_SMALL
tst r1, #LPTE_USER | LPTE_EXEC @ User or Exec?
tst r1, #LPTE_USER @ User?
orrne r2, r2, #HPTE_AP_READ
tst r1, #LPTE_WRITE | LPTE_DIRTY @ Write and Dirty?
......@@ -194,31 +149,13 @@ __arm720_setup: mov r0, #0
.type arm720_processor_functions, #object
ENTRY(arm720_processor_functions)
.word v4t_late_abort
.word cpu_arm720_check_bugs
.word cpu_arm720_proc_init
.word cpu_arm720_proc_fin
.word cpu_arm720_reset
.word cpu_arm720_do_idle
/* cache */
.word cpu_arm720_cache_clean_invalidate_all
.word cpu_arm720_cache_clean_invalidate_range
/* dcache */
.word cpu_arm720_dcache_invalidate_range
.word cpu_arm720_dcache_clean_range
.word cpu_arm720_dcache_clean_page
.word cpu_arm720_dcache_clean_entry
/* icache */
.word cpu_arm720_icache_invalidate_range
.word cpu_arm720_icache_invalidate_page
/* pgtable */
.word cpu_arm720_set_pgd
.word cpu_arm720_flush_pmd
.word cpu_arm720_dcache_clean_area
.word cpu_arm720_switch_mm
.word cpu_arm720_set_pte
.size arm720_processor_functions, . - arm720_processor_functions
.type cpu_arch_name, #object
......@@ -249,4 +186,5 @@ __arm720_proc_info:
.long arm720_processor_functions
.long v4_tlb_fns
.long v4wt_user_fns
.long v4_cache_fns
.size __arm720_proc_info, . - __arm720_proc_info
/*
* linux/arch/arm/mm/proc-sa1100.S
*
* Copyright (C) 1997-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* MMU functions for SA-1100 and SA-1110
*
* These are the low level assembler for performing cache and TLB
* functions on the StrongARM-1100 and StrongARM-1110.
*
* Note that SA1100 and SA1110 share everything but their name and CPU ID.
*
* 12-jun-2000, Erik Mouw (J.A.K.Mouw@its.tudelft.nl):
* Flush the read buffer at context switches
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/constants.h>
#include <asm/procinfo.h>
#include <asm/hardware.h>
#include <asm/proc/pgtable.h>
/*
* the cache line size of the I and D cache
*/
#define DCACHELINESIZE 32
#define FLUSH_OFFSET 32768
.macro flush_1100_dcache rd, ra, re
ldr \rd, =flush_base
ldr \ra, [\rd]
eor \ra, \ra, #FLUSH_OFFSET
str \ra, [\rd]
add \re, \ra, #8192 @ only necessary for 8k
1001: ldr \rd, [\ra], #DCACHELINESIZE
teq \re, \ra
bne 1001b
#ifdef FLUSH_BASE_MINICACHE
add \ra, \ra, #FLUSH_BASE_MINICACHE - FLUSH_BASE
add \re, \ra, #512 @ only 512 bytes
1002: ldr \rd, [\ra], #DCACHELINESIZE
teq \re, \ra
bne 1002b
#endif
.endm
.data
flush_base:
.long FLUSH_BASE
.text
__INIT
/*
* cpu_sa1100_proc_init()
*/
ENTRY(cpu_sa1100_proc_init)
mov r0, #0
mcr p15, 0, r0, c15, c1, 2 @ Enable clock switching
mcr p15, 0, r0, c9, c0, 5 @ Allow read-buffer operations from userland
mov pc, lr
.previous
/*
* cpu_sa1100_proc_fin()
*
* Prepare the CPU for reset:
* - Disable interrupts
* - Clean and turn off caches.
*/
ENTRY(cpu_sa1100_proc_fin)
stmfd sp!, {lr}
mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
msr cpsr_c, ip
flush_1100_dcache r0, r1, r2 @ clean caches
mov r0, #0
mcr p15, 0, r0, c15, c2, 2 @ Disable clock switching
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ldmfd sp!, {pc}
/*
* cpu_sa1100_reset(loc)
*
* Perform a soft reset of the system. Put the CPU into the
* same state as it would be if it had been reset, and branch
* to what would be the reset vector.
*
* loc: location to jump to for soft reset
*/
.align 5
ENTRY(cpu_sa1100_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
mrc p15, 0, ip, c1, c0, 0 @ ctrl register
bic ip, ip, #0x000f @ ............wcam
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
/*
* cpu_sa1100_do_idle(type)
*
* Cause the processor to idle
*
* type: call type:
* 0 = slow idle
* 1 = fast idle
* 2 = switch to slow processor clock
* 3 = switch to fast processor clock
*/
.align 5
ENTRY(cpu_sa1100_do_idle)
mov r0, r0 @ 4 nop padding
mov r0, r0
mov r0, r0
mov r0, r0 @ 4 nop padding
mov r0, r0
mov r0, r0
mov r0, #0
ldr r1, =UNCACHEABLE_ADDR @ ptr to uncacheable address
@ --- aligned to a cache line
mcr p15, 0, r0, c15, c2, 2 @ disable clock switching
ldr r1, [r1, #0] @ force switch to MCLK
mcr p15, 0, r0, c15, c8, 2 @ wait for interrupt
mov r0, r0 @ safety
mcr p15, 0, r0, c15, c1, 2 @ enable clock switching
mov pc, lr
/* ================================= CACHE ================================ */
/*
* cpu_sa1100_dcache_clean_area(addr,sz)
*
* Clean the specified entry of any caches such that the MMU
* translation fetches will obtain correct data.
*
* addr: cache-unaligned virtual address
*/
.align 5
ENTRY(cpu_sa1100_dcache_clean_area)
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #DCACHELINESIZE
subs r1, r1, #DCACHELINESIZE
bhi 1b
mov pc, lr
/* =============================== PageTable ============================== */
/*
* cpu_sa1100_switch_mm(pgd)
*
* Set the translation base pointer to be as described by pgd.
*
* pgd: new page tables
*/
.align 5
ENTRY(cpu_sa1100_switch_mm)
flush_1100_dcache r3, ip, r1
mov ip, #0
mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcr p15, 0, ip, c9, c0, 0 @ invalidate RB
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
mov pc, lr
/*
* cpu_sa1100_set_pte(ptep, pte)
*
* Set a PTE and flush it out
*/
.align 5
ENTRY(cpu_sa1100_set_pte)
str r1, [r0], #-2048 @ linux version
eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
bic r2, r1, #0xff0
bic r2, r2, #3
orr r2, r2, #PTE_TYPE_SMALL
tst r1, #L_PTE_USER @ User or Exec?
orrne r2, r2, #PTE_SMALL_AP_URO_SRW
tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
movne r2, #0
str r2, [r0] @ hardware version
mov r0, r0
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
cpu_sa1100_name:
.asciz "StrongARM-1100"
cpu_sa1110_name:
.asciz "StrongARM-1110"
.align
__INIT
__sa1100_setup:
mov r10, #0
mcr p15, 0, r10, c7, c7 @ invalidate I,D caches on v4
mcr p15, 0, r10, c7, c10, 4 @ drain write buffer on v4
mcr p15, 0, r10, c8, c7 @ invalidate I,D TLBs on v4
mov r0, #0x1f @ Domains 0, 1 = client
mcr p15, 0, r0, c3, c0 @ load domain access register
mcr p15, 0, r4, c2, c0 @ load page table pointer
mrc p15, 0, r0, c1, c0 @ get control register v4
bic r0, r0, #0x0e00 @ ..VI ZFRS BLDP WCAM
bic r0, r0, #0x0002 @ .... 000. .... ..0.
orr r0, r0, #0x003d
orr r0, r0, #0x3100 @ ..11 ...1 ..11 11.1
mov pc, lr
.text
/*
* Purpose : Function pointers used to access above functions - all calls
* come through these
*/
/*
* SA1100 and SA1110 share the same function calls
*/
.type sa1100_processor_functions, #object
ENTRY(sa1100_processor_functions)
.word v4_early_abort
.word cpu_sa1100_proc_init
.word cpu_sa1100_proc_fin
.word cpu_sa1100_reset
.word cpu_sa1100_do_idle
.word cpu_sa1100_dcache_clean_area
.word cpu_sa1100_switch_mm
.word cpu_sa1100_set_pte
.size sa1100_processor_functions, . - sa1100_processor_functions
.type cpu_arch_name, #object
cpu_arch_name:
.asciz "armv4"
.size cpu_arch_name, . - cpu_arch_name
.type cpu_elf_name, #object
cpu_elf_name:
.asciz "v4"
.size cpu_elf_name, . - cpu_elf_name
.align
.section ".proc.info", #alloc, #execinstr
.type __sa1100_proc_info,#object
__sa1100_proc_info:
.long 0x4401a110
.long 0xfffffff0
.long 0x00000c0e
b __sa1100_setup
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT
.long cpu_sa1100_name
.long sa1100_processor_functions
.long v4wb_tlb_fns
.long v4_mc_user_fns
.long v4wb_cache_fns
.size __sa1100_proc_info, . - __sa1100_proc_info
.type __sa1110_proc_info,#object
__sa1110_proc_info:
.long 0x6901b110
.long 0xfffffff0
.long 0x00000c0e
b __sa1100_setup
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT
.long cpu_sa1110_name
.long sa1100_processor_functions
.long v4wb_tlb_fns
.long v4_mc_user_fns
.long v4wb_cache_fns
.size __sa1110_proc_info, . - __sa1110_proc_info
......@@ -27,7 +27,6 @@ EXPORT_SYMBOL(cpu_dcache_invalidate_range);
EXPORT_SYMBOL(cpu_icache_invalidate_range);
EXPORT_SYMBOL(cpu_icache_invalidate_page);
EXPORT_SYMBOL(cpu_set_pgd);
EXPORT_SYMBOL(cpu_flush_pmd);
EXPORT_SYMBOL(cpu_set_pte);
#else
EXPORT_SYMBOL(processor);
......
......@@ -35,6 +35,9 @@ SECTIONS
__setup_start = .;
*(.init.setup)
__setup_end = .;
__early_begin = .;
*(__early_param)
__early_end = .;
__start___param = .;
*(__param)
__stop___param = .;
......
......@@ -67,17 +67,17 @@ static void iq80321_copy_to(struct map_info *map, unsigned long to, const void *
}
static struct map_info iq80321_map = {
name = "IQ80321 flash",
size = WINDOW_SIZE,
buswidth = BUSWIDTH,
read8 = iq80321_read8,
read16 = iq80321_read16,
read32 = iq80321_read32,
copy_from = iq80321_copy_from,
write8 = iq80321_write8,
write16 = iq80321_write16,
write32 = iq80321_write32,
copy_to = iq80321_copy_to
.name = "IQ80321 flash",
.size = WINDOW_SIZE,
.buswidth = BUSWIDTH,
.read8 = iq80321_read8,
.read16 = iq80321_read16,
.read32 = iq80321_read32,
.copy_from = iq80321_copy_from,
.write8 = iq80321_write8,
.write16 = iq80321_write16,
.write32 = iq80321_write32,
.copy_to = iq80321_copy_to
};
static struct mtd_partition iq80321_partitions[4] = {
......
......@@ -55,6 +55,10 @@
#include <asm/system.h>
#include <asm/uaccess.h>
#ifdef __arm__
#include <asm/mach-types.h>
#endif
#include "cyber2000fb.h"
struct cfb_info {
......
/*
* linux/include/asm-arm/arch-iop310/irqs.h
*
* Author: Nicolas Pitre
* Copyright: (C) 2001 MontaVista Software Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* 06/13/01: Added 80310 on-chip interrupt sources <dsaxena@mvista.com>
*
*/
#include <linux/config.h>
/*
* XS80200 specific IRQs
*/
#define IRQ_XS80200_BCU 0 /* Bus Control Unit */
#define IRQ_XS80200_PMU 1 /* Performance Monitoring Unit */
#define IRQ_XS80200_EXTIRQ 2 /* external IRQ signal */
#define IRQ_XS80200_EXTFIQ 3 /* external FIQ signal */
#define NR_XS80200_IRQS 4
#define XSCALE_PMU_IRQ IRQ_XS80200_PMU
/*
* IOP80310 chipset interrupts
*/
#define IOP310_IRQ_OFS NR_XS80200_IRQS
#define IOP310_IRQ(x) (IOP310_IRQ_OFS + (x))
/*
* On FIQ1ISR register
*/
#define IRQ_IOP310_DMA0 IOP310_IRQ(0) /* DMA Channel 0 */
#define IRQ_IOP310_DMA1 IOP310_IRQ(1) /* DMA Channel 1 */
#define IRQ_IOP310_DMA2 IOP310_IRQ(2) /* DMA Channel 2 */
#define IRQ_IOP310_PMON IOP310_IRQ(3) /* Bus Performance Unit */
#define IRQ_IOP310_AAU IOP310_IRQ(4) /* Application Accelerator Unit */
/*
* On FIQ2ISR register
*/
#define IRQ_IOP310_I2C IOP310_IRQ(5) /* I2C unit */
#define IRQ_IOP310_MU IOP310_IRQ(6) /* messaging unit */
#define NR_IOP310_IRQS (IOP310_IRQ(6) + 1)
#define NR_IRQS NR_IOP310_IRQS
/*
* Interrupts available on the Cyclone IQ80310 board
*/
#ifdef CONFIG_ARCH_IQ80310
#define IQ80310_IRQ_OFS NR_IOP310_IRQS
#define IQ80310_IRQ(y) ((IQ80310_IRQ_OFS) + (y))
#define IRQ_IQ80310_TIMER IQ80310_IRQ(0) /* Timer Interrupt */
#define IRQ_IQ80310_I82559 IQ80310_IRQ(1) /* I82559 Ethernet Interrupt */
#define IRQ_IQ80310_UART1 IQ80310_IRQ(2) /* UART1 Interrupt */
#define IRQ_IQ80310_UART2 IQ80310_IRQ(3) /* UART2 Interrupt */
#define IRQ_IQ80310_INTD IQ80310_IRQ(4) /* PCI INTD */
/*
* ONLY AVAILABLE ON REV F OR NEWER BOARDS!
*/
#define IRQ_IQ80310_INTA IQ80310_IRQ(5) /* PCI INTA */
#define IRQ_IQ80310_INTB IQ80310_IRQ(6) /* PCI INTB */
#define IRQ_IQ80310_INTC IQ80310_IRQ(7) /* PCI INTC */
#undef NR_IRQS
#define NR_IRQS (IQ80310_IRQ(7) + 1)
#endif /* CONFIG_ARCH_IQ80310 */
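Working through the arithmetic above: IOP310_IRQ_OFS = NR_XS80200_IRQS = 4, so IRQ_IOP310_MU = 4 + 6 = 10 and NR_IOP310_IRQS = 11. On an IQ80310 board, IQ80310_IRQ_OFS = 11, the highest board interrupt is IQ80310_IRQ(7) = 18, and NR_IRQS becomes 19.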
/*
* linux/include/asm-arm/bugs.h
*
* Copyright (C) 1995 Russell King
* Copyright (C) 1995-2003 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
......@@ -12,6 +12,6 @@
#include <asm/proc-fns.h>
#define check_bugs() cpu_check_bugs()
#define check_bugs() do { } while (0)
#endif
......@@ -19,14 +19,12 @@ struct task_struct;
* relies on it.
*/
extern struct processor {
/* check for any bugs */
void (*_check_bugs)(void);
/* Set up any processor specifics */
void (*_proc_init)(void);
/* Disable any processor specifics */
void (*_proc_fin)(void);
/* set the MEMC hardware mappings */
void (*_set_pgd)(pgd_t *pgd);
void (*_switch_mm)(pgd_t *pgd);
/* XCHG */
unsigned long (*_xchg_1)(unsigned long x, volatile void *ptr);
unsigned long (*_xchg_4)(unsigned long x, volatile void *ptr);
......@@ -36,11 +34,10 @@ extern const struct processor arm2_processor_functions;
extern const struct processor arm250_processor_functions;
extern const struct processor arm3_processor_functions;
#define cpu_check_bugs() processor._check_bugs()
#define cpu_proc_init() processor._proc_init()
#define cpu_proc_fin() processor._proc_fin()
#define cpu_do_idle() do { } while (0)
#define cpu_switch_mm(pgd,mm) processor._set_pgd(pgd)
#define cpu_switch_mm(pgd,mm) processor._switch_mm(pgd)
#define cpu_xchg_1(x,ptr) processor._xchg_1(x,ptr)
#define cpu_xchg_4(x,ptr) processor._xchg_4(x,ptr)
......
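A hypothetical caller-side sketch of the renamed hook (the wrapper function is illustrative; only cpu_switch_mm and struct processor come from the header above):

static inline void example_switch(struct mm_struct *prev, struct mm_struct *next)
{
	if (prev != next)
		cpu_switch_mm(next->pgd, next);	/* dispatches to processor._switch_mm */
}

The rename from _set_pgd to _switch_mm better describes what the hook does on these CPUs: flush the caches and TLB and install the new tables, i.e. the memory-management half of a context switch.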