Commit c0111141 authored by Russell King

[ARM] Remove old NetWinder uncompressed kernel image compatibility code.

Add/correct comments, including in decompressor code.  Add arm6 cache
support to decompressor.
parent 4b59e57f
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
 /*
  * linux/arch/arm/boot/compressed/head.S
  *
- * Copyright (C) 1996-1999 Russell King
+ * Copyright (C) 1996-2002 Russell King
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -161,7 +161,6 @@ not_relocated:  mov     r0, #0
                cmp     r2, r3
                blo     1b
-               mrc     p15, 0, r6, c0, c0      @ get processor ID
                bl      cache_on
                mov     r1, sp                  @ malloc space above stack
@@ -200,7 +199,8 @@ not_relocated:  mov     r0, #0
                 */
                add     r1, r5, r0              @ end of decompressed kernel
                adr     r2, reloc_start
-               adr     r3, reloc_end
+               ldr     r3, LC1
+               add     r3, r2, r3
 1:             ldmia   r2!, {r8 - r13}         @ copy relocation code
                stmia   r1!, {r8 - r13}
                ldmia   r2!, {r8 - r13}
@@ -229,8 +229,9 @@ LC0:            .word   LC0                     @ r1
                .word   _load_addr              @ r4
                .word   _start                  @ r5
                .word   _got_start              @ r6
-               .word   _got_end                @ r7
-               .word   user_stack+4096         @ r8
+               .word   _got_end                @ ip
+               .word   user_stack+4096         @ sp
+LC1:           .word   reloc_end - reloc_start
                .size   LC0, . - LC0
 /*
@@ -255,7 +256,7 @@ LC0:            .word   LC0                     @ r1
 cache_on:      mov     r3, #8                  @ cache_on function
                b       call_cache_fn
-__cache_on:    sub     r3, r4, #16384          @ Page directory size
+__setup_mmu:   sub     r3, r4, #16384          @ Page directory size
                bic     r3, r3, #0xff           @ Align the pointer
                bic     r3, r3, #0x3f00
 /*
@@ -291,20 +292,35 @@ __cache_on:    sub     r3, r4, #16384          @ Page directory size
                str     r1, [r0], #4
                add     r1, r1, #1048576
                str     r1, [r0]
+               mov     pc, lr
+
+__armv4_cache_on:
+               mov     r12, lr
+               bl      __setup_mmu
                mov     r0, #0
                mcr     p15, 0, r0, c7, c10, 4  @ drain write buffer
-               mcr     p15, 0, r0, c8, c7      @ flush I,D TLBs
-               mcr     p15, 0, r3, c2, c0      @ load page table pointer
-               mov     r0, #-1
-               mcr     p15, 0, r0, c3, c0      @ load domain access register
-               mrc     p15, 0, r0, c1, c0
+               mcr     p15, 0, r0, c8, c7, 0   @ flush I,D TLBs
+               mrc     p15, 0, r0, c1, c0, 0   @ read control reg
                orr     r0, r0, #0x1000         @ I-cache enable
+               orr     r0, r0, #0x0030
+               b       __common_cache_on
+
+__arm6_cache_on:
+               mov     r12, lr
+               bl      __setup_mmu
+               mov     r0, #0
+               mcr     p15, 0, r0, c7, c0, 0   @ invalidate whole cache v3
+               mcr     p15, 0, r0, c5, c0, 0   @ invalidate whole TLB v3
+               mov     r0, #0x30
+__common_cache_on:
 #ifndef DEBUG
-               orr     r0, r0, #0x003d         @ Write buffer, mmu
+               orr     r0, r0, #0x000d         @ Write buffer, mmu
 #endif
-               mcr     p15, 0, r0, c1, c0
-               mov     pc, lr
+               mov     r1, #-1
+               mcr     p15, 0, r3, c2, c0, 0   @ load page table pointer
+               mcr     p15, 0, r1, c3, c0, 0   @ load domain access control
+               mcr     p15, 0, r0, c1, c0, 0   @ load control register
+               mov     pc, r12
 /*
  * All code following this line is relocatable.  It is relocated by
@@ -349,11 +365,12 @@ call_kernel:   bl      cache_clean_flush
  *  r1 = corrupted
  *  r2 = corrupted
  *  r3 = block offset
- *  r6 = CPU ID
+ *  r6 = corrupted
  *  r12 = corrupted
  */
 call_cache_fn: adr     r12, proc_types
+               mrc     p15, 0, r6, c0, c0      @ get processor ID
 1:             ldr     r1, [r12, #0]           @ get value
                ldr     r2, [r12, #4]           @ get mask
                eor     r1, r1, r6              @ (real ^ match)
@@ -380,9 +397,12 @@ call_cache_fn: adr     r12, proc_types
 proc_types:
                .word   0x41560600              @ ARM6/610
                .word   0xffffffe0
-               b       __arm6_cache_off
+               b       __arm6_cache_off        @ works, but slow
                b       __arm6_cache_off
                mov     pc, lr
+@              b       __arm6_cache_on         @ untested
+@              b       __arm6_cache_off
+@              b       __armv3_cache_flush
                .word   0x41007000              @ ARM7/710
                .word   0xfff8fe00
@@ -392,31 +412,31 @@ proc_types:
                .word   0x41807200              @ ARM720T (writethrough)
                .word   0xffffff00
-               b       __cache_on
+               b       __armv4_cache_on
                b       __armv4_cache_off
                mov     pc, lr
                .word   0x41129200              @ ARM920T
                .word   0xff00fff0
-               b       __cache_on
+               b       __armv4_cache_on
                b       __armv4_cache_off
                b       __armv4_cache_flush
                .word   0x4401a100              @ sa110 / sa1100
                .word   0xffffffe0
-               b       __cache_on
+               b       __armv4_cache_on
                b       __armv4_cache_off
                b       __armv4_cache_flush
                .word   0x6901b110              @ sa1110
                .word   0xfffffff0
-               b       __cache_on
+               b       __armv4_cache_on
                b       __armv4_cache_off
                b       __armv4_cache_flush
                .word   0x69050000              @ xscale
                .word   0xffff0000
-               b       __cache_on
+               b       __armv4_cache_on
                b       __armv4_cache_off
                b       __armv4_cache_flush
@@ -450,7 +470,7 @@ __armv4_cache_off:
                mov     pc, lr
 __arm6_cache_off:
-               mov     r0, #0x00000060         @ ARM6 control reg.
+               mov     r0, #0x00000030         @ ARM6 control reg.
                b       __armv3_cache_off
 __arm7_cache_off:
@@ -458,10 +478,10 @@ __arm7_cache_off:
                b       __armv3_cache_off
 __armv3_cache_off:
-               mcr     p15, 0, r0, c1, c0      @ turn MMU and cache off
+               mcr     p15, 0, r0, c1, c0, 0   @ turn MMU and cache off
                mov     r0, #0
-               mcr     p15, 0, r0, c7, c0      @ invalidate whole cache v3
-               mcr     p15, 0, r0, c5, c0      @ invalidate whole TLB v3
+               mcr     p15, 0, r0, c7, c0, 0   @ invalidate whole cache v3
+               mcr     p15, 0, r0, c5, c0, 0   @ invalidate whole TLB v3
                mov     pc, lr
 /*
@@ -490,6 +510,11 @@ __armv4_cache_flush:
                mcr     p15, 0, r1, c7, c10, 4  @ drain WB
                mov     pc, lr
+
+__armv3_cache_flush:
+               mov     r1, #0
+               mcr     p15, 0, r0, c7, c0, 0   @ invalidate whole cache v3
+               mov     pc, lr
 /*
  * Various debugging routines for printing hex characters and
  * memory, which again must be relocatable.
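The decompressor change above moves the processor ID read into call_cache_fn itself and dispatches through the proc_types table: each entry holds an ID value, a mask, and three branch slots (cache on, cache off, cache flush), and the new ARM6/610 entry is what gives the decompressor its arm6 cache handling. A rough C rendering of the match test, with hypothetical struct and function names, is:

    /* Hypothetical sketch of the proc_types match done by call_cache_fn:
     * an entry applies when the masked CPU ID equals the entry's value. */
    struct proc_type {
            unsigned int match;     /* expected CP15 c0 (processor ID) bits */
            unsigned int mask;      /* which ID bits are significant */
            /* three branch slots follow in the real table:
             * cache_on, cache_off, cache_flush */
    };

    static int proc_type_matches(unsigned int cpuid, const struct proc_type *p)
    {
            /* cf. "eor r1, r1, r6" followed by "tst r1, r2" above */
            return ((cpuid ^ p->match) & p->mask) == 0;
    }

An entry with an all-zero value and all-zero mask would match any CPU, so such an entry can serve as a catch-all terminator for a table like this.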
--- a/arch/arm/kernel/head-armv.S
+++ b/arch/arm/kernel/head-armv.S
 /*
  * linux/arch/arm/kernel/head-armv.S
  *
- * Copyright (C) 1994-1999 Russell King
+ * Copyright (C) 1994-2002 Russell King
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  *
- * 32-bit kernel startup code for all architectures
+ * Kernel startup code for all 32-bit CPUs
  */
 #include <linux/config.h>
 #include <linux/linkage.h>
@@ -17,12 +17,10 @@
 #include <asm/procinfo.h>
 #include <asm/mach/arch.h>
-#define K(a,b,c)       ((a) << 24 | (b) << 12 | (c))
 /*
  * We place the page tables 16K below TEXTADDR.  Therefore, we must make sure
  * that TEXTADDR is correctly set.  Currently, we expect the least significant
- * "short" to be 0x8000, but we could probably relax this restriction to
+ * 16 bits to be 0x8000, but we could probably relax this restriction to
  * TEXTADDR > PAGE_OFFSET + 0x4000
  *
  * Note that swapper_pg_dir is the virtual address of the page tables, and
@@ -54,79 +52,26 @@
 /*
  * Kernel startup entry point.
+ * ---------------------------
+ *
+ * This is normally called from the decompressor code.  The requirements
+ * are: MMU = off, D-cache = off, I-cache = dont care, r0 = 0,
+ * r1 = machine nr.
+ *
+ * This code is mostly position independent, so if you link the kernel at
+ * 0xc0008000, you call this at __pa(0xc0008000).
  *
- * The rules are:
- *  r0      - should be 0
- *  r1      - unique architecture number
- *  MMU     - off
- *  I-cache - on or off
- *  D-cache - off
+ * See linux/arch/arm/tools/mach-types for the complete list of machine
+ * numbers for r1.
  *
- * See linux/arch/arm/tools/mach-types for the complete list of numbers
- * for r1.
+ * We're trying to keep crap to a minimum; DO NOT add any machine specific
+ * crap here - that's what the boot loader (or in extreme, well justified
+ * circumstances, zImage) is for.
  */
                .section ".text.init",#alloc,#execinstr
                .type   stext, #function
 ENTRY(stext)
                mov     r12, r0
-/*
- * NOTE!  Any code which is placed here should be done for one of
- * the following reasons:
- *
- *  1. Compatability with old production boot firmware (ie, users
- *     actually have and are booting the kernel with the old firmware)
- *     and therefore will be eventually removed.
- *  2. Cover the case when there is no boot firmware.  This is not
- *     ideal, but in this case, it should ONLY set r0 and r1 to the
- *     appropriate value.
- */
-#if defined(CONFIG_ARCH_NETWINDER)
-/*
- * Compatability cruft for old NetWinder NeTTroms.  This
- * code is currently scheduled for destruction in 2.5.xx
- */
-               .rept   8
-               mov     r0, r0
-               .endr
-               adr     r2, 1f
-               ldmdb   r2, {r7, r8}
-               and     r3, r2, #0xc000
-               teq     r3, #0x8000
-               beq     __entry
-               bic     r3, r2, #0xc000
-               orr     r3, r3, #0x8000
-               mov     r0, r3
-               mov     r4, #64
-               sub     r5, r8, r7
-               b       1f
-               .word   _stext
-               .word   __bss_start
-1:
-               .rept   4
-               ldmia   r2!, {r6, r7, r8, r9}
-               stmia   r3!, {r6, r7, r8, r9}
-               .endr
-               subs    r4, r4, #64
-               bcs     1b
-               movs    r4, r5
-               mov     r5, #0
-               movne   pc, r0
-               mov     r1, #MACH_TYPE_NETWINDER        @ (will go in 2.5)
-               mov     r12, #2 << 24                   @ scheduled for removal in 2.5.xx
-               orr     r12, r12, #5 << 12
-__entry:
-#endif
-#if defined(CONFIG_ARCH_L7200)
-/*
- * FIXME - No bootloader, so manually set 'r1' with our architecture number.
- */
-               mov     r1, #MACH_TYPE_L7200
-#endif
                mov     r0, #PSR_F_BIT | PSR_I_BIT | MODE_SVC   @ make sure svc mode
                msr     cpsr_c, r0                              @ and all irqs disabled
                bl      __lookup_processor_type
@@ -138,59 +83,72 @@ __entry:
                moveq   r0, #'a'                @ yes, error 'a'
                beq     __error
                bl      __create_page_tables
-               adr     lr, __ret               @ return address
-               add     pc, r10, #12            @ initialise processor
-                                               @ (return control reg)
+/*
+ * The following calls CPU specific code in a position independent
+ * manner.  See arch/arm/mm/proc-*.S for details.  r10 = base of
+ * xxx_proc_info structure selected by __lookup_architecture_type
+ * above.  On return, the CPU will be ready for the MMU to be
+ * turned on, and r0 will hold the CPU control register value.
+ */
+               adr     lr, __turn_mmu_on       @ return (PIC) address
+               add     pc, r10, #12
                .type   __switch_data, %object
-__switch_data: .long   __mmap_switched
-               .long   compat
-               .long   __bss_start
-               .long   _end
-               .long   processor_id
-               .long   __machine_arch_type
-               .long   cr_alignment
-               .long   init_thread_union+8192
+__switch_data:
+               .long   __mmap_switched
+               .long   __bss_start             @ r4
+               .long   _end                    @ r5
+               .long   processor_id            @ r6
+               .long   __machine_arch_type     @ r7
+               .long   cr_alignment            @ r8
+               .long   init_thread_union+8192  @ sp
-               .type   __ret, %function
-__ret:         ldr     lr, __switch_data
+/*
+ * Enable the MMU.  This completely changes the structure of the visible
+ * memory space.  You will not be able to trace execution through this.
+ * If you have an enquiry about this, *please* check the linux-arm-kernel
+ * mailing list archives BEFORE sending another post to the list.
+ */
+               .align  5
+               .type   __turn_mmu_on, %function
+__turn_mmu_on:
+               ldr     lr, __switch_data
+#ifdef CONFIG_ALIGNMENT_TRAP
+               orr     r0, r0, #2              @ ...........A.
+#endif
                mcr     p15, 0, r0, c1, c0
                mov     r0, r0
                mov     r0, r0
                mov     r0, r0
                mov     pc, lr
 /*
- * This code follows on after the page
- * table switch and jump above.
+ * The following fragment of code is executed with the MMU on, and uses
+ * absolute addresses; this is not position independent.
  *
  *  r0  = processor control register
  *  r1  = machine ID
  *  r9  = processor ID
- *  r12 = value of r0 when kernel was called (currently always zero)
  */
                .align  5
 __mmap_switched:
                adr     r3, __switch_data + 4
-               ldmia   r3, {r2, r4, r5, r6, r7, r8, sp}        @ r2 = compat
-                                               @ sp = stack pointer
-               str     r12, [r2]
+               ldmia   r3, {r4, r5, r6, r7, r8, sp}
                mov     fp, #0                  @ Clear BSS (and zero fp)
 1:             cmp     r4, r5
                strcc   fp, [r4],#4
                bcc     1b
                str     r9, [r6]                @ Save processor ID
                str     r1, [r7]                @ Save machine type
-#ifdef CONFIG_ALIGNMENT_TRAP
-               orr     r0, r0, #2              @ ...........A.
-#endif
                bic     r2, r0, #2              @ Clear 'A' bit
                stmia   r8, {r0, r2}            @ Save control register values
                b       start_kernel
 /*
  * Setup the initial page tables.  We only setup the barest
  * amount which are required to get the kernel running, which
@@ -284,7 +242,7 @@ __create_page_tables:
                teq     r1, #MACH_TYPE_NETWINDER
                teqne   r1, #MACH_TYPE_CATS
                bne     1f
-               add     r0, r4, #0x3fc0
+               add     r0, r4, #0x3fc0         @ ff000000
                mov     r3, #0x7c000000
                orr     r3, r3, r8
                str     r3, [r0], #4
@@ -311,10 +269,10 @@ __create_page_tables:
 /*
- * Exception handling.  Something went wrong and we can't
- * proceed.  We ought to tell the user, but since we
- * don't have any guarantee that we're even running on
- * the right architecture, we do virtually nothing.
+ * Exception handling.  Something went wrong and we can't proceed.  We
+ * ought to tell the user, but since we don't have any guarantee that
+ * we're even running on the right architecture, we do virtually nothing.
+ *
  * r0 = ascii error character:
  *     a = invalid architecture
  *     p = invalid processor
@@ -347,7 +305,8 @@ __error:
                b       1b
 #ifdef CONFIG_DEBUG_LL
-err_str:       .asciz  "\nError: "
+err_str:
+               .asciz  "\nError: "
                .align
 #endif
@@ -411,9 +370,9 @@ __lookup_architecture_type:
                add     r4, r6, r5              @ to our address space
                add     r7, r7, r5
 1:             ldr     r5, [r4]                @ get machine type
-               teq     r5, r1
-               beq     2f
-               add     r4, r4, #SIZEOF_MACHINE_DESC
+               teq     r5, r1                  @ matches loader number?
+               beq     2f                      @ found
+               add     r4, r4, #SIZEOF_MACHINE_DESC    @ next machine_desc
                cmp     r4, r7
                blt     1b
                mov     r7, #0                  @ unknown architecture
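The reworded comment near the top of head-armv.S spells out the constraint the page-table placement relies on: the 16K page directory sits immediately below TEXTADDR, so the least significant 16 bits of TEXTADDR are expected to be 0x8000, leaving exactly 0x4000 bytes free below the kernel text. A small, self-contained C illustration of that arithmetic, using assumed values for PAGE_OFFSET and TEXTADDR that match the "link the kernel at 0xc0008000" example in the new comment, is:

    /* Illustration only: the concrete PAGE_OFFSET/TEXTADDR values here are
     * assumptions taken from the comment's 0xc0008000 example. */
    #include <assert.h>
    #include <stdio.h>

    #define PAGE_OFFSET     0xc0000000UL
    #define TEXTADDR        (PAGE_OFFSET + 0x8000UL)
    #define PG_DIR_SIZE     0x4000UL                    /* 16K page directory */
    #define SWAPPER_PG_DIR  (TEXTADDR - PG_DIR_SIZE)    /* 16K below the text */

    int main(void)
    {
            assert((TEXTADDR & 0xffffUL) == 0x8000UL);      /* current expectation */
            assert(TEXTADDR > PAGE_OFFSET + 0x4000UL);      /* the relaxed form */
            printf("page directory at %#lx\n", SWAPPER_PG_DIR);
            return 0;
    }

The decompressor's __setup_mmu appears to do the analogous physical-address subtraction ("sub r3, r4, #16384 @ Page directory size"), placing its temporary page directory 16K below the address in r4, since the MMU is still off at that point.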
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -61,7 +61,6 @@ extern int root_mountflags;
 extern int _stext, _text, _etext, _edata, _end;
 unsigned int processor_id;
-unsigned int compat;
 unsigned int __machine_arch_type;
 unsigned int system_rev;
 unsigned int system_serial_low;
@@ -289,11 +288,6 @@ static struct machine_desc * __init setup_machine(unsigned int nr)
 	}
 	printk("Machine: %s\n", list->name);
-	if (compat)
-		printk(KERN_WARNING "Using compatibility code "
-			"scheduled for removal in v%d.%d.%d\n",
-			compat >> 24, (compat >> 12) & 0x3ff,
-			compat & 0x3ff);
 	return list;
 }
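The assembler loop in __lookup_architecture_type above and setup_machine() in setup.c both resolve the boot loader's machine number (passed in r1) against the built-in list of machine descriptors. A simplified C sketch of that lookup, with a pared-down structure and a hypothetical helper name, is:

    /* Pared-down sketch; the real struct machine_desc carries much more
     * (name, boot params address, init hooks, ...). */
    struct machine_desc {
            unsigned int    nr;     /* architecture number, cf. MACH_TYPE_* */
            const char      *name;
    };

    static const struct machine_desc *
    lookup_machine(unsigned int nr, const struct machine_desc *p,
                   const struct machine_desc *end)
    {
            for (; p < end; p++)
                    if (p->nr == nr)        /* matches loader number? */
                            return p;       /* found */
            return 0;                       /* unknown architecture */
    }

When no entry matches, the startup code reports error 'a' (invalid architecture); when one does, setup_machine() prints the machine name as shown in the setup.c hunk.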