Commit 5ab6091d
authored Jun 18, 2005 by Russell King
Merge with ../linux-2.6-smp
parents 9ee1c939 ccea7a19
Showing 3 changed files with 190 additions and 123 deletions (+190 -123)

arch/arm/kernel/entry-armv.S    +137 -117
arch/arm/kernel/entry-header.S  +3 -4
arch/arm/kernel/setup.c         +50 -2
arch/arm/kernel/entry-armv.S
...
@@ -23,49 +23,92 @@

 #include "entry-header.S"

+/*
+ * Interrupt handling.  Preserves r7, r8, r9
+ */
+       .macro  irq_handler
+1:     get_irqnr_and_base r0, r6, r5, lr
+       movne   r1, sp
+       @
+       @ routine called with r0 = irq number, r1 = struct pt_regs *
+       @
+       adrne   lr, 1b
+       bne     asm_do_IRQ
+
+#ifdef CONFIG_SMP
+       /*
+        * XXX
+        *
+        * this macro assumes that irqstat (r6) and base (r5) are
+        * preserved from get_irqnr_and_base above
+        */
+       test_for_ipi r0, r6, r5, lr
+       movne   r0, sp
+       adrne   lr, 1b
+       bne     do_IPI
+#endif
+
+       .endm
+
 /*
  * Invalid mode handlers
  */
-       .macro  inv_entry, sym, reason
-       sub     sp, sp, #S_FRAME_SIZE           @ Allocate frame size in one go
-       stmia   sp, {r0 - lr}                   @ Save XXX r0 - lr
-       ldr     r4, .LC\sym
+       .macro  inv_entry, reason
+       sub     sp, sp, #S_FRAME_SIZE
+       stmib   sp, {r1 - lr}
        mov     r1, #\reason
        .endm

 __pabt_invalid:
-       inv_entry abt, BAD_PREFETCH
-       b       1f
+       inv_entry BAD_PREFETCH
+       b       common_invalid

 __dabt_invalid:
-       inv_entry abt, BAD_DATA
-       b       1f
+       inv_entry BAD_DATA
+       b       common_invalid

 __irq_invalid:
-       inv_entry irq, BAD_IRQ
-       b       1f
+       inv_entry BAD_IRQ
+       b       common_invalid

 __und_invalid:
-       inv_entry und, BAD_UNDEFINSTR
-
-1:     zero_fp
-       ldmia   r4, {r5 - r7}                   @ Get XXX pc, cpsr, old_r0
-       add     r4, sp, #S_PC
-       stmia   r4, {r5 - r7}                   @ Save XXX pc, cpsr, old_r0
+       inv_entry BAD_UNDEFINSTR
+
+       @
+       @ XXX fall through to common_invalid
+       @
+
+@
+@ common_invalid - generic code for failed exception (re-entrant version of handlers)
+@
+common_invalid:
+       zero_fp
+
+       ldmia   r0, {r4 - r6}
+       add     r0, sp, #S_PC           @ here for interlock avoidance
+       mov     r7, #-1                 @  ""   ""    ""        ""
+       str     r4, [sp]                @ save preserved r0
+       stmia   r0, {r5 - r7}           @ lr_<exception>,
+                                       @ cpsr_<exception>, "old_r0"
+
        mov     r0, sp
-       and     r2, r6, #31                     @ int mode
+       and     r2, r6, #0x1f
        b       bad_mode

 /*
  * SVC mode handlers
  */
-       .macro  svc_entry, sym
+       .macro  svc_entry
        sub     sp, sp, #S_FRAME_SIZE
-       stmia   sp, {r0 - r12}                  @ save r0 - r12
-       ldr     r2, .LC\sym
-       add     r0, sp, #S_FRAME_SIZE
-       ldmia   r2, {r2 - r4}                   @ get pc, cpsr
-       add     r5, sp, #S_SP
+       stmib   sp, {r1 - r12}
+
+       ldmia   r0, {r1 - r3}
+       add     r5, sp, #S_SP           @ here for interlock avoidance
+       mov     r4, #-1                 @  ""  ""      ""       ""
+       add     r0, sp, #S_FRAME_SIZE   @  ""  ""      ""       ""
+       str     r1, [sp]                @ save the "real" r0 copied
+                                       @ from the exception stack

        mov     r1, lr

        @
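A note on the new entry sequence: svc_entry's "ldmia r0, {r1 - r3}" works because, on entry, r0 points at the small per-mode exception stack that the reworked vector_stub (further down in this diff) fills before switching to SVC mode. A minimal C sketch of that three-word layout, ours rather than the kernel's:

    /* Hypothetical mirror of the three words vector_stub saves with
     * "stmia sp, {r0, lr}" and "str lr, [sp, #8]" (lr holds the spsr
     * by that point); not a kernel structure.
     */
    #include <stdint.h>

    struct exc_frame {
            uint32_t r0;    /* parent r0, put back by "str r1, [sp]" */
            uint32_t pc;    /* lr_<exception>, the parent PC         */
            uint32_t cpsr;  /* spsr_<exception>, the parent CPSR     */
    };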
...
@@ -82,7 +125,7 @@ __und_invalid:

        .align  5
 __dabt_svc:
-       svc_entry abt
+       svc_entry

        @
        @ get ready to re-enable interrupts if appropriate
...
@@ -129,28 +172,24 @@ __dabt_svc:

        .align  5
 __irq_svc:
-       svc_entry irq
+       svc_entry
 #ifdef CONFIG_PREEMPT
-       get_thread_info r8
-       ldr     r9, [r8, #TI_PREEMPT]           @ get preempt count
-       add     r7, r9, #1                      @ increment it
-       str     r7, [r8, #TI_PREEMPT]
+       get_thread_info tsk
+       ldr     r8, [tsk, #TI_PREEMPT]          @ get preempt count
+       add     r7, r8, #1                      @ increment it
+       str     r7, [tsk, #TI_PREEMPT]
 #endif
-1:     get_irqnr_and_base r0, r6, r5, lr
-       movne   r1, sp
-       @
-       @ routine called with r0 = irq number, r1 = struct pt_regs *
-       @
-       adrne   lr, 1b
-       bne     asm_do_IRQ
+
+       irq_handler
 #ifdef CONFIG_PREEMPT
-       ldr     r0, [r8, #TI_FLAGS]             @ get flags
+       ldr     r0, [tsk, #TI_FLAGS]            @ get flags
        tst     r0, #_TIF_NEED_RESCHED
        blne    svc_preempt
 preempt_return:
-       ldr     r0, [r8, #TI_PREEMPT]           @ read preempt value
+       ldr     r0, [tsk, #TI_PREEMPT]          @ read preempt value
+       str     r8, [tsk, #TI_PREEMPT]          @ restore preempt count
        teq     r0, r7
-       str     r9, [r8, #TI_PREEMPT]           @ restore preempt count
        strne   r0, [r0, -r0]                   @ bug()
 #endif
        ldr     r0, [sp, #S_PSR]                @ irqs are already disabled
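In register form the CONFIG_PREEMPT bookkeeping above is terse; here is a hedged C rendering of the same protocol (names and types ours, not the kernel's; the svc_preempt reschedule path is omitted, and the real code uses the deliberately faulting store "strne r0, [r0, -r0]" as an inline bug trap):

    struct thread_info { unsigned int preempt_count; };

    extern void irq_handler_dispatch(void); /* stands in for the irq_handler macro */
    extern void bug(void);                  /* stands in for strne r0, [r0, -r0]   */

    static void irq_svc_preempt_sketch(struct thread_info *tsk)
    {
            unsigned int old = tsk->preempt_count;  /* ldr r8, [tsk, #TI_PREEMPT] */
            tsk->preempt_count = old + 1;           /* add r7, r8, #1; str r7     */

            irq_handler_dispatch();

            unsigned int now = tsk->preempt_count;  /* ldr r0, [tsk, #TI_PREEMPT] */
            tsk->preempt_count = old;               /* str r8, [tsk, #TI_PREEMPT] */
            if (now != old + 1)                     /* teq r0, r7                 */
                    bug();                          /* strne r0, [r0, -r0]        */
    }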
...
@@ -161,7 +200,7 @@ preempt_return:

 #ifdef CONFIG_PREEMPT
 svc_preempt:
-       teq     r9, #0                          @ was preempt count = 0
+       teq     r8, #0                          @ was preempt count = 0
        ldreq   r6, .LCirq_stat
        movne   pc, lr                          @ no
        ldr     r0, [r6, #4]                    @ local_irq_count
...
@@ -169,9 +208,9 @@ svc_preempt:

        adds    r0, r0, r1
        movne   pc, lr
        mov     r7, #0                          @ preempt_schedule_irq
-       str     r7, [r8, #TI_PREEMPT]           @ expects preempt_count == 0
+       str     r7, [tsk, #TI_PREEMPT]          @ expects preempt_count == 0
 1:     bl      preempt_schedule_irq            @ irq en/disable is done inside
-       ldr     r0, [r8, #TI_FLAGS]             @ get new tasks TI_FLAGS
+       ldr     r0, [tsk, #TI_FLAGS]            @ get new tasks TI_FLAGS
        tst     r0, #_TIF_NEED_RESCHED
        beq     preempt_return                  @ go again
        b       1b
...
@@ -179,7 +218,7 @@ svc_preempt:

        .align  5
 __und_svc:
-       svc_entry und
+       svc_entry

        @
        @ call emulation code, which returns using r9 if it has emulated
...
@@ -209,7 +248,7 @@ __und_svc:

        .align  5
 __pabt_svc:
-       svc_entry abt
+       svc_entry

        @
        @ re-enable interrupts if appropriate
...
@@ -242,12 +281,8 @@ __pabt_svc:

        ldmia   sp, {r0 - pc}^                  @ load r0 - pc, cpsr

        .align  5
-.LCirq:
-       .word   __temp_irq
-.LCund:
-       .word   __temp_und
-.LCabt:
-       .word   __temp_abt
+.LCcralign:
+       .word   cr_alignment
 #ifdef MULTI_ABORT
 .LCprocfns:
        .word   processor
...
@@ -262,12 +297,16 @@ __pabt_svc:

 /*
  * User mode handlers
  */
-       .macro  usr_entry, sym
-       sub     sp, sp, #S_FRAME_SIZE           @ Allocate frame size in one go
-       stmia   sp, {r0 - r12}                  @ save r0 - r12
-       ldr     r7, .LC\sym
-       add     r5, sp, #S_PC
-       ldmia   r7, {r2 - r4}                   @ Get USR pc, cpsr
+       .macro  usr_entry
+       sub     sp, sp, #S_FRAME_SIZE
+       stmib   sp, {r1 - r12}
+
+       ldmia   r0, {r1 - r3}
+       add     r0, sp, #S_PC           @ here for interlock avoidance
+       mov     r4, #-1                 @  ""  ""     ""        ""
+
+       str     r1, [sp]                @ save the "real" r0 copied
+                                       @ from the exception stack

 #if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
        @ make sure our user space atomic helper is aborted
...
@@ -284,13 +323,13 @@ __pabt_svc:

        @
        @ Also, separately save sp_usr and lr_usr
        @
-       stmia   r5, {r2 - r4}
-       stmdb   r5, {sp, lr}^
+       stmia   r0, {r2 - r4}
+       stmdb   r0, {sp, lr}^

        @
        @ Enable the alignment trap while in kernel mode
        @
-       alignment_trap r7, r0, __temp_\sym
+       alignment_trap r0

        @
        @ Clear FP to mark the first stack frame
...
@@ -300,7 +339,7 @@ __pabt_svc:

        .align  5
 __dabt_usr:
-       usr_entry abt
+       usr_entry

        @
        @ Call the processor-specific abort handler:
...
@@ -329,30 +368,23 @@ __dabt_usr:

        .align  5
 __irq_usr:
-       usr_entry irq
+       usr_entry

+       get_thread_info tsk
 #ifdef CONFIG_PREEMPT
-       get_thread_info r8
-       ldr     r9, [r8, #TI_PREEMPT]           @ get preempt count
-       add     r7, r9, #1                      @ increment it
-       str     r7, [r8, #TI_PREEMPT]
+       ldr     r8, [tsk, #TI_PREEMPT]          @ get preempt count
+       add     r7, r8, #1                      @ increment it
+       str     r7, [tsk, #TI_PREEMPT]
 #endif
-1:     get_irqnr_and_base r0, r6, r5, lr
-       movne   r1, sp
-       adrne   lr, 1b
-       @
-       @ routine called with r0 = irq number, r1 = struct pt_regs *
-       @
-       bne     asm_do_IRQ
+
+       irq_handler
 #ifdef CONFIG_PREEMPT
-       ldr     r0, [r8, #TI_PREEMPT]
+       ldr     r0, [tsk, #TI_PREEMPT]
+       str     r8, [tsk, #TI_PREEMPT]
        teq     r0, r7
-       str     r9, [r8, #TI_PREEMPT]
        strne   r0, [r0, -r0]
-       mov     tsk, r8
-#else
-       get_thread_info tsk
 #endif
+
        mov     why, #0
        b       ret_to_user
...
@@ -360,7 +392,7 @@ __irq_usr:

        .align  5
 __und_usr:
-       usr_entry und
+       usr_entry

        tst     r3, #PSR_T_BIT                  @ Thumb mode?
        bne     fpundefinstr                    @ ignore FP
...
@@ -476,7 +508,7 @@ fpundefinstr:

        .align  5
 __pabt_usr:
-       usr_entry abt
+       usr_entry

        enable_irq                              @ Enable interrupts
        mov     r0, r2                          @ address (pc)
...
@@ -741,29 +773,41 @@ __kuser_helper_end:

  *
  * Common stub entry macro:
  *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
+ *
+ * SP points to a minimal amount of processor-private memory, the address
+ * of which is copied into r0 for the mode specific abort handler.
  */
-       .macro  vector_stub, name, sym, correction=0
+       .macro  vector_stub, name, correction=0

        .align  5

 vector_\name:
-       ldr     r13, .LCs\sym
        .if \correction
        sub     lr, lr, #\correction
        .endif
-       str     lr, [r13]                       @ save lr_IRQ
+
+       @
+       @ Save r0, lr_<exception> (parent PC) and spsr_<exception>
+       @ (parent CPSR)
+       @
+       stmia   sp, {r0, lr}            @ save r0, lr
        mrs     lr, spsr
-       str     lr, [r13, #4]                   @ save spsr_IRQ
+       str     lr, [sp, #8]            @ save spsr

        @
-       @ now branch to the relevant MODE handling routine
+       @ Prepare for SVC32 mode.  IRQs remain disabled.
        @
-       mrs     r13, cpsr
-       bic     r13, r13, #MODE_MASK
-       orr     r13, r13, #SVC_MODE
-       msr     spsr_cxsf, r13                  @ switch to SVC_32 mode
+       mrs     r0, cpsr
+       bic     r0, r0, #MODE_MASK
+       orr     r0, r0, #SVC_MODE
+       msr     spsr_cxsf, r0

-       and     lr, lr, #15
+       @
+       @ the branch table must immediately follow this code
+       @
+       mov     r0, sp
+       and     lr, lr, #0x0f
        ldr     lr, [pc, lr, lsl #2]
-       movs    pc, lr                          @ Changes mode and branches
+       movs    pc, lr                  @ branch to handler in SVC mode
        .endm

        .globl  __stubs_start
...
@@ -771,7 +815,7 @@ __stubs_start:

 /*
  * Interrupt dispatcher
  */
-       vector_stub     irq, irq, 4
+       vector_stub     irq, 4

        .long   __irq_usr                       @  0  (USR_26 / USR_32)
        .long   __irq_invalid                   @  1  (FIQ_26 / FIQ_32)
...
@@ -794,7 +838,7 @@ __stubs_start:

  * Data abort dispatcher
  * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
  */
-       vector_stub     dabt, abt, 8
+       vector_stub     dabt, 8

        .long   __dabt_usr                      @  0  (USR_26 / USR_32)
        .long   __dabt_invalid                  @  1  (FIQ_26 / FIQ_32)
...
@@ -817,7 +861,7 @@ __stubs_start:

  * Prefetch abort dispatcher
  * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
  */
-       vector_stub     pabt, abt, 4
+       vector_stub     pabt, 4

        .long   __pabt_usr                      @  0  (USR_26 / USR_32)
        .long   __pabt_invalid                  @  1  (FIQ_26 / FIQ_32)
...
@@ -840,7 +884,7 @@ __stubs_start:

  * Undef instr entry dispatcher
  * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
  */
-       vector_stub     und, und
+       vector_stub     und

        .long   __und_usr                       @  0  (USR_26 / USR_32)
        .long   __und_invalid                   @  1  (FIQ_26 / FIQ_32)
...
@@ -894,13 +938,6 @@ vector_addrexcptn:

 .LCvswi:
        .word   vector_swi

-.LCsirq:
-       .word   __temp_irq
-.LCsund:
-       .word   __temp_und
-.LCsabt:
-       .word   __temp_abt
-
        .globl  __stubs_end
 __stubs_end:
...
@@ -922,23 +959,6 @@ __vectors_end:

        .data

-/*
- * Do not reorder these, and do not insert extra data between...
- */
-__temp_irq:
-       .word   0                               @ saved lr_irq
-       .word   0                               @ saved spsr_irq
-       .word   -1                              @ old_r0
-__temp_und:
-       .word   0                               @ Saved lr_und
-       .word   0                               @ Saved spsr_und
-       .word   -1                              @ old_r0
-__temp_abt:
-       .word   0                               @ Saved lr_abt
-       .word   0                               @ Saved spsr_abt
-       .word   -1                              @ old_r0
-
        .globl  cr_alignment
        .globl  cr_no_alignment
 cr_alignment:
...
arch/arm/kernel/entry-header.S
...
@@ -59,11 +59,10 @@

        mov     \rd, \rd, lsl #13
        .endm

-       .macro  alignment_trap, rbase, rtemp, sym
+       .macro  alignment_trap, rtemp
 #ifdef CONFIG_ALIGNMENT_TRAP
-#define OFF_CR_ALIGNMENT(x)    cr_alignment - x
-
-       ldr     \rtemp, [\rbase, #OFF_CR_ALIGNMENT(\sym)]
+       ldr     \rtemp, .LCcralign
+       ldr     \rtemp, [\rtemp]
        mcr     p15, 0, \rtemp, c1, c0
 #endif
        .endm
...
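The macro now takes a single scratch register: it loads the address of cr_alignment from the literal pool, dereferences it, and writes the value into the CP15 control register. Roughly, in C (the helper name is ours, purely illustrative):

    #include <stdint.h>

    extern uint32_t cr_alignment;                /* kernel variable, set up early */
    extern void cp15_write_control(uint32_t cr); /* assumed stand-in for
                                                    "mcr p15, 0, rt, c1, c0" */

    static void alignment_trap_sketch(void)
    {
            uint32_t cr = cr_alignment; /* ldr \rtemp, .LCcralign; ldr \rtemp, [\rtemp] */
            cp15_write_control(cr);     /* re-enable the alignment trap in kernel mode  */
    }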
arch/arm/kernel/setup.c
...
@@ -92,6 +92,14 @@ struct cpu_user_fns cpu_user;

 struct cpu_cache_fns cpu_cache;
 #endif

+struct stack {
+       u32 irq[3];
+       u32 abt[3];
+       u32 und[3];
+} ____cacheline_aligned;
+
+static struct stack stacks[NR_CPUS];
+
 char elf_platform[ELF_PLATFORM_SIZE];
 EXPORT_SYMBOL(elf_platform);
...
@@ -307,8 +315,6 @@ static void __init setup_processor(void)

               cpu_name, processor_id, (int)processor_id & 15,
               proc_arch[cpu_architecture()]);

-       dump_cpu_info(smp_processor_id());
-
        sprintf(system_utsname.machine, "%s%c", list->arch_name, ENDIANNESS);
        sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
        elf_hwcap = list->elf_hwcap;
...
@@ -316,6 +322,46 @@ static void __init setup_processor(void)

        cpu_proc_init();
 }

+/*
+ * cpu_init - initialise one CPU.
+ *
+ * cpu_init dumps the cache information, initialises SMP specific
+ * information, and sets up the per-CPU stacks.
+ */
+void __init cpu_init(void)
+{
+       unsigned int cpu = smp_processor_id();
+       struct stack *stk = &stacks[cpu];
+
+       if (cpu >= NR_CPUS) {
+               printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
+               BUG();
+       }
+
+       dump_cpu_info(cpu);
+
+       /*
+        * setup stacks for re-entrant exception handlers
+        */
+       __asm__ (
+       "msr    cpsr_c, %1\n\t"
+       "add    sp, %0, %2\n\t"
+       "msr    cpsr_c, %3\n\t"
+       "add    sp, %0, %4\n\t"
+       "msr    cpsr_c, %5\n\t"
+       "add    sp, %0, %6\n\t"
+       "msr    cpsr_c, %7"
+           :
+           : "r" (stk),
+             "I" (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
+             "I" (offsetof(struct stack, irq[0])),
+             "I" (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
+             "I" (offsetof(struct stack, abt[0])),
+             "I" (PSR_F_BIT | PSR_I_BIT | UND_MODE),
+             "I" (offsetof(struct stack, und[0])),
+             "I" (PSR_F_BIT | PSR_I_BIT | SVC_MODE));
+}
+
 static struct machine_desc * __init setup_machine(unsigned int nr)
 {
        struct machine_desc *list;
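The inline asm in cpu_init() is easier to follow step by step: for each of the IRQ, abort and undefined modes it switches the CPU into that mode with interrupts masked, points the banked sp at the matching three-word array in this CPU's struct stack, and finally drops back to SVC mode. A sketch under assumed helpers (set_cpu_mode and set_banked_sp do not exist in the kernel; they stand in for "msr cpsr_c, %N" and "add sp, %0, %N"):

    #include <stddef.h>
    #include <stdint.h>

    struct stack { uint32_t irq[3], abt[3], und[3]; };

    extern void set_cpu_mode(uint32_t cpsr_c); /* assumed: msr cpsr_c, %N */
    extern void set_banked_sp(void *sp);       /* assumed: add sp, %0, %N */

    static void cpu_init_stacks_sketch(struct stack *stk,
                                       uint32_t masked_irq_mode,
                                       uint32_t masked_abt_mode,
                                       uint32_t masked_und_mode,
                                       uint32_t masked_svc_mode)
    {
            set_cpu_mode(masked_irq_mode);
            set_banked_sp((char *)stk + offsetof(struct stack, irq[0]));
            set_cpu_mode(masked_abt_mode);
            set_banked_sp((char *)stk + offsetof(struct stack, abt[0]));
            set_cpu_mode(masked_und_mode);
            set_banked_sp((char *)stk + offsetof(struct stack, und[0]));
            set_cpu_mode(masked_svc_mode); /* back in SVC mode, IRQs still off */
    }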
...
@@ -715,6 +761,8 @@ void __init setup_arch(char **cmdline_p)

        paging_init(&meminfo, mdesc);
        request_standard_resources(&meminfo, mdesc);

+       cpu_init();
+
        /*
         * Set up various architecture-specific pointers
         */
...