Commit 3c5df5c2, authored Sep 27, 2007 by Kumar Gala

[POWERPC] Cleaned up whitespace in head_fsl_booke.S

Signed-off-by: Kumar Gala <galak@kernel.crashing.org>

Parent: b6927bca

Showing 1 changed file with 38 additions and 38 deletions:

arch/powerpc/kernel/head_fsl_booke.S  (+38 -38)
@@ -2,27 +2,27 @@
 * Kernel execution entry point code.
 *
 *    Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
 *      Initial PowerPC version.
 *    Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
 *      Rewritten for PReP
 *    Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
 *      Low-level exception handers, MMU support, and rewrite.
 *    Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
 *      PowerPC 8xx modifications.
 *    Copyright (c) 1998-1999 TiVo, Inc.
 *      PowerPC 403GCX modifications.
 *    Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
 *      PowerPC 403GCX/405GP modifications.
 *    Copyright 2000 MontaVista Software Inc.
 *      PPC405 modifications
 *      PowerPC 403GCX/405GP modifications.
 *      Author: MontaVista Software, Inc.
 *              frank_rowand@mvista.com or source@mvista.com
 *              debbie_chu@mvista.com
 *    Copyright 2002-2004 MontaVista Software, Inc.
 *      PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
 *    Copyright 2004 Freescale Semiconductor, Inc
 *      PowerPC e500 modifications, Kumar Gala <galak@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
@@ -146,13 +146,13 @@ skpinv: addi    r6,r6,1    /* Increment */
        bne     1b                      /* If not, repeat */

        /* Invalidate TLB0 */
        li      r6,0x04
        tlbivax 0,r6
#ifdef CONFIG_SMP
        tlbsync
#endif
        /* Invalidate TLB1 */
        li      r6,0x0c
        tlbivax 0,r6
#ifdef CONFIG_SMP
        tlbsync
@@ -211,7 +211,7 @@ skpinv: addi    r6,r6,1    /* Increment */
        mtspr   SPRN_MAS1,r6
        tlbwe
        /* Invalidate TLB1 */
        li      r9,0x0c
        tlbivax 0,r9
#ifdef CONFIG_SMP
        tlbsync
@@ -254,7 +254,7 @@ skpinv: addi    r6,r6,1    /* Increment */
        mtspr   SPRN_MAS1,r8
        tlbwe
        /* Invalidate TLB1 */
        li      r9,0x0c
        tlbivax 0,r9
#ifdef CONFIG_SMP
        tlbsync
@@ -294,7 +294,7 @@ skpinv: addi    r6,r6,1    /* Increment */
#ifdef CONFIG_E200
        oris    r2,r2,MAS4_TLBSELD(1)@h
#endif
        mtspr   SPRN_MAS4, r2

#if 0
        /* Enable DOZE */
@@ -305,7 +305,7 @@ skpinv: addi    r6,r6,1    /* Increment */
#ifdef CONFIG_E200
        /* enable dedicated debug exception handling resources (Debug APU) */
        mfspr   r2,SPRN_HID0
        ori     r2,r2,HID0_DAPUEN@l
        mtspr   SPRN_HID0, r2
#endif
@@ -391,7 +391,7 @@ skpinv: addi    r6,r6,1    /* Increment */
#ifdef CONFIG_PTE_64BIT
#define PTE_FLAGS_OFFSET        4
#define FIND_PTE        \
        rlwinm  r12, r10, 13, 19, 29;   /* Compute pgdir/pmd offset */  \
        lwzx    r11, r12, r11;          /* Get pgd/pmd entry */         \
        rlwinm. r12, r11, 0, 0, 20;     /* Extract pt base address */   \
        beq     2f;                     /* Bail if no table */          \
@@ -487,7 +487,7 @@ interrupt_base:
         */
        andi.   r11, r11, _PAGE_HWEXEC
        rlwimi  r11, r11, 31, 27, 27    /* SX <- _PAGE_HWEXEC */
        ori     r11, r11, (MAS3_UW|MAS3_SW|MAS3_UR|MAS3_SR)@l /* set static perms */

        /* update search PID in MAS6, AS = 0 */
        mfspr   r12,SPRN_PID0
@@ -694,7 +694,7 @@ interrupt_base:
        START_EXCEPTION(SPEUnavailable)
        NORMAL_EXCEPTION_PROLOG
        bne     load_up_spe
        addi    r3,r1,STACK_FRAME_OVERHEAD
        EXC_XFER_EE_LITE(0x2010, KernelSPE)
#else
        EXCEPTION(0x2020, SPEUnavailable, unknown_exception, EXC_XFER_EE)
@@ -741,10 +741,10 @@ data_access:
 * Both the instruction and data TLB miss get to this
 * point to load the TLB.
 *      r10 - EA of fault
 *      r11 - TLB (info from Linux PTE)
 *      r12, r13 - available to use
 *      CR5 - results of addr < TASK_SIZE
 *      MAS0, MAS1 - loaded with proper value when we get here
 *      MAS2, MAS3 - will need additional info from Linux PTE
 * Upon exit, we reload everything and RFI.
@@ -813,7 +813,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_BIG_PHYS)
        lwz     r13, tlbcam_index@l(r13)
        rlwimi  r12, r13, 0, 20, 31
7:
        mtspr   SPRN_MAS0,r12
#endif /* CONFIG_E200 */

        tlbwe
@@ -855,17 +855,17 @@ load_up_spe:
        beq     1f
        addi    r4,r4,THREAD            /* want THREAD of last_task_used_spe */
        SAVE_32EVRS(0,r10,r4)
        evxor   evr10, evr10, evr10     /* clear out evr10 */
        evmwumiaa evr10, evr10, evr10   /* evr10 <- ACC = 0 * 0 + ACC */
        li      r5,THREAD_ACC
        evstddx evr10, r4, r5           /* save off accumulator */
        lwz     r5,PT_REGS(r4)
        lwz     r4,_MSR-STACK_FRAME_OVERHEAD(r5)
        lis     r10,MSR_SPE@h
        andc    r4,r4,r10               /* disable SPE for previous task */
        stw     r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
-#endif /* CONFIG_SMP */
+#endif /* !CONFIG_SMP */
        /* enable use of SPE after return */
        oris    r9,r9,MSR_SPE@h
        mfspr   r5,SPRN_SPRG3           /* current task's THREAD (phys) */
@@ -878,7 +878,7 @@ load_up_spe:
#ifndef CONFIG_SMP
        subi    r4,r5,THREAD
        stw     r4,last_task_used_spe@l(r3)
-#endif /* CONFIG_SMP */
+#endif /* !CONFIG_SMP */
        /* restore registers and return */
2:      REST_4GPRS(3, r11)
        lwz     r10,_CCR(r11)
@@ -963,10 +963,10 @@ _GLOBAL(giveup_spe)
        lwz     r5,PT_REGS(r3)
        cmpi    0,r5,0
        SAVE_32EVRS(0, r4, r3)
        evxor   evr6, evr6, evr6        /* clear out evr6 */
        evmwumiaa evr6, evr6, evr6      /* evr6 <- ACC = 0 * 0 + ACC */
        li      r4,THREAD_ACC
        evstddx evr6, r4, r3            /* save off accumulator */
        mfspr   r6,SPRN_SPEFSCR
        stw     r6,THREAD_SPEFSCR(r3)   /* save spefscr register value */
        beq     1f
@@ -979,7 +979,7 @@ _GLOBAL(giveup_spe)
        li      r5,0
        lis     r4,last_task_used_spe@ha
        stw     r5,last_task_used_spe@l(r4)
-#endif /* CONFIG_SMP */
+#endif /* !CONFIG_SMP */
        blr
#endif /* CONFIG_SPE */
@@ -1000,15 +1000,15 @@ _GLOBAL(giveup_fpu)
 */
_GLOBAL(abort)
        li      r13,0
        mtspr   SPRN_DBCR0,r13          /* disable all debug events */
        isync
        mfmsr   r13
        ori     r13,r13,MSR_DE@l        /* Enable Debug Events */
        mtmsr   r13
        isync
        mfspr   r13,SPRN_DBCR0
        lis     r13,(DBCR0_IDM|DBCR0_RST_CHIP)@h
        mtspr   SPRN_DBCR0,r13
        isync

_GLOBAL(set_context)
@@ -1043,7 +1043,7 @@ swapper_pg_dir:
/* Reserved 4k for the critical exception stack & 4k for the machine
 * check stack per CPU for kernel mode exceptions */
        .section .bss
        .align 12
exception_stack_bottom:
        .space  BOOKE_EXCEPTION_STACK_SIZE * NR_CPUS
        .globl  exception_stack_top