Commit aac372de authored Oct 12, 2005 by Linus Torvalds
Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6
parents 02d31ed2 c9c10830
Showing 6 changed files with 166 additions and 226 deletions
arch/sparc64/kernel/dtlb_base.S   +5   -9
arch/sparc64/kernel/dtlb_prot.S   +6   -6
arch/sparc64/kernel/head.S        +26  -35
arch/sparc64/kernel/itlb_base.S   +11  -15
arch/sparc64/kernel/ktlb.S        +44  -48
arch/sparc64/mm/init.c            +74  -113
arch/sparc64/kernel/dtlb_base.S

@@ -53,19 +53,18 @@
  * be guaranteed to be 0 ... mmu_context.h does guarantee this
  * by only using 10 bits in the hwcontext value.
  */
-#define CREATE_VPTE_OFFSET1(r1, r2)
+#define CREATE_VPTE_OFFSET1(r1, r2) nop
 #define CREATE_VPTE_OFFSET2(r1, r2) \
 				srax	r1, 10, r2
-#define CREATE_VPTE_NOP		nop
 #else
 #define CREATE_VPTE_OFFSET1(r1, r2) \
 				srax	r1, PAGE_SHIFT, r2
 #define CREATE_VPTE_OFFSET2(r1, r2) \
 				sllx	r2, 3, r2
-#define CREATE_VPTE_NOP
 #endif
 
 /* DTLB ** ICACHE line 1: Quick user TLB misses		*/
+	mov		TLB_SFSR, %g1
 	ldxa		[%g1 + %g1] ASI_DMMU, %g4	! Get TAG_ACCESS
 	andcc		%g4, TAG_CONTEXT_BITS, %g0	! From Nucleus?
 from_tl1_trap:
@@ -74,18 +73,16 @@ from_tl1_trap:
 	be,pn		%xcc, kvmap			! Yep, special processing
 	 CREATE_VPTE_OFFSET2(%g4, %g6)			! Create VPTE offset
 	cmp		%g5, 4				! Last trap level?
-	be,pn		%xcc, longpath			! Yep, cannot risk VPTE miss
-	 nop						! delay slot
 
 /* DTLB ** ICACHE line 2: User finish + quick kernel TLB misses	*/
+	be,pn		%xcc, longpath			! Yep, cannot risk VPTE miss
+	 nop						! delay slot
 	ldxa		[%g3 + %g6] ASI_S, %g5		! Load VPTE
 1:	brgez,pn	%g5, longpath			! Invalid, branch out
 	 nop						! Delay-slot
 9:	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
 	retry						! Trap return
 	nop
-	nop
-	nop
 
 /* DTLB ** ICACHE line 3: winfixups+real_faults		*/
 longpath:
@@ -106,8 +103,7 @@ longpath:
 	nop
 	nop
 	nop
-	CREATE_VPTE_NOP
+	nop
 
 #undef CREATE_VPTE_OFFSET1
 #undef CREATE_VPTE_OFFSET2
-#undef CREATE_VPTE_NOP
arch/sparc64/kernel/dtlb_prot.S

@@ -14,14 +14,14 @@
  */
 
 /* PROT ** ICACHE line 1: User DTLB protection trap	*/
-	stxa		%g0, [%g1] ASI_DMMU		! Clear SFSR FaultValid bit
-	membar		#Sync				! Synchronize ASI stores
-	rdpr		%pstate, %g5			! Move into alternate globals
+	mov		TLB_SFSR, %g1
+	stxa		%g0, [%g1] ASI_DMMU		! Clear FaultValid bit
+	membar		#Sync				! Synchronize stores
+	rdpr		%pstate, %g5			! Move into alt-globals
 	wrpr		%g5, PSTATE_AG|PSTATE_MG, %pstate
-	rdpr		%tl, %g1			! Need to do a winfixup?
+	rdpr		%tl, %g1			! Need a winfixup?
 	cmp		%g1, 1				! Trap level >1?
-	mov		TLB_TAG_ACCESS, %g4		! Prepare reload of vaddr
-	nop
+	mov		TLB_TAG_ACCESS, %g4		! For reload of vaddr
 
 /* PROT ** ICACHE line 2: More real fault processing */
 	bgu,pn		%xcc, winfix_trampoline		! Yes, perform winfixup
arch/sparc64/kernel/head.S

@@ -28,19 +28,14 @@
 #include <asm/mmu.h>
 
 /* This section from from _start to sparc64_boot_end should fit into
- * 0x0000.0000.0040.4000 to 0x0000.0000.0040.8000 and will be sharing space
- * with bootup_user_stack, which is from 0x0000.0000.0040.4000 to
- * 0x0000.0000.0040.6000 and empty_bad_page, which is from
- * 0x0000.0000.0040.6000 to 0x0000.0000.0040.8000.
+ * 0x0000000000404000 to 0x0000000000408000.
  */
 	.text
 	.globl	start, _start, stext, _stext
 _start:
 start:
 _stext:
 stext:
-bootup_user_stack:
 ! 0x0000000000404000
 	b	sparc64_boot
 	 flushw				/* Flush register file.      */
@@ -392,31 +387,30 @@ tlb_fixup_done:
 	 * former does use this code, the latter does not yet due
 	 * to some complexities.  That should be fixed up at some
 	 * point.
+	 *
+	 * There used to be enormous complexity wrt. transferring
+	 * over from the firwmare's trap table to the Linux kernel's.
+	 * For example, there was a chicken & egg problem wrt. building
+	 * the OBP page tables, yet needing to be on the Linux kernel
+	 * trap table (to translate PAGE_OFFSET addresses) in order to
+	 * do that.
+	 *
+	 * We now handle OBP tlb misses differently, via linear lookups
+	 * into the prom_trans[] array.  So that specific problem no
+	 * longer exists.  Yet, unfortunately there are still some issues
+	 * preventing trampoline.S from using this code... ho hum.
 	 */
 	.globl	setup_trap_table
 setup_trap_table:
 	save	%sp, -192, %sp
 
-	/* Force interrupts to be disabled.  Transferring over to
-	 * the Linux trap table is a very delicate operation.
-	 * Until we are actually on the Linux trap table, we cannot
-	 * get the PAGE_OFFSET linear mappings translated.  We need
-	 * that mapping to be setup in order to initialize the firmware
-	 * page tables.
-	 *
-	 * So there is this window of time, from the return from
-	 * prom_set_trap_table() until inherit_prom_mappings_post()
-	 * (in arch/sparc64/mm/init.c) completes, during which no
-	 * firmware address space accesses can be made.
-	 */
+	/* Force interrupts to be disabled. */
 	rdpr	%pstate, %o1
 	andn	%o1, PSTATE_IE, %o1
 	wrpr	%o1, 0x0, %pstate
 	wrpr	%g0, 15, %pil
 
-	/* Ok, now make the final valid firmware call to jump over
-	 * to the Linux trap table.
-	 */
+	/* Make the firmware call to jump over to the Linux trap table.  */
 	call	prom_set_trap_table
 	 sethi	%hi(sparc64_ttable_tl0), %o0
@@ -540,15 +534,21 @@ setup_tba:	/* i0 = is_starfire */
 	ret
 	 restore
+sparc64_boot_end:
+
+#include "systbls.S"
+#include "ktlb.S"
+#include "etrap.S"
+#include "rtrap.S"
+#include "winfixup.S"
+#include "entry.S"
 
 /*
- * The following skips make sure the trap table in ttable.S is aligned
+ * The following skip makes sure the trap table in ttable.S is aligned
  * on a 32K boundary as required by the v9 specs for TBA register.
  */
-sparc64_boot_end:
-	.skip	0x2000 + _start - sparc64_boot_end
-bootup_user_stack_end:
-	.skip	0x2000
+1:	.skip	0x4000 + _start - 1b
 
 #ifdef CONFIG_SBUS
 /* This is just a hack to fool make depend config.h discovering
@@ -560,15 +560,6 @@ bootup_user_stack_end:
 ! 0x0000000000408000
 
 #include "ttable.S"
-#include "systbls.S"
-#include "ktlb.S"
-#include "etrap.S"
-#include "rtrap.S"
-#include "winfixup.S"
-#include "entry.S"
-
-	/* This is just anal retentiveness on my part... */
-	.align		16384
 
 	.data
 	.align	8
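For orientation, here is a small standalone C snippet restating the prom_trans[] entry layout that the new head.S comment above refers to and that the rewritten ktlb.S handlers further down rely on. It is illustrative only and not part of the commit: the struct mirrors struct linux_prom_translation from arch/sparc64/mm/init.c, and the printed offsets assume an LP64 target (unsigned long is 8 bytes, as on sparc64), which is why the assembly can use the fixed offsets 0x00/0x08/0x10 and a (3 * 8)-byte stride.

#include <stdio.h>
#include <stddef.h>

/* Same shape as struct linux_prom_translation in arch/sparc64/mm/init.c:
 * one OBP mapping per entry, filled in by read_obp_translations() from the
 * firmware "translations" property.
 */
struct linux_prom_translation {
	unsigned long virt;	/* OBP virtual base  -> ldx [%g5 + 0x00] in ktlb.S */
	unsigned long size;	/* mapping length    -> ldx [%g5 + 0x08] in ktlb.S */
	unsigned long data;	/* TTE data          -> ldx [%g5 + 0x10] in ktlb.S */
};

int main(void)
{
	/* On an LP64 target this prints 0, 8, 16 and 24, matching the load
	 * offsets and the "add %g5, (3 * 8), %g5" stride used in ktlb.S.
	 */
	printf("virt=%zu size=%zu data=%zu sizeof=%zu\n",
	       offsetof(struct linux_prom_translation, virt),
	       offsetof(struct linux_prom_translation, size),
	       offsetof(struct linux_prom_translation, data),
	       sizeof(struct linux_prom_translation));
	return 0;
}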
arch/sparc64/kernel/itlb_base.S

@@ -15,14 +15,12 @@
  */
 #define CREATE_VPTE_OFFSET1(r1, r2) \
 				srax	r1, 10, r2
-#define CREATE_VPTE_OFFSET2(r1, r2)
-#define CREATE_VPTE_NOP		nop
+#define CREATE_VPTE_OFFSET2(r1, r2) nop
 #else /* PAGE_SHIFT */
 #define CREATE_VPTE_OFFSET1(r1, r2) \
 				srax	r1, PAGE_SHIFT, r2
 #define CREATE_VPTE_OFFSET2(r1, r2) \
 				sllx	r2, 3, r2
-#define CREATE_VPTE_NOP
 #endif /* PAGE_SHIFT */
@@ -36,6 +34,7 @@
  */
 
 /* ITLB ** ICACHE line 1: Quick user TLB misses	*/
+	mov		TLB_SFSR, %g1
 	ldxa		[%g1 + %g1] ASI_IMMU, %g4	! Get TAG_ACCESS
 	CREATE_VPTE_OFFSET1(%g4, %g6)			! Create VPTE offset
 	CREATE_VPTE_OFFSET2(%g4, %g6)			! Create VPTE offset
@@ -43,41 +42,38 @@
 1:	brgez,pn	%g5, 3f				! Not valid, branch out
 	 sethi		%hi(_PAGE_EXEC), %g4		! Delay-slot
 	andcc		%g5, %g4, %g0			! Executable?
+
+/* ITLB ** ICACHE line 2: Real faults */
 	be,pn		%xcc, 3f			! Nope, branch.
 	 nop						! Delay-slot
 2:	stxa		%g5, [%g0] ASI_ITLB_DATA_IN	! Load PTE into TLB
 	retry						! Trap return
-
-/* ITLB ** ICACHE line 2: Real faults			*/
-3:	rdpr		%pstate, %g4			! Move into alternate globals
+3:	rdpr		%pstate, %g4			! Move into alt-globals
 	wrpr		%g4, PSTATE_AG|PSTATE_MG, %pstate
 	rdpr		%tpc, %g5			! And load faulting VA
 	mov		FAULT_CODE_ITLB, %g4		! It was read from ITLB
-sparc64_realfault_common:				! Called by TL0 dtlb_miss too
+
+/* ITLB ** ICACHE line 3: Finish faults */
+sparc64_realfault_common:				! Called by dtlb_miss
 	stb		%g4, [%g6 + TI_FAULT_CODE]
 	stx		%g5, [%g6 + TI_FAULT_ADDR]
 	ba,pt		%xcc, etrap			! Save state
 1:	 rd		%pc, %g7			! ...
-	nop
-
-/* ITLB ** ICACHE line 3: Finish faults + window fixups */
 	call		do_sparc64_fault		! Call fault handler
 	 add		%sp, PTREGS_OFF, %o0		! Compute pt_regs arg
 	ba,pt		%xcc, rtrap_clr_l6		! Restore cpu state
 	 nop
+
+/* ITLB ** ICACHE line 4: Window fixups */
 winfix_trampoline:
 	rdpr		%tpc, %g3			! Prepare winfixup TNPC
-	or		%g3, 0x7c, %g3			! Compute offset to branch
+	or		%g3, 0x7c, %g3			! Compute branch offset
 	wrpr		%g3, %tnpc			! Write it into TNPC
 	done						! Do it to it
-
-/* ITLB ** ICACHE line 4: Unused...	*/
 	nop
 	nop
 	nop
 	nop
-	CREATE_VPTE_NOP
 
 #undef CREATE_VPTE_OFFSET1
 #undef CREATE_VPTE_OFFSET2
-#undef CREATE_VPTE_NOP
arch/sparc64/kernel/ktlb.S

@@ -58,9 +58,6 @@ vpte_noent:
 	done
 
 vpte_insn_obp:
-	sethi		%hi(prom_pmd_phys), %g5
-	ldx		[%g5 + %lo(prom_pmd_phys)], %g5
-
 	/* Behave as if we are at TL0.  */
 	wrpr		%g0, 1, %tl
 	rdpr		%tpc, %g4	/* Find original faulting iaddr */
@@ -71,58 +68,57 @@ vpte_insn_obp:
 	mov		TLB_SFSR, %g1
 	stxa		%g4, [%g1 + %g1] ASI_IMMU
 
-	/* Get PMD offset.  */
-	srlx		%g4, 23, %g6
-	and		%g6, 0x7ff, %g6
-	sllx		%g6, 2, %g6
-
-	/* Load PMD, is it valid?  */
-	lduwa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
-	brz,pn		%g5, longpath
-	 sllx		%g5, 11, %g5
-
-	/* Get PTE offset.  */
-	srlx		%g4, 13, %g6
-	and		%g6, 0x3ff, %g6
-	sllx		%g6, 3, %g6
-
-	/* Load PTE.  */
-	ldxa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
-	brgez,pn	%g5, longpath
-	 nop
-
-	/* TLB load and return from trap.  */
+	sethi		%hi(prom_trans), %g5
+	or		%g5, %lo(prom_trans), %g5
+
+1:	ldx		[%g5 + 0x00], %g6	! base
+	brz,a,pn	%g6, longpath		! no more entries, fail
+	 mov		TLB_SFSR, %g1		! and restore %g1
+	ldx		[%g5 + 0x08], %g1	! len
+	add		%g6, %g1, %g1		! end
+	cmp		%g6, %g4
+	bgu,pt		%xcc, 2f
+	 cmp		%g4, %g1
+	bgeu,pt		%xcc, 2f
+	 ldx		[%g5 + 0x10], %g1	! PTE
+
+	/* TLB load, restore %g1, and return from trap.  */
+	sub		%g4, %g6, %g6
+	add		%g1, %g6, %g5
+	mov		TLB_SFSR, %g1
 	stxa		%g5, [%g0] ASI_ITLB_DATA_IN
 	retry
 
-kvmap_do_obp:
-	sethi		%hi(prom_pmd_phys), %g5
-	ldx		[%g5 + %lo(prom_pmd_phys)], %g5
-
-	/* Get PMD offset.  */
-	srlx		%g4, 23, %g6
-	and		%g6, 0x7ff, %g6
-	sllx		%g6, 2, %g6
-
-	/* Load PMD, is it valid?  */
-	lduwa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
-	brz,pn		%g5, longpath
-	 sllx		%g5, 11, %g5
-
-	/* Get PTE offset.  */
-	srlx		%g4, 13, %g6
-	and		%g6, 0x3ff, %g6
-	sllx		%g6, 3, %g6
-
-	/* Load PTE.  */
-	ldxa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
-	brgez,pn	%g5, longpath
-	 nop
-
-	/* TLB load and return from trap.  */
+2:	ba,pt		%xcc, 1b
+	 add		%g5, (3 * 8), %g5	! next entry
+
+kvmap_do_obp:
+	sethi		%hi(prom_trans), %g5
+	or		%g5, %lo(prom_trans), %g5
+	srlx		%g4, 13, %g4
+	sllx		%g4, 13, %g4
+
+1:	ldx		[%g5 + 0x00], %g6	! base
+	brz,a,pn	%g6, longpath		! no more entries, fail
+	 mov		TLB_SFSR, %g1		! and restore %g1
+	ldx		[%g5 + 0x08], %g1	! len
+	add		%g6, %g1, %g1		! end
+	cmp		%g6, %g4
+	bgu,pt		%xcc, 2f
+	 cmp		%g4, %g1
+	bgeu,pt		%xcc, 2f
+	 ldx		[%g5 + 0x10], %g1	! PTE
+
+	/* TLB load, restore %g1, and return from trap.  */
+	sub		%g4, %g6, %g6
+	add		%g1, %g6, %g5
+	mov		TLB_SFSR, %g1
 	stxa		%g5, [%g0] ASI_DTLB_DATA_IN
 	retry
 
+2:	ba,pt		%xcc, 1b
+	 add		%g5, (3 * 8), %g5	! next entry
+
 /*
  * On a first level data miss, check whether this is to the OBP range (note
  * that such accesses can be made by prom, as well as by kernel using
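As a reading aid, the loop at local label 1: in the new vpte_insn_obp/kvmap_do_obp paths above can be restated in C roughly as follows. This is a hedged sketch, not kernel code: the table contents are invented, the function and type names are made up for the example, LP64 is assumed (unsigned long is 8 bytes, as on sparc64), and the zero-virt terminator stands in for the zero padding that read_obp_translations() leaves at the tail of prom_trans[512].

#include <stdio.h>

struct prom_trans_ent {			/* virt/size/data, 8 bytes each */
	unsigned long virt;
	unsigned long size;
	unsigned long data;
};

/* Invented sample entries in the sun4u OBP address range; a zero virt
 * marks the end of the table, which is what the brz test above checks.
 */
static const struct prom_trans_ent prom_trans[] = {
	{ 0xf0000000UL, 0x400000UL, 0x80000000126071e3UL },
	{ 0xf0400000UL, 0x002000UL, 0x8000000013a081e3UL },
	{ 0, 0, 0 },
};

/* C rendering of the assembly loop: return the TTE to load into the TLB,
 * or 0 to fall back to longpath (full fault processing).
 */
static unsigned long obp_tte_lookup(unsigned long va)
{
	const struct prom_trans_ent *p;

	for (p = prom_trans; ; p++) {		/* add  %g5, (3 * 8), %g5    */
		unsigned long base = p->virt;	/* ldx  [%g5 + 0x00], %g6    */
		unsigned long end;

		if (base == 0)			/* brz,a,pn: no more entries */
			return 0;
		end = base + p->size;		/* ldx  [%g5 + 0x08]; add    */
		if (base > va || va >= end)	/* cmp/bgu and cmp/bgeu      */
			continue;
		return p->data + (va - base);	/* ldx [%g5 + 0x10]; sub; add */
	}
}

int main(void)
{
	printf("tte=%#lx\n", obp_tte_lookup(0xf0001000UL));
	return 0;
}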
arch/sparc64/mm/init.c

@@ -105,7 +105,7 @@ static void __init read_obp_memory(const char *property,
 		regs[i].phys_addr = base;
 		regs[i].reg_size = size;
 	}
 	sort(regs, ents, sizeof(struct linux_prom64_registers),
 	     cmp_p64, NULL);
 }
@@ -367,8 +367,11 @@ struct linux_prom_translation {
 	unsigned long size;
 	unsigned long data;
 };
-static struct linux_prom_translation prom_trans[512] __initdata;
-static unsigned int prom_trans_ents __initdata;
+
+/* Exported for kernel TLB miss handling in ktlb.S */
+struct linux_prom_translation prom_trans[512] __read_mostly;
+unsigned int prom_trans_ents __read_mostly;
+
+unsigned int swapper_pgd_zero __read_mostly;
 
 extern unsigned long prom_boot_page;
 extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle);
@@ -378,122 +381,57 @@ extern void register_prom_callbacks(void);
 /* Exported for SMP bootup purposes. */
 unsigned long kern_locked_tte_data;
 
-/* Exported for kernel TLB miss handling in ktlb.S */
-unsigned long prom_pmd_phys __read_mostly;
-unsigned int swapper_pgd_zero __read_mostly;
-
-static pmd_t *prompmd __read_mostly;
-
-#define BASE_PAGE_SIZE 8192
-
 /*
  * Translate PROM's mapping we capture at boot time into physical address.
  * The second parameter is only set from prom_callback() invocations.
  */
 unsigned long prom_virt_to_phys(unsigned long promva, int *error)
 {
-	pmd_t *pmdp = prompmd + ((promva >> 23) & 0x7ff);
-	pte_t *ptep;
-	unsigned long base;
-
-	if (pmd_none(*pmdp)) {
-		if (error)
-			*error = 1;
-		return 0;
-	}
-	ptep = (pte_t *) __pmd_page(*pmdp) + ((promva >> 13) & 0x3ff);
-	if (!pte_present(*ptep)) {
-		if (error)
-			*error = 1;
-		return 0;
-	}
-	if (error) {
-		*error = 0;
-		return pte_val(*ptep);
-	}
-	base = pte_val(*ptep) & _PAGE_PADDR;
-
-	return base + (promva & (BASE_PAGE_SIZE - 1));
-}
-
-/* The obp translations are saved based on 8k pagesize, since obp can
- * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
- * HI_OBP_ADDRESS range are handled in entry.S and do not use the vpte
- * scheme (also, see rant in inherit_locked_prom_mappings()).
- */
-static void __init build_obp_range(unsigned long start, unsigned long end, unsigned long data)
-{
-	unsigned long vaddr;
-
-	for (vaddr = start; vaddr < end; vaddr += BASE_PAGE_SIZE) {
-		unsigned long val;
-		pmd_t *pmd;
-		pte_t *pte;
-
-		pmd = prompmd + ((vaddr >> 23) & 0x7ff);
-		if (pmd_none(*pmd)) {
-			pte = __alloc_bootmem(BASE_PAGE_SIZE, BASE_PAGE_SIZE,
-					      PAGE_SIZE);
-			if (!pte)
-				prom_halt();
-			memset(pte, 0, BASE_PAGE_SIZE);
-			pmd_set(pmd, pte);
-		}
-		pte = (pte_t *) __pmd_page(*pmd) + ((vaddr >> 13) & 0x3ff);
-
-		val = data;
-
-		/* Clear diag TTE bits. */
-		if (tlb_type == spitfire)
-			val &= ~0x0003fe0000000000UL;
-
-		set_pte_at(&init_mm, vaddr, pte,
-			   __pte(val | _PAGE_MODIFIED));
-
-		data += BASE_PAGE_SIZE;
-	}
-}
+	int i;
+
+	for (i = 0; i < prom_trans_ents; i++) {
+		struct linux_prom_translation *p = &prom_trans[i];
+
+		if (promva >= p->virt &&
+		    promva < (p->virt + p->size)) {
+			unsigned long base = p->data & _PAGE_PADDR;
+
+			if (error)
+				*error = 0;
+			return base + (promva & (8192 - 1));
+		}
+	}
+	if (error)
+		*error = 1;
+	return 0UL;
+}
 
+/* The obp translations are saved based on 8k pagesize, since obp can
+ * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
+ * HI_OBP_ADDRESS range are handled in ktlb.S and do not use the vpte
+ * scheme (also, see rant in inherit_locked_prom_mappings()).
+ */
 static inline int in_obp_range(unsigned long vaddr)
 {
 	return (vaddr >= LOW_OBP_ADDRESS &&
 		vaddr < HI_OBP_ADDRESS);
 }
 
-#define OBP_PMD_SIZE 2048
-static void __init build_obp_pgtable(void)
+static int cmp_ptrans(const void *a, const void *b)
 {
-	unsigned long i;
-
-	prompmd = __alloc_bootmem(OBP_PMD_SIZE, OBP_PMD_SIZE, PAGE_SIZE);
-	if (!prompmd)
-		prom_halt();
-
-	memset(prompmd, 0, OBP_PMD_SIZE);
-
-	prom_pmd_phys = __pa(prompmd);
-
-	for (i = 0; i < prom_trans_ents; i++) {
-		unsigned long start, end;
-
-		if (!in_obp_range(prom_trans[i].virt))
-			continue;
-
-		start = prom_trans[i].virt;
-		end = start + prom_trans[i].size;
-		if (end > HI_OBP_ADDRESS)
-			end = HI_OBP_ADDRESS;
-
-		build_obp_range(start, end, prom_trans[i].data);
-	}
+	const struct linux_prom_translation *x = a, *y = b;
+
+	if (x->virt > y->virt)
+		return 1;
+	if (x->virt < y->virt)
+		return -1;
+	return 0;
 }
 
-/* Read OBP translations property into 'prom_trans[]'.
- * Return the number of entries.
- */
+/* Read OBP translations property into 'prom_trans[]'.  */
 static void __init read_obp_translations(void)
 {
-	int n, node;
+	int n, node, ents, first, last, i;
 
 	node = prom_finddevice("/virtual-memory");
 	n = prom_getproplen(node, "translations");
@@ -515,7 +453,41 @@ static void __init read_obp_translations(void)
 
 	n = n / sizeof(struct linux_prom_translation);
 
-	prom_trans_ents = n;
+	ents = n;
+
+	sort(prom_trans, ents, sizeof(struct linux_prom_translation),
+	     cmp_ptrans, NULL);
+
+	/* Now kick out all the non-OBP entries.  */
+	for (i = 0; i < ents; i++) {
+		if (in_obp_range(prom_trans[i].virt))
+			break;
+	}
+	first = i;
+	for (; i < ents; i++) {
+		if (!in_obp_range(prom_trans[i].virt))
+			break;
+	}
+	last = i;
+
+	for (i = 0; i < (last - first); i++) {
+		struct linux_prom_translation *src = &prom_trans[i + first];
+		struct linux_prom_translation *dest = &prom_trans[i];
+
+		*dest = *src;
+	}
+	for (; i < ents; i++) {
+		struct linux_prom_translation *dest = &prom_trans[i];
+
+		dest->virt = dest->size = dest->data = 0x0UL;
+	}
+
+	prom_trans_ents = last - first;
+
+	if (tlb_type == spitfire) {
+		/* Clear diag TTE bits. */
+		for (i = 0; i < prom_trans_ents; i++)
+			prom_trans[i].data &= ~0x0003fe0000000000UL;
+	}
 }
 
 static void __init remap_kernel(void)
@@ -553,21 +525,18 @@ static void __init remap_kernel(void)
 }
 
-static void __init inherit_prom_mappings_pre(void)
+static void __init inherit_prom_mappings(void)
 {
 	read_obp_translations();
 
 	/* Now fixup OBP's idea about where we really are mapped. */
 	prom_printf("Remapping the kernel... ");
 	remap_kernel();
 	prom_printf("done.\n");
-}
 
-static void __init inherit_prom_mappings_post(void)
-{
-	build_obp_pgtable();
+	prom_printf("Registering callbacks... ");
 	register_prom_callbacks();
+	prom_printf("done.\n");
 }
 
 /* The OBP specifications for sun4u mark 0xfffffffc00000000 and
@@ -1519,7 +1488,7 @@ void __init paging_init(void)
 
 	swapper_pgd_zero = pgd_val(swapper_pg_dir[0]);
 
-	inherit_prom_mappings_pre();
+	inherit_prom_mappings();
 
 	/* Ok, we can use our TLB miss and window trap handlers safely.
 	 * We need to do a quick peek here to see if we are on StarFire
@@ -1530,23 +1499,15 @@ void __init paging_init(void)
 		extern void setup_tba(int);
 		setup_tba(this_is_starfire);
 	}
 
-	__flush_tlb_all();
+	inherit_locked_prom_mappings(1);
 
-	/* Everything from this point forward, until we are done with
-	 * inherit_prom_mappings_post(), must complete successfully
-	 * without calling into the firmware.  The firwmare page tables
-	 * have not been built, but we are running on the Linux kernel's
-	 * trap table.
-	 */
+	__flush_tlb_all();
 
 	/* Setup bootmem... */
 	pages_avail = 0;
 	last_valid_pfn = end_pfn = bootmem_init(&pages_avail);
 
-	inherit_prom_mappings_post();
-
-	inherit_locked_prom_mappings(1);
-
 #ifdef CONFIG_DEBUG_PAGEALLOC
 	kernel_physical_mapping_init();
 #endif
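The new read_obp_translations() above sorts the firmware entries and then "kicks out all the non-OBP entries" by keeping only the contiguous in-range run. The standalone sketch below replays that post-processing in userspace so it can be compiled and run on its own; the sample entries are invented, qsort() stands in for the kernel's sort(), LP64 is assumed, and the LOW_OBP_ADDRESS/HI_OBP_ADDRESS values are the sun4u constants assumed for the example.

#include <stdio.h>
#include <stdlib.h>

/* LP64 assumed (as on sparc64): unsigned long is 8 bytes. */
#define LOW_OBP_ADDRESS	0x00000000f0000000UL
#define HI_OBP_ADDRESS	0x0000000100000000UL

struct trans {
	unsigned long virt;
	unsigned long size;
	unsigned long data;
};

static int in_obp_range(unsigned long vaddr)
{
	return vaddr >= LOW_OBP_ADDRESS && vaddr < HI_OBP_ADDRESS;
}

/* Same ordering rule as cmp_ptrans() in the patch. */
static int cmp_trans(const void *a, const void *b)
{
	const struct trans *x = a, *y = b;

	if (x->virt > y->virt)
		return 1;
	if (x->virt < y->virt)
		return -1;
	return 0;
}

int main(void)
{
	struct trans tab[] = {				/* invented entries */
		{ 0x00000000f0400000UL, 0x2000UL, 0x111UL },
		{ 0x0000000000404000UL, 0x4000UL, 0x222UL },	/* not OBP */
		{ 0x00000000f0000000UL, 0x4000UL, 0x333UL },
	};
	int ents = sizeof(tab) / sizeof(tab[0]);
	int i, first, last;

	qsort(tab, ents, sizeof(tab[0]), cmp_trans);

	/* Keep only the contiguous run of in-range entries and zero the
	 * rest, mirroring what the new read_obp_translations() does.
	 */
	for (i = 0; i < ents; i++)
		if (in_obp_range(tab[i].virt))
			break;
	first = i;
	for (; i < ents; i++)
		if (!in_obp_range(tab[i].virt))
			break;
	last = i;

	for (i = 0; i < last - first; i++)
		tab[i] = tab[i + first];
	for (; i < ents; i++)
		tab[i].virt = tab[i].size = tab[i].data = 0;

	for (i = 0; i < ents; i++)			/* two entries kept */
		printf("%#018lx %#010lx %#lx\n",
		       tab[i].virt, tab[i].size, tab[i].data);
	return 0;
}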