Commit b5984adb
authored Sep 03, 2003 by Linus Torvalds

Merge bk://bk.arm.linux.org.uk/linux-2.6-rmk
into home.osdl.org:/home/torvalds/v2.5/linux

parents c5164261 ac7ebfb2

Showing 66 changed files with 1693 additions and 3981 deletions (+1693 -3981)
 arch/arm/Makefile                          +10   -39
 arch/arm/kernel/Makefile                    +1    -4
 arch/arm/kernel/entry-armo.S                +0  -633
 arch/arm/kernel/entry-armv.S                +3    -1
 arch/arm/kernel/entry-common.S              +4    -1
 arch/arm/kernel/head.S                      +1    -0
 arch/arm/mach-l7200/core.c                  +0    -1
 arch/arm/mach-rpc/riscpc.c                  +1    -1
 arch/arm/mm/Makefile                        +2   -10
 arch/arm/mm/mm-armv.c                       +4    -4
 arch/arm/mm/proc-arm1020.S                  +1    -0
 arch/arm/mm/proc-arm2_3.S                   +0  -360
 arch/arm/mm/proc-arm6_7.S                   +1    -0
 arch/arm/mm/proc-arm720.S                   +1    -0
 arch/arm/mm/proc-arm920.S                   +1    -0
 arch/arm/mm/proc-arm922.S                   +1    -0
 arch/arm/mm/proc-arm926.S                   +1    -0
 arch/arm/mm/proc-sa110.S                    +2    -1
 arch/arm/mm/proc-sa1100.S                   +2    -1
 arch/arm/mm/proc-xscale.S                   +2    -1
 include/asm-arm/assembler.h                +72    -7
 include/asm-arm/atomic.h                    +1    -1
 include/asm-arm/cacheflush.h              +271    -2
 include/asm-arm/cpu-multi26.h               +0   -47
 include/asm-arm/domain.h                    +1    -1
 include/asm-arm/elf.h                      +26    -1
 include/asm-arm/locks.h                     +1    -1
 include/asm-arm/memory.h                    +4    -0
 include/asm-arm/page.h                     +22   -13
 include/asm-arm/param.h                     +2    -1
 include/asm-arm/pgalloc.h                 +102    -1
 include/asm-arm/pgtable.h                 +200    -7
 include/asm-arm/proc-armo/assembler.h       +0  -106
 include/asm-arm/proc-armo/cache.h           +0   -28
 include/asm-arm/proc-armo/elf.h             +0   -15
 include/asm-arm/proc-armo/locks.h           +0  -161
 include/asm-arm/proc-armo/page.h            +0   -40
 include/asm-arm/proc-armo/pgalloc.h         +0   -44
 include/asm-arm/proc-armo/pgtable.h         +0  -106
 include/asm-arm/proc-armo/processor.h       +0   -63
 include/asm-arm/proc-armo/ptrace.h          +0   -98
 include/asm-arm/proc-armo/shmparam.h        +0   -19
 include/asm-arm/proc-armo/system.h          +0  -128
 include/asm-arm/proc-armo/tlbflush.h        +0   -63
 include/asm-arm/proc-armo/uaccess.h         +0  -138
 include/asm-arm/proc-armv/assembler.h       +0   -74
 include/asm-arm/proc-armv/cache.h           +0  -278
 include/asm-arm/proc-armv/elf.h             +0   -30
 include/asm-arm/proc-armv/page.h            +0   -37
 include/asm-arm/proc-armv/pgalloc.h         +0  -110
 include/asm-arm/proc-armv/pgtable.h         +0  -217
 include/asm-arm/proc-armv/processor.h       +0   -51
 include/asm-arm/proc-armv/ptrace.h          +0  -143
 include/asm-arm/proc-armv/shmparam.h        +0   -20
 include/asm-arm/proc-armv/system.h          +0  -215
 include/asm-arm/proc-armv/tlbflush.h        +0  -410
 include/asm-arm/proc-armv/uaccess.h         +0  -189
 include/asm-arm/proc-fns.h                  +0    -5
 include/asm-arm/processor.h                +24    -2
 include/asm-arm/ptrace.h                  +115    -1
 include/asm-arm/semaphore.h                 +1    -1
 include/asm-arm/shmparam.h                  +0    -2
 include/asm-arm/system.h                  +216   -13
 include/asm-arm/thread_info.h              +14   -12
 include/asm-arm/tlbflush.h                +393    -2
 include/asm-arm/uaccess.h                 +190   -21
arch/arm/Makefile

@@ -30,12 +30,6 @@ endif
 check_gcc = $(shell if $(CC) $(1) -S -o /dev/null -xc /dev/null > /dev/null 2>&1; then echo "$(1)"; else echo "$(2)"; fi)
 
 comma = ,
 
-# Select CPU dependent flags.  Note that order of declaration is important;
-# the options further down the list override previous items.
-#
-apcs-$(CONFIG_CPU_32)	:=-mapcs-32
-apcs-$(CONFIG_CPU_26)	:=-mapcs-26 -mcpu=arm3
-
 # This selects which instruction set is used.
 # Note that GCC does not numerically define an architecture version
 # macro, but instead defines a whole series of macros which makes

@@ -55,37 +49,21 @@ tune-$(CONFIG_CPU_SA110)	:=-mtune=strongarm110
 tune-$(CONFIG_CPU_SA1100)	:=-mtune=strongarm1100
 tune-$(CONFIG_CPU_XSCALE)	:=$(call check_gcc,-mtune=xscale,-mtune=strongarm110)
 
-# Force -mno-fpu to be passed to the assembler.  Some versions of gcc don't
-# do this with -msoft-float
-CFLAGS_BOOT	:=$(apcs-y) $(arch-y) $(tune-y) -mshort-load-bytes -msoft-float -Wa,-mno-fpu -Uarm
-CFLAGS		+=$(apcs-y) $(arch-y) $(tune-y) -mshort-load-bytes -msoft-float -Wa,-mno-fpu -Uarm
-AFLAGS		+=$(apcs-y) $(arch-y) $(tune-y) -msoft-float -Wa,-mno-fpu
+CFLAGS_BOOT	:=-mapcs-32 $(arch-y) $(tune-y) -mshort-load-bytes -msoft-float -Wa,-mno-fpu -Uarm
+CFLAGS		+=-mapcs-32 $(arch-y) $(tune-y) -mshort-load-bytes -msoft-float -Wa,-mno-fpu -Uarm
+AFLAGS		+=-mapcs-32 $(arch-y) $(tune-y) -msoft-float -Wa,-mno-fpu
 
 #Default value
 DATAADDR	:= .
 
-ifeq ($(CONFIG_CPU_26),y)
-PROCESSOR	:= armo
-head-y		:= arch/arm/mach-arc/head.o arch/arm/kernel/init_task.o
-LDFLAGS_BLOB	+= --oformat elf26-littlearm
-  ifeq ($(CONFIG_ROM_KERNEL),y)
-    DATAADDR	:= 0x02080000
-    textaddr-y	:= 0x03800000
-  else
-    textaddr-y	:= 0x02080000
-  endif
-endif
-
-ifeq ($(CONFIG_CPU_32),y)
 PROCESSOR	:= armv
 head-y		:= arch/arm/kernel/head.o arch/arm/kernel/init_task.o
 ifeq ($(CONFIG_CPU_BIG_ENDIAN),y)
 LDFLAGS_BLOB	+= --oformat elf32-bigarm
 else
 LDFLAGS_BLOB	+= --oformat elf32-littlearm
 endif
-textaddr-y	:= 0xC0008000
-endif
+textaddr-y	:= 0xC0008000
 
 machine-$(CONFIG_ARCH_ARCA5K)	:= arc
 machine-$(CONFIG_ARCH_RPC)	:= rpc

@@ -160,16 +138,10 @@ include/asm-arm/.arch: $(wildcard include/config/arch/*.h)
 	@ln -sf arch-$(INCDIR) include/asm-arm/arch
 	@touch $@
 
-include/asm-arm/.proc: $(wildcard include/config/cpu/32.h) $(wildcard include/config/cpu/26.h)
-	@echo '  Making asm-arm/proc -> asm-arm/proc-$(PROCESSOR) symlink'
-	@rm -f include/asm-arm/proc
-	@ln -sf proc-$(PROCESSOR) include/asm-arm/proc
-	@touch $@
-
 prepare: maketools
 
 .PHONY: maketools FORCE
-maketools: include/asm-arm/.arch include/asm-arm/.proc \
+maketools: include/asm-arm/.arch \
 	   include/asm-arm/constants.h include/linux/version.h FORCE
 	$(Q)$(MAKE) $(build)=arch/arm/tools include/asm-arm/mach-types.h

@@ -185,7 +157,6 @@ zinstall install: vmlinux
 MRPROPER_FILES	+= \
 	include/asm-arm/arch include/asm-arm/.arch \
-	include/asm-arm/proc include/asm-arm/.proc \
 	include/asm-arm/constants.h* \
 	include/asm-arm/mach-types.h

@@ -217,7 +188,7 @@ zi:; $(Q)$(MAKE) $(build)=$(boot) zinstall
 )
 
 arch/$(ARCH)/kernel/asm-offsets.s: include/asm include/linux/version.h \
-				   include/asm-arm/.arch include/asm-arm/.proc \
+				   include/asm-arm/.arch \
				   include/config/MARKER
 
 include/asm-$(ARCH)/constants.h: arch/$(ARCH)/kernel/asm-offsets.s
arch/arm/kernel/Makefile

@@ -2,13 +2,11 @@
 # Makefile for the linux kernel.
 #
 
-ENTRY_OBJ = entry-$(PROCESSOR).o
-
 AFLAGS_head.o := -DTEXTADDR=$(TEXTADDR)
 
 # Object file lists.
 
-obj-y		:= arch.o compat.o dma.o $(ENTRY_OBJ) entry-common.o irq.o \
+obj-y		:= arch.o compat.o dma.o entry-armv.o entry-common.o irq.o \
 		   process.o ptrace.o semaphore.o setup.o signal.o sys_arm.o \
 		   time.o traps.o

@@ -34,6 +32,5 @@ extra-y := $(head-y) init_task.o vmlinux.lds.s
 
 # Spell out some dependencies that `make dep' doesn't spot
 $(obj)/entry-armv.o:	$(obj)/entry-header.S include/asm-arm/constants.h
-$(obj)/entry-armo.o:	$(obj)/entry-header.S include/asm-arm/constants.h
 $(obj)/entry-common.o:	$(obj)/entry-header.S include/asm-arm/constants.h \
 			$(obj)/calls.S
arch/arm/kernel/entry-armo.S
deleted 100644 → 0

/*
 *  linux/arch/arm/kernel/entry-armo.S
 *
 *  Copyright (C) 1995, 1996, 1997, 1998 Russell King.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 *
 *  Design issues:
 *   - We have several modes that each vector can be called from,
 *     each with its own set of registers.  On entry to any vector,
 *     we *must* save the registers used in *that* mode.
 *
 *   - This code must be as fast as possible.
 *
 *  There are a few restrictions on the vectors:
 *   - the SWI vector cannot be called from *any* non-user mode
 *
 *   - the FP emulator is *never* called from *any* non-user mode undefined
 *     instruction.
 *
 *  Ok, so this file may be a mess, but its as efficient as possible while
 *  adhering to the above criteria.
 */
#include <linux/config.h>
#include <linux/init.h>

#include "entry-header.S"

[remaining ~600 deleted lines: the 26-bit (armo) exception entry code, comprising the
 IOC/IOMD disable_fiq and get_irqnr_and_base macros with their irq_prio_l/irq_prio_h
 priority tables, the SVC_SAVE_ALL / SVC_IRQ_SAVE_ALL / SVC_RESTORE_ALL register-save
 macros, the _unexp_fiq, undefined-instruction, prefetch-abort, address-exception, IRQ
 and data-abort vectors (including the ldr/str, ldm/stm and ldc/stc base-register
 correction paths), ret_from_exception, the 26-bit __switch_to, and __trap_init]
arch/arm/kernel/entry-armv.S

@@ -15,10 +15,12 @@
 */
 #include <linux/config.h>
 #include <linux/init.h>
-#include "entry-header.S"
 #include <asm/thread_info.h>
 #include <asm/glue.h>
+#include <asm/ptrace.h>
+
+#include "entry-header.S"
 
 #ifdef IOC_BASE
 /* IOC / IOMD based hardware */
arch/arm/kernel/entry-common.S

@@ -8,8 +8,11 @@
 *  published by the Free Software Foundation.
 */
 #include <linux/config.h>
-#include "entry-header.S"
 #include <asm/thread_info.h>
+#include <asm/ptrace.h>
+
+#include "entry-header.S"
 
 /*
  * We rely on the fact that R0 is at the bottom of the stack (due to
arch/arm/kernel/head.S

@@ -16,6 +16,7 @@
 #include <asm/assembler.h>
 #include <asm/mach-types.h>
 #include <asm/procinfo.h>
+#include <asm/ptrace.h>
 #include <asm/mach/arch.h>
 
 /*
arch/arm/mach-l7200/core.c

@@ -11,7 +11,6 @@
 #include <asm/hardware.h>
 #include <asm/page.h>
-#include <asm/proc/domain.h>
 #include <asm/mach/map.h>
 #include <asm/arch/hardware.h>
arch/arm/mach-rpc/riscpc.c

@@ -21,7 +21,7 @@
 #include <asm/mach-types.h>
 #include <asm/hardware.h>
 #include <asm/page.h>
-#include <asm/proc/domain.h>
+#include <asm/domain.h>
 #include <asm/setup.h>
 #include <asm/mach/map.h>
arch/arm/mm/Makefile

@@ -4,21 +4,13 @@
 
 # Object file lists.
 
-obj-y		:= init.o extable.o fault-common.o
-obj-m		:=
-obj-n		:=
-obj-		:=
-
-ifeq ($(CONFIG_CPU_32),y)
-obj-y		+= consistent.o fault-armv.o ioremap.o mm-armv.o
+obj-y		:= consistent.o extable.o fault-armv.o fault-common.o \
+		   init.o ioremap.o mm-armv.o
+
 obj-$(CONFIG_MODULES)		+= proc-syms.o
-endif
 
 obj-$(CONFIG_ALIGNMENT_TRAP)	+= alignment.o
 obj-$(CONFIG_DISCONTIGMEM)	+= discontig.o
 
-# Select the processor-specific files
-p-$(CONFIG_CPU_26)	+= proc-arm2_3.o
-
 # ARMv3
 p-$(CONFIG_CPU_ARM610)	+= proc-arm6_7.o tlb-v3.o cache-v3.o copypage-v3.o
 p-$(CONFIG_CPU_ARM710)	+= proc-arm6_7.o tlb-v3.o cache-v3.o copypage-v3.o
arch/arm/mm/mm-armv.c

@@ -34,8 +34,8 @@ struct cachepolicy {
 };
 
 static struct cachepolicy cache_policies[] __initdata = {
-	{ "uncached",		CR1_W|CR1_C,	PMD_SECT_UNCACHED },
-	{ "buffered",		CR1_C,		PMD_SECT_BUFFERED },
+	{ "uncached",		CR_W|CR_C,	PMD_SECT_UNCACHED },
+	{ "buffered",		CR_C,		PMD_SECT_BUFFERED },
 	{ "writethrough",	0,		PMD_SECT_WT },
 #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
 	{ "writeback",		0,		PMD_SECT_WB },

@@ -102,8 +102,8 @@ __early_param("ecc=", early_ecc);
 static int __init noalign_setup(char *__unused)
 {
-	cr_alignment &= ~CR1_A;
-	cr_no_alignment &= ~CR1_A;
+	cr_alignment &= ~CR_A;
+	cr_no_alignment &= ~CR_A;
 	set_cr(cr_alignment);
 	return 1;
 }
arch/arm/mm/proc-arm1020.S

@@ -30,6 +30,7 @@
 #include <asm/assembler.h>
 #include <asm/constants.h>
 #include <asm/procinfo.h>
+#include <asm/ptrace.h>
 #include <asm/hardware.h>
 
 /*
arch/arm/mm/proc-arm2_3.S
deleted 100644 → 0

/*
 *  linux/arch/arm/mm/proc-arm2,3.S
 *
 *  Copyright (C) 1997-1999 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  MMU functions for ARM2,3
 *
 *  These are the low level assembler for performing cache
 *  and memory functions on ARM2, ARM250 and ARM3 processors.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/constants.h>
#include <asm/procinfo.h>

[remaining ~340 deleted lines: the MEMC page-table maintenance code
 (cpu_memc_update_entry, update_pte, update_pte_table, cpu_memc_update_all,
 memc_phys_table_32, clear_tables), the _arm2/_arm3 switch_mm, proc_init,
 proc_fin and xchg_1/xchg_4 routines, the arm2/arm250/arm3
 processor_functions tables, and the ".proc.info" entries for ARM 2,
 ARM 250 and ARM 3]
arch/arm/mm/proc-arm6_7.S

@@ -15,6 +15,7 @@
 #include <asm/assembler.h>
 #include <asm/constants.h>
 #include <asm/procinfo.h>
+#include <asm/ptrace.h>
 
 ENTRY(cpu_arm6_dcache_clean_area)
 ENTRY(cpu_arm7_dcache_clean_area)
arch/arm/mm/proc-arm720.S

@@ -35,6 +35,7 @@
 #include <asm/assembler.h>
 #include <asm/constants.h>
 #include <asm/procinfo.h>
+#include <asm/ptrace.h>
 #include <asm/hardware.h>
 
 /*
arch/arm/mm/proc-arm920.S

@@ -31,6 +31,7 @@
 #include <asm/procinfo.h>
 #include <asm/hardware.h>
 #include <asm/page.h>
+#include <asm/ptrace.h>
 #include "proc-macros.S"
 
 /*
arch/arm/mm/proc-arm922.S

@@ -32,6 +32,7 @@
 #include <asm/procinfo.h>
 #include <asm/hardware.h>
 #include <asm/page.h>
+#include <asm/ptrace.h>
 #include "proc-macros.S"
 
 /*
arch/arm/mm/proc-arm926.S

@@ -31,6 +31,7 @@
 #include <asm/procinfo.h>
 #include <asm/hardware.h>
 #include <asm/page.h>
+#include <asm/ptrace.h>
 #include "proc-macros.S"
 
 /*
arch/arm/mm/proc-sa110.S

@@ -18,7 +18,8 @@
 #include <asm/constants.h>
 #include <asm/procinfo.h>
 #include <asm/hardware.h>
-#include <asm/proc/pgtable.h>
+#include <asm/pgtable.h>
+#include <asm/ptrace.h>
 
 /*
  * the cache line size of the I and D cache
arch/arm/mm/proc-sa1100.S

@@ -23,7 +23,8 @@
 #include <asm/constants.h>
 #include <asm/procinfo.h>
 #include <asm/hardware.h>
-#include <asm/proc/pgtable.h>
+#include <asm/pgtable.h>
+#include <asm/ptrace.h>
 
 /*
  * the cache line size of the I and D cache
arch/arm/mm/proc-xscale.S

@@ -25,8 +25,9 @@
 #include <asm/assembler.h>
 #include <asm/procinfo.h>
 #include <asm/hardware.h>
-#include <asm/proc/pgtable.h>
+#include <asm/pgtable.h>
 #include <asm/page.h>
+#include <asm/ptrace.h>
 #include "proc-macros.S"
 
 /*
include/asm-arm/assembler.h

 /*
- *  linux/asm/assembler.h
+ *  linux/include/asm-arm/assembler.h
+ *
+ *  Copyright (C) 1996-2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
  *
  *  This file contains arm architecture specific defines
  *  for the different processors.
  *
  *  Do not include any C declarations in this file - it is included by
  *  assembler source.
  */
 #ifndef __ASSEMBLY__
 #error "Only include this from assembly code"
 #endif
 
-#include <asm/proc/ptrace.h>
-#include <asm/proc/assembler.h>
+#include <asm/ptrace.h>
 
 /*
  * Endian independent macros for shifting bytes within registers.

@@ -36,3 +41,63 @@
 #define PLD(code...)
 #endif
+
+#define MODE_USR	USR_MODE
+#define MODE_FIQ	FIQ_MODE
+#define MODE_IRQ	IRQ_MODE
+#define MODE_SVC	SVC_MODE
+
+#define DEFAULT_FIQ	MODE_FIQ
+
+/*
+ * LOADREGS - ldm with PC in register list (eg, ldmfd sp!, {pc})
+ */
+#ifdef __STDC__
+#define LOADREGS(cond, base, reglist...)\
+	ldm##cond	base,reglist
+#else
+#define LOADREGS(cond, base, reglist...)\
+	ldm/**/cond	base,reglist
+#endif
+
+/*
+ * Build a return instruction for this processor type.
+ */
+#define RETINSTR(instr, regs...)\
+	instr	regs
+
+/*
+ * Save the current IRQ state and disable IRQs.  Note that this macro
+ * assumes FIQs are enabled, and that the processor is in SVC mode.
+ */
+	.macro	save_and_disable_irqs, oldcpsr, temp
+	mrs	\oldcpsr, cpsr
+	mov	\temp, #PSR_I_BIT | MODE_SVC
+	msr	cpsr_c, \temp
+	.endm
+
+/*
+ * Restore interrupt state previously stored in a register.  We don't
+ * guarantee that this will preserve the flags.
+ */
+	.macro	restore_irqs, oldcpsr
+	msr	cpsr_c, \oldcpsr
+	.endm
+
+/*
+ * These two are used to save LR/restore PC over a user-based access.
+ * The old 26-bit architecture requires that we do.  On 32-bit
+ * architecture, we can safely ignore this requirement.
+ */
+	.macro	save_lr
+	.endm
+
+	.macro	restore_pc
+	mov	pc, lr
+	.endm
+
+#define USER(x...)				\
+9999:	x;					\
+	.section __ex_table,"a";		\
+	.align	3;				\
+	.long	9999b,9001f;			\
+	.previous
include/asm-arm/atomic.h

@@ -27,7 +27,7 @@ typedef struct { volatile int counter; } atomic_t;
 #define ATOMIC_INIT(i)	{ (i) }
 
 #ifdef __KERNEL__
-#include <asm/proc/system.h>
+#include <asm/system.h>
 
 #define atomic_read(v)	((v)->counter)
 #define atomic_set(v,i)	(((v)->counter) = (i))
include/asm-arm/cacheflush.h

 /*
  *  linux/include/asm-arm/cacheflush.h
  *
- *  Copyright (C) 2000-2002 Russell King
+ *  Copyright (C) 1999-2002 Russell King
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as

@@ -12,6 +12,275 @@
 #include <linux/sched.h>
 #include <linux/mm.h>
 
-#include <asm/proc/cache.h>
+#include <asm/mman.h>
+#include <asm/glue.h>
/*
* Cache Model
* ===========
*/
#undef _CACHE
#undef MULTI_CACHE
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE v3
# endif
#endif
#if defined(CONFIG_CPU_ARM720T)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE v4
# endif
#endif
#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
defined(CONFIG_CPU_ARM1020)
# define MULTI_CACHE 1
#endif
#if defined(CONFIG_CPU_ARM926T)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE arm926
# endif
#endif
#if defined(CONFIG_CPU_SA110) || defined(CONFIG_CPU_SA1100)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE v4wb
# endif
#endif
#if defined(CONFIG_CPU_XSCALE)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE xscale
# endif
#endif
#if !defined(_CACHE) && !defined(MULTI_CACHE)
#error Unknown cache maintainence model
#endif
/*
* This flag is used to indicate that the page pointed to by a pte
* is dirty and requires cleaning before returning it to the user.
*/
#define PG_dcache_dirty PG_arch_1
/*
* MM Cache Management
* ===================
*
* The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
* implement these methods.
*
* Start addresses are inclusive and end addresses are exclusive;
* start addresses should be rounded down, end addresses up.
*
* See linux/Documentation/cachetlb.txt for more information.
* Please note that the implementation of these, and the required
* effects are cache-type (VIVT/VIPT/PIPT) specific.
*
* flush_cache_kern_all()
*
* Unconditionally clean and invalidate the entire cache.
*
* flush_cache_user_mm(mm)
*
* Clean and invalidate all user space cache entries
* before a change of page tables.
*
* flush_cache_user_range(start, end, flags)
*
* Clean and invalidate a range of cache entries in the
* specified address space before a change of page tables.
* - start - user start address (inclusive, page aligned)
* - end - user end address (exclusive, page aligned)
* - flags - vma->vm_flags field
*
* coherent_kern_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start, end. If you have non-snooping
* Harvard caches, you need to implement this function.
* - start - virtual start address
* - end - virtual end address
*
* DMA Cache Coherency
* ===================
*
* dma_inv_range(start, end)
*
* Invalidate (discard) the specified virtual address range.
* May not write back any entries. If 'start' or 'end'
* are not cache line aligned, those lines must be written
* back.
* - start - virtual start address
* - end - virtual end address
*
* dma_clean_range(start, end)
*
* Clean (write back) the specified virtual address range.
* - start - virtual start address
* - end - virtual end address
*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
* - start - virtual start address
* - end - virtual end address
*/
struct cpu_cache_fns {
	void (*flush_kern_all)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	void (*coherent_kern_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_page)(void *);

	void (*dma_inv_range)(unsigned long, unsigned long);
	void (*dma_clean_range)(unsigned long, unsigned long);
	void (*dma_flush_range)(unsigned long, unsigned long);
};
/*
* Select the calling method
*/
#ifdef MULTI_CACHE
extern struct cpu_cache_fns cpu_cache;
#define __cpuc_flush_kern_all cpu_cache.flush_kern_all
#define __cpuc_flush_user_all cpu_cache.flush_user_all
#define __cpuc_flush_user_range cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range cpu_cache.coherent_kern_range
#define __cpuc_flush_dcache_page cpu_cache.flush_kern_dcache_page
/*
* These are private to the dma-mapping API. Do not use directly.
* Their sole purpose is to ensure that data held in the cache
* is visible to DMA, or data written by DMA to system memory is
* visible to the CPU.
*/
#define dmac_inv_range cpu_cache.dma_inv_range
#define dmac_clean_range cpu_cache.dma_clean_range
#define dmac_flush_range cpu_cache.dma_flush_range
#else
#define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all)
#define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range)
#define __cpuc_flush_dcache_page __glue(_CACHE,_flush_kern_dcache_page)
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_page(void *);
/*
* These are private to the dma-mapping API. Do not use directly.
* Their sole purpose is to ensure that data held in the cache
* is visible to DMA, or data written by DMA to system memory is
* visible to the CPU.
*/
#define dmac_inv_range __glue(_CACHE,_dma_inv_range)
#define dmac_clean_range __glue(_CACHE,_dma_clean_range)
#define dmac_flush_range __glue(_CACHE,_dma_flush_range)
extern void dmac_inv_range(unsigned long, unsigned long);
extern void dmac_clean_range(unsigned long, unsigned long);
extern void dmac_flush_range(unsigned long, unsigned long);
#endif
/*
* Convert calls to our calling convention.
*/
#define flush_cache_all() __cpuc_flush_kern_all()
static inline void flush_cache_mm(struct mm_struct *mm)
{
	if (current->active_mm == mm)
		__cpuc_flush_user_all();
}

static inline void
flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (current->active_mm == vma->vm_mm)
		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
}

static inline void
flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr)
{
	if (current->active_mm == vma->vm_mm) {
		unsigned long addr = user_addr & PAGE_MASK;
		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
	}
}
/*
* Perform necessary cache operations to ensure that data previously
* stored within this range of addresses can be executed by the CPU.
*/
#define flush_icache_range(s,e) __cpuc_coherent_kern_range(s,e)
/*
* Perform necessary cache operations to ensure that the TLB will
* see data written in the specified area.
*/
#define clean_dcache_area(start,size) cpu_dcache_clean_area(start, size)
/*
* flush_dcache_page is used when the kernel has written to the page
* cache page at virtual address page->virtual.
*
* If this page isn't mapped (ie, page->mapping = NULL), or it has
* userspace mappings (page->mapping->i_mmap or page->mapping->i_mmap_shared)
* then we _must_ always clean + invalidate the dcache entries associated
* with the kernel mapping.
*
* Otherwise we can defer the operation, and clean the cache when we are
* about to change to user space. This is the same method as used on SPARC64.
* See update_mmu_cache for the user space part.
*/
#define mapping_mapped(map) (!list_empty(&(map)->i_mmap) || \
!list_empty(&(map)->i_mmap_shared))
extern void __flush_dcache_page(struct page *);

static inline void flush_dcache_page(struct page *page)
{
	if (page->mapping && !mapping_mapped(page->mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else
		__flush_dcache_page(page);
}
#define flush_icache_user_range(vma,page,addr,len) \
flush_dcache_page(page)
/*
* We don't appear to need to do anything here. In fact, if we did, we'd
* duplicate cache flushing elsewhere performed by flush_dcache_page().
*/
#define flush_icache_page(vma,page) do { } while (0)
#endif
#endif
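As a side note on the deferred D-cache handling described above: the user-space half of the scheme lives in arch/arm/mm (update_mmu_cache), not in this header. A purely illustrative sketch of that half, using only the symbols defined above plus the generic test_and_clear_bit() helper, might look like this (example_finish_deferred_flush is an invented name, not kernel code):

/*
 * Illustrative sketch only -- not the kernel's update_mmu_cache().
 * Completes the flush that flush_dcache_page() deferred via PG_dcache_dirty.
 */
static inline void example_finish_deferred_flush(struct page *page)
{
	if (page->mapping &&
	    test_and_clear_bit(PG_dcache_dirty, &page->flags))
		__flush_dcache_page(page);	/* clean + invalidate the kernel alias */
}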
include/asm-arm/cpu-multi26.h  deleted 100644 → 0
/*
* linux/include/asm-arm/cpu-multi26.h
*
* Copyright (C) 2000 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASSEMBLY__
#include <asm/page.h>
/* forward-declare task_struct */
struct task_struct;
/*
* Don't change this structure - ASM code
* relies on it.
*/
extern struct processor {
	/* Set up any processor specifics */
	void (*_proc_init)(void);
	/* Disable any processor specifics */
	void (*_proc_fin)(void);
	/* set the MEMC hardware mappings */
	void (*_switch_mm)(pgd_t *pgd);
	/* XCHG */
	unsigned long (*_xchg_1)(unsigned long x, volatile void *ptr);
	unsigned long (*_xchg_4)(unsigned long x, volatile void *ptr);
} processor;

extern const struct processor arm2_processor_functions;
extern const struct processor arm250_processor_functions;
extern const struct processor arm3_processor_functions;
#define cpu_proc_init() processor._proc_init()
#define cpu_proc_fin() processor._proc_fin()
#define cpu_do_idle() do { } while (0)
#define cpu_switch_mm(pgd,mm) processor._switch_mm(pgd)
#define cpu_xchg_1(x,ptr) processor._xchg_1(x,ptr)
#define cpu_xchg_4(x,ptr) processor._xchg_4(x,ptr)
extern void cpu_memc_update_all(pgd_t *pgd);
extern void cpu_memc_update_entry(pgd_t *pgd, unsigned long phys_pte, unsigned long log_addr);
#endif
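For orientation (this is not part of the removed header): the struct above is a plain function-pointer vector, so 26-bit boot code would pick one of the three tables and copy it into the global 'processor' before the cpu_* macros are used. A hedged sketch, with example_select_processor and is_arm3 invented for illustration:

/* Illustrative only: how the vector above is consumed via the cpu_* macros. */
void example_select_processor(int is_arm3)
{
	processor = is_arm3 ? arm3_processor_functions
			    : arm2_processor_functions;

	cpu_proc_init();	/* expands to processor._proc_init() */
}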
include/asm-arm/proc-armv/domain.h → include/asm-arm/domain.h
/*
 *  linux/include/asm-arm/proc-armv/domain.h
 *  linux/include/asm-arm/domain.h
 *
 *  Copyright (C) 1999 Russell King.
 *
...
include/asm-arm/elf.h
...
@@ -7,7 +7,6 @@
#include <asm/ptrace.h>
#include <asm/user.h>
#include <asm/proc/elf.h>
#include <asm/procinfo.h>
typedef unsigned long elf_greg_t;
...
@@ -42,6 +41,7 @@ typedef struct user_fp elf_fpregset_t;
#define ELF_ARCH	EM_ARM
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE	4096
/* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
   use of this is to invoke "./ld.so someprog" to test out a new version of
...
@@ -76,4 +76,29 @@ typedef struct user_fp elf_fpregset_t;
extern char elf_platform[];
#define ELF_PLATFORM	(elf_platform)
#ifdef __KERNEL__
/*
 * 32-bit code is always OK.  Some cpus can do 26-bit, some can't.
 */
#define ELF_PROC_OK(x)	(ELF_THUMB_OK(x) && ELF_26BIT_OK(x))
#define ELF_THUMB_OK(x) \
	(( (elf_hwcap & HWCAP_THUMB) && ((x)->e_entry & 1) == 1) || \
	 ((x)->e_entry & 3) == 0)
#define ELF_26BIT_OK(x) \
	(( (elf_hwcap & HWCAP_26BIT) && (x)->e_flags & EF_ARM_APCS26) || \
	 ((x)->e_flags & EF_ARM_APCS26) == 0)
/* Old NetWinder binaries were compiled in such a way that the iBCS
   heuristic always trips on them.  Until these binaries become uncommon
   enough not to care, don't trust the `ibcs' flag here.  In any case
   there is no other ELF system currently supported by iBCS.
   @@ Could print a warning message to encourage users to upgrade.  */
#define SET_PERSONALITY(ex,ibcs2) \
	set_personality(((ex).e_flags&EF_ARM_APCS26 ?PER_LINUX :PER_LINUX_32BIT))
#endif
#endif
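To make the ELF_PROC_OK/SET_PERSONALITY additions concrete, here is a small hypothetical check in the style of the ELF loader; example_elf_check and its return convention are illustrative, only the macros come from the header above:

/* Illustrative sketch, not loader code: reject binaries the CPU cannot run. */
static int example_elf_check(struct elf32_hdr *ex)
{
	if (!ELF_PROC_OK(ex))		/* Thumb entry needs HWCAP_THUMB,
					   APCS-26 image needs HWCAP_26BIT */
		return -ENOEXEC;

	SET_PERSONALITY(*ex, 0);	/* APCS-26 => PER_LINUX, else PER_LINUX_32BIT */
	return 0;
}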
include/asm-arm/proc-armv/locks.h → include/asm-arm/locks.h
/*
 *  linux/include/asm-arm/proc-armv/locks.h
 *  linux/include/asm-arm/locks.h
 *
 *  Copyright (C) 2000 Russell King
 *
...
include/asm-arm/memory.h
...
@@ -15,6 +15,8 @@
#include <linux/config.h>
#include <asm/arch/memory.h>
#ifndef __ASSEMBLY__
/*
 * PFNs are used to describe any physical page; this means
 * PFN 0 == physical address 0.
...
@@ -120,3 +122,5 @@ static inline void *phys_to_virt(unsigned long x)
#define page_to_bus(page)	(virt_to_bus(page_address(page)))
#endif
#endif
include/asm-arm/page.h
/*
 *  linux/include/asm-arm/page.h
 *
 *  Copyright (C) 1995-2003 Russell King
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#ifndef _ASMARM_PAGE_H
#define _ASMARM_PAGE_H
#include <linux/config.h>
/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT		12
#define PAGE_SIZE		(1UL << PAGE_SHIFT)
#define PAGE_MASK		(~(PAGE_SIZE-1))
#ifdef __KERNEL__
/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK)
#ifndef __ASSEMBLY__
#include <asm/glue.h>
...
@@ -119,10 +137,12 @@ extern void copy_page(void *to, void *from);
 */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pgd[2]; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
#define pte_val(x)	((x).pte)
#define pmd_val(x)	((x).pmd)
#define pgd_val(x)	((x).pgd[0])
#define pgprot_val(x)	((x).pgprot)
#define __pte(x)	((pte_t) { (x) } )
...
@@ -135,10 +155,12 @@ typedef struct { unsigned long pgprot; } pgprot_t;
 */
typedef unsigned long pte_t;
typedef unsigned long pmd_t;
typedef unsigned long pgd_t[2];
typedef unsigned long pgprot_t;
#define pte_val(x)	(x)
#define pmd_val(x)	(x)
#define pgd_val(x)	((x)[0])
#define pgprot_val(x)	(x)
#define __pte(x)	(x)
...
@@ -146,19 +168,6 @@ typedef unsigned long pgprot_t;
#define __pgprot(x)	(x)
#endif /* STRICT_MM_TYPECHECKS */
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#include <asm/proc/page.h>
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE-1))
/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK)
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
/* Pure 2^n version of get_order */
static inline int get_order(unsigned long size)
...
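A quick illustration of what the STRICT_MM_TYPECHECKS variants above buy (example_typecheck is invented; only the types and accessors come from the header): wrapping each table entry in its own struct makes accidental mixing of pte/pmd/pgd values a compile-time error, whereas the plain unsigned long variants accept it silently. Note also that pgd_t is now two entries wide, matching the "two hardware PGD entries per Linux PGD entry" scheme introduced in pgtable.h below.

/* Illustrative only. */
void example_typecheck(pte_t pte, pmd_t pmd, pgd_t pgd)
{
	unsigned long v;

	v = pte_val(pte);		/* ok */
	v = pmd_val(pmd);		/* ok */
	v = pgd_val(pgd);		/* ok: first of the two hardware entries */
	/* v = pte_val(pmd);		   compile error with STRICT_MM_TYPECHECKS */
	(void)v;
}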
include/asm-arm/param.h
...
@@ -11,7 +11,6 @@
#define __ASM_PARAM_H
#include <asm/arch/param.h>	/* for HZ */
#include <asm/proc/page.h>	/* for EXEC_PAGE_SIZE */
#ifndef __KERNEL_HZ
#define __KERNEL_HZ	100
...
@@ -25,6 +24,8 @@
# define HZ		100
#endif
#define EXEC_PAGESIZE	4096
#ifndef NGROUPS
#define NGROUPS		32
#endif
...
include/asm-arm/pgalloc.h
...
@@ -11,7 +11,8 @@
#define _ASMARM_PGALLOC_H
#include <asm/processor.h>
#include <asm/proc/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
/*
 * Since we have only two-level page tables, these are trivial
...
@@ -28,4 +29,104 @@ extern void free_pgd_slow(pgd_t *pgd);
#define check_pgt_cache()		do { } while (0)
/*
* Allocate one PTE table.
*
* This actually allocates two hardware PTE tables, but we wrap this up
* into one table thus:
*
* +------------+
* | h/w pt 0 |
* +------------+
* | h/w pt 1 |
* +------------+
* | Linux pt 0 |
* +------------+
* | Linux pt 1 |
* +------------+
*/
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
{
	pte_t *pte;

	pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
	if (pte) {
		clear_page(pte);
		clean_dcache_area(pte, sizeof(pte_t) * PTRS_PER_PTE);
		pte += PTRS_PER_PTE;
	}

	return pte;
}

static inline struct page *
pte_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	struct page *pte;

	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
	if (pte) {
		void *page = page_address(pte);
		clear_page(page);
		clean_dcache_area(page, sizeof(pte_t) * PTRS_PER_PTE);
	}

	return pte;
}

/*
 * Free one PTE table.
 */
static inline void pte_free_kernel(pte_t *pte)
{
	if (pte) {
		pte -= PTRS_PER_PTE;
		free_page((unsigned long)pte);
	}
}

static inline void pte_free(struct page *pte)
{
	__free_page(pte);
}

/*
 * Populate the pmdp entry with a pointer to the pte.  This pmd is part
 * of the mm address space.
 *
 * Ensure that we always set both PMD entries.
 */
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
{
	unsigned long pte_ptr = (unsigned long)ptep;
	unsigned long pmdval;

	BUG_ON(mm != &init_mm);

	/*
	 * The pmd must be loaded with the physical
	 * address of the PTE table
	 */
	pte_ptr -= PTRS_PER_PTE * sizeof(void *);
	pmdval = __pa(pte_ptr) | _PAGE_KERNEL_TABLE;
	pmdp[0] = __pmd(pmdval);
	pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
	flush_pmd_entry(pmdp);
}

static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
{
	unsigned long pmdval;

	BUG_ON(mm == &init_mm);

	pmdval = page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE;
	pmdp[0] = __pmd(pmdval);
	pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
	flush_pmd_entry(pmdp);
}
#endif
#endif
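A worked sketch of the page layout implied by the comment and pmd_populate_kernel() above, assuming 4-byte PTE entries (example_pte_layout is invented; the offsets follow from PTRS_PER_PTE = 512 and the 256-entry hardware tables):

/* Illustrative only: one 4K page as carved up by pte_alloc_one_kernel(). */
void example_pte_layout(unsigned long page)	/* VA of the allocated page */
{
	unsigned long hw_pt0    = page;		/* 256 entries = 1024 bytes */
	unsigned long hw_pt1    = page + 1024;	/* second hardware table    */
	unsigned long linux_pt0 = page + 2048;	/* what pte_alloc_one_kernel
						   actually returns (pte += 512) */
	unsigned long linux_pt1 = page + 3072;

	/* pmd_populate_kernel() steps back 512 * sizeof(void *) = 2048 bytes
	 * to hw_pt0, then sets pmdp[0] = __pa(hw_pt0) and
	 * pmdp[1] = __pa(hw_pt0) + 256 * sizeof(pte_t) = __pa(hw_pt1). */
	(void)hw_pt0; (void)hw_pt1; (void)linux_pt0; (void)linux_pt1;
}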
include/asm-arm/pgtable.h
/*
 *  linux/include/asm-arm/pgtable.h
 *
 * Copyright (C) 2000-2002 Russell King
 * Copyright (C) 1995-2002 Russell King
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
...
@@ -15,16 +15,25 @@
#include <asm/proc-fns.h>
#include <asm/arch/vmalloc.h>
/*
* We pull a couple of tricks here:
* 1. We wrap the PMD into the PGD.
* 2. We lie about the size of the PTE and PGD.
* Even though we have 256 PTE entries and 4096 PGD entries, we tell
* Linux that we actually have 512 PTE entries and 2048 PGD entries.
* Each "Linux" PGD entry is made up of two hardware PGD entries, and
* each PTE table is actually two hardware PTE tables.
*/
#define PTRS_PER_PTE 512
#define PTRS_PER_PMD 1
#define PTRS_PER_PGD 2048
/*
 * PMD_SHIFT determines the size of the area a second-level page table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#define PMD_SHIFT		20
#ifdef CONFIG_CPU_32
#define PGDIR_SHIFT		21
#else
#define PGDIR_SHIFT		20
#endif

#define LIBRARY_TEXT_START	0x0c000000
...
@@ -46,6 +55,117 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
#define FIRST_USER_PGD_NR	1
#define USER_PTRS_PER_PGD	((TASK_SIZE/PGDIR_SIZE) - FIRST_USER_PGD_NR)
/*
* Hardware page table definitions.
*
* + Level 1 descriptor (PMD)
* - common
*/
#define PMD_TYPE_MASK (3 << 0)
#define PMD_TYPE_FAULT (0 << 0)
#define PMD_TYPE_TABLE (1 << 0)
#define PMD_TYPE_SECT (2 << 0)
#define PMD_BIT4 (1 << 4)
#define PMD_DOMAIN(x) ((x) << 5)
#define PMD_PROTECTION		(1 << 9)	/* v5 */
/*
* - section
*/
#define PMD_SECT_BUFFERABLE (1 << 2)
#define PMD_SECT_CACHEABLE (1 << 3)
#define PMD_SECT_AP_WRITE (1 << 10)
#define PMD_SECT_AP_READ (1 << 11)
#define PMD_SECT_TEX(x)		((x) << 12)	/* v5 */
#define PMD_SECT_APX		(1 << 15)	/* v6 */
#define PMD_SECT_S		(1 << 16)	/* v6 */
#define PMD_SECT_nG		(1 << 17)	/* v6 */
#define PMD_SECT_UNCACHED (0)
#define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
#define PMD_SECT_WT (PMD_SECT_CACHEABLE)
#define PMD_SECT_WB (PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
#define PMD_SECT_MINICACHE (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE)
#define PMD_SECT_WBWA (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
/*
* - coarse table (not used)
*/
/*
* + Level 2 descriptor (PTE)
* - common
*/
#define PTE_TYPE_MASK (3 << 0)
#define PTE_TYPE_FAULT (0 << 0)
#define PTE_TYPE_LARGE (1 << 0)
#define PTE_TYPE_SMALL (2 << 0)
#define PTE_TYPE_EXT		(3 << 0)	/* v5 */
#define PTE_BUFFERABLE (1 << 2)
#define PTE_CACHEABLE (1 << 3)
/*
* - extended small page/tiny page
*/
#define PTE_EXT_AP_UNO_SRO (0 << 4)
#define PTE_EXT_AP_UNO_SRW (1 << 4)
#define PTE_EXT_AP_URO_SRW (2 << 4)
#define PTE_EXT_AP_URW_SRW (3 << 4)
#define PTE_EXT_TEX(x)		((x) << 6)	/* v5 */
/*
* - small page
*/
#define PTE_SMALL_AP_UNO_SRO (0x00 << 4)
#define PTE_SMALL_AP_UNO_SRW (0x55 << 4)
#define PTE_SMALL_AP_URO_SRW (0xaa << 4)
#define PTE_SMALL_AP_URW_SRW (0xff << 4)
#define PTE_AP_READ PTE_SMALL_AP_URO_SRW
#define PTE_AP_WRITE PTE_SMALL_AP_UNO_SRW
/*
* "Linux" PTE definitions.
*
* We keep two sets of PTEs - the hardware and the linux version.
* This allows greater flexibility in the way we map the Linux bits
* onto the hardware tables, and allows us to have YOUNG and DIRTY
* bits.
*
* The PTE table pointer refers to the hardware entries; the "Linux"
* entries are stored 1024 bytes below.
*/
#define L_PTE_PRESENT (1 << 0)
#define L_PTE_FILE		(1 << 1)	/* only when !PRESENT */
#define L_PTE_YOUNG		(1 << 1)
#define L_PTE_BUFFERABLE	(1 << 2)	/* matches PTE */
#define L_PTE_CACHEABLE		(1 << 3)	/* matches PTE */
#define L_PTE_USER (1 << 4)
#define L_PTE_WRITE (1 << 5)
#define L_PTE_EXEC (1 << 6)
#define L_PTE_DIRTY (1 << 7)
#ifndef __ASSEMBLY__
#include <asm/domain.h>
#define _PAGE_USER_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER))
#define _PAGE_KERNEL_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL))
/*
* The following macros handle the cache and bufferable bits...
*/
#define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
#define _L_PTE_READ L_PTE_USER | L_PTE_EXEC | L_PTE_CACHEABLE | L_PTE_BUFFERABLE
#define PAGE_NONE __pgprot(_L_PTE_DEFAULT)
#define PAGE_COPY __pgprot(_L_PTE_DEFAULT | _L_PTE_READ)
#define PAGE_SHARED __pgprot(_L_PTE_DEFAULT | _L_PTE_READ | L_PTE_WRITE)
#define PAGE_READONLY __pgprot(_L_PTE_DEFAULT | _L_PTE_READ)
#define PAGE_KERNEL __pgprot(_L_PTE_DEFAULT | L_PTE_CACHEABLE | L_PTE_BUFFERABLE | L_PTE_DIRTY | L_PTE_WRITE | L_PTE_EXEC)
#define _PAGE_CHG_MASK (PAGE_MASK | L_PTE_DIRTY | L_PTE_YOUNG)
#endif	/* __ASSEMBLY__ */
/*
 * The table below defines the page protection levels that we insert into our
 * Linux page table version.  These get translated into the best that the
...
@@ -86,9 +206,82 @@ extern struct page *empty_zero_page;
#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(ptep)		set_pte((ptep), __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))
#define pte_offset_kernel(dir,addr) (pmd_page_kernel(*(dir)) + __pte_index(addr))
#define pte_offset_map(dir,addr) (pmd_page_kernel(*(dir)) + __pte_index(addr))
#define pte_offset_map_nested(dir,addr) (pmd_page_kernel(*(dir)) + __pte_index(addr))
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
#define set_pte(ptep, pte) cpu_set_pte(ptep,pte)
/*
* The following only work if pte_present() is true.
* Undefined behaviour if not..
*/
#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT)
#define pte_read(pte) (pte_val(pte) & L_PTE_USER)
#define pte_write(pte) (pte_val(pte) & L_PTE_WRITE)
#define pte_exec(pte) (pte_val(pte) & L_PTE_EXEC)
#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY)
#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG)
/*
* The following only works if pte_present() is not true.
*/
#define pte_file(pte) (pte_val(pte) & L_PTE_FILE)
#define pte_to_pgoff(x) (pte_val(x) >> 2)
#define pgoff_to_pte(x) __pte(((x) << 2) | L_PTE_FILE)
#define PTE_FILE_MAX_BITS 30
#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
/*PTE_BIT_FUNC(rdprotect, &= ~L_PTE_USER);*/
/*PTE_BIT_FUNC(mkread, |= L_PTE_USER);*/
PTE_BIT_FUNC(wrprotect, &= ~L_PTE_WRITE);
PTE_BIT_FUNC(mkwrite,   |= L_PTE_WRITE);
PTE_BIT_FUNC(exprotect, &= ~L_PTE_EXEC);
PTE_BIT_FUNC(mkexec,    |= L_PTE_EXEC);
PTE_BIT_FUNC(mkclean,   &= ~L_PTE_DIRTY);
PTE_BIT_FUNC(mkdirty,   |= L_PTE_DIRTY);
PTE_BIT_FUNC(mkold,     &= ~L_PTE_YOUNG);
PTE_BIT_FUNC(mkyoung,   |= L_PTE_YOUNG);
/*
* Mark the prot value as uncacheable and unbufferable.
*/
#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) & ~(L_PTE_CACHEABLE | L_PTE_BUFFERABLE))
#define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~L_PTE_CACHEABLE)
#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(pmd_val(pmd))
#define pmd_bad(pmd) (pmd_val(pmd) & 2)
#define set_pmd(pmdp,pmd) \
do { \
*pmdp = pmd; \
flush_pmd_entry(pmdp); \
} while (0)
#define pmd_clear(pmdp) \
do { \
pmdp[0] = __pmd(0); \
pmdp[1] = __pmd(0); \
clean_pmd_entry(pmdp); \
} while (0)
static inline pte_t *pmd_page_kernel(pmd_t pmd)
{
	unsigned long ptr;

	ptr = pmd_val(pmd) & ~(PTRS_PER_PTE * sizeof(void *) - 1);
	ptr += PTRS_PER_PTE * sizeof(void *);

	return __va(ptr);
}
#define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd)))
/*
 * Permanent address of a page. We never have highmem, so this is trivial.
...
@@ -129,8 +322,6 @@ extern struct page *empty_zero_page;
/* Find an entry in the third-level page table.. */
#define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#include <asm/proc/pgtable.h>
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
...
@@ -164,6 +355,8 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
typedef pte_t *pte_addr_t;
#define pgtable_cache_init() do { } while (0)
#endif /* !__ASSEMBLY__ */
#endif /* _ASMARM_PGTABLE_H */
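One small usage illustration for the pte_modify() shown above (example_make_readonly is invented): _PAGE_CHG_MASK preserves the page frame number plus the software L_PTE_DIRTY and L_PTE_YOUNG bits, and every protection bit is taken from the new pgprot.

/* Illustrative only. */
pte_t example_make_readonly(pte_t pte)
{
	return pte_modify(pte, PAGE_READONLY);	/* keeps pfn + dirty/young bits */
}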
include/asm-arm/proc-armo/assembler.h  deleted 100644 → 0
/*
* linux/asm-arm/proc-armo/assembler.h
*
* Copyright (C) 1996 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This file contains arm architecture specific defines
* for the different processors
*/
#define MODE_USR USR26_MODE
#define MODE_FIQ FIQ26_MODE
#define MODE_IRQ IRQ26_MODE
#define MODE_SVC SVC26_MODE
#define DEFAULT_FIQ MODE_FIQ
#ifdef __STDC__
#define LOADREGS(cond, base, reglist...)\
ldm##cond base,reglist^
#define RETINSTR(instr, regs...)\
instr##s regs
#else
#define LOADREGS(cond, base, reglist...)\
	ldm/**/cond base,reglist^
#define RETINSTR(instr, regs...)\
	instr/**/s regs
#endif
#define MODENOP\
mov r0, r0
#define MODE(savereg,tmpreg,mode) \
mov savereg, pc; \
bic tmpreg, savereg, $0x0c000003; \
orr tmpreg, tmpreg, $mode; \
teqp tmpreg, $0
#define RESTOREMODE(savereg) \
teqp savereg, $0
#define SAVEIRQS(tmpreg)
#define RESTOREIRQS(tmpreg)
#define DISABLEIRQS(tmpreg)\
teqp pc, $0x08000003
#define ENABLEIRQS(tmpreg)\
teqp pc, $0x00000003
#define USERMODE(tmpreg)\
teqp pc, $0x00000000;\
mov r0, r0
#define SVCMODE(tmpreg)\
teqp pc, $0x00000003;\
mov r0, r0
/*
* Save the current IRQ state and disable IRQs
* Note that this macro assumes FIQs are enabled, and
* that the processor is in SVC mode.
*/
	.macro	save_and_disable_irqs, oldcpsr, temp
	mov	\oldcpsr, pc
	orr	\temp, \oldcpsr, #0x08000000
	teqp	\temp, #0
	.endm
/*
* Restore interrupt state previously stored in
* a register
* ** Actually do nothing on Arc - hope that the caller uses a MOVS PC soon
* after!
*/
	.macro	restore_irqs, oldcpsr
	@ This be restore_irqs
	.endm
/*
* These two are used to save LR/restore PC over a user-based access.
* The old 26-bit architecture requires that we do. On 32-bit
* architecture, we can safely ignore this requirement.
*/
	.macro	save_lr
	str	lr, [sp, #-4]!
	.endm

	.macro	restore_pc
	ldmfd	sp!, {pc}^
	.endm
#define USER(x...) \
9999: x; \
.section __ex_table,"a"; \
.align 3; \
.long 9999b,9001f; \
.previous
include/asm-arm/proc-armo/cache.h  deleted 100644 → 0
/*
* linux/include/asm-arm/proc-armo/cache.h
*
* Copyright (C) 1999-2001 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Cache handling for 26-bit ARM processors.
*/
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_range(vma,start,end) do { } while (0)
#define flush_cache_page(vma,vmaddr) do { } while (0)
#define invalidate_dcache_range(start,end) do { } while (0)
#define clean_dcache_range(start,end) do { } while (0)
#define flush_dcache_range(start,end) do { } while (0)
#define flush_dcache_page(page) do { } while (0)
#define clean_dcache_entry(_s) do { } while (0)
#define clean_cache_entry(_start) do { } while (0)
#define flush_icache_range(start,end) do { } while (0)
#define flush_icache_page(vma,page) do { } while (0)
/* DAG: ARM3 will flush cache on MEMC updates anyway? so don't bother */
#define clean_cache_area(_start,_size) do { } while (0)
include/asm-arm/proc-armo/elf.h  deleted 100644 → 0
/*
* ELF definitions for 26-bit CPUs
*/
#define ELF_EXEC_PAGESIZE 32768
#ifdef __KERNEL__
/* We can only execute 26-bit code. */
#define ELF_PROC_OK(x) \
((x)->e_flags & EF_ARM_APCS26)
#define SET_PERSONALITY(ex,ibcs2) set_personality(PER_LINUX)
#endif
include/asm-arm/proc-armo/locks.h  deleted 100644 → 0
/*
* linux/include/asm-arm/proc-armo/locks.h
*
* Copyright (C) 2000 Russell King
* Fixes for 26 bit machines, (C) 2000 Dave Gilbert
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Interrupt safe locking assembler.
*/
#ifndef __ASM_PROC_LOCKS_H
#define __ASM_PROC_LOCKS_H
/* Decrements by 1, fails if value < 0 */
#define __down_op(ptr,fail) \
({ \
__asm__ __volatile__ ( \
"@ atomic down operation\n" \
" mov ip, pc\n" \
" orr lr, ip, #0x08000000\n" \
" teqp lr, #0\n" \
" ldr lr, [%0]\n" \
" and ip, ip, #0x0c000003\n" \
" subs lr, lr, #1\n" \
" str lr, [%0]\n" \
" orrmi ip, ip, #0x80000000 @ set N\n" \
" teqp ip, #0\n" \
" movmi ip, %0\n" \
" blmi " #fail \
: \
: "r" (ptr) \
: "ip", "lr", "cc"); \
})
#define __down_op_ret(ptr,fail) \
({ \
unsigned int result; \
__asm__ __volatile__ ( \
" @ down_op_ret\n" \
" mov ip, pc\n" \
" orr lr, ip, #0x08000000\n" \
" teqp lr, #0\n" \
" ldr lr, [%1]\n" \
" and ip, ip, #0x0c000003\n" \
" subs lr, lr, #1\n" \
" str lr, [%1]\n" \
" orrmi ip, ip, #0x80000000 @ set N\n" \
" teqp ip, #0\n" \
" movmi ip, %1\n" \
" movpl ip, #0\n" \
" blmi " #fail "\n" \
" mov %0, ip" \
: "=&r" (result) \
: "r" (ptr) \
: "ip", "lr", "cc"); \
result; \
})
#define __up_op(ptr,wake) \
({ \
__asm__ __volatile__ ( \
"@ up_op\n" \
" mov ip, pc\n" \
" orr lr, ip, #0x08000000\n" \
" teqp lr, #0\n" \
" ldr lr, [%0]\n" \
" and ip, ip, #0x0c000003\n" \
" adds lr, lr, #1\n" \
" str lr, [%0]\n" \
" orrle ip, ip, #0x80000000 @ set N - should this be mi ??? DAG ! \n" \
" teqp ip, #0\n" \
" movmi ip, %0\n" \
" blmi " #wake \
: \
: "r" (ptr) \
: "ip", "lr", "cc"); \
})
/*
* The value 0x01000000 supports up to 128 processors and
* lots of processes. BIAS must be chosen such that sub'ing
* BIAS once per CPU will result in the long remaining
* negative.
*/
#define RW_LOCK_BIAS 0x01000000
#define RW_LOCK_BIAS_STR "0x01000000"
/* Decrements by RW_LOCK_BIAS rather than 1, fails if value != 0 */
#define __down_op_write(ptr,fail) \
({ \
__asm__ __volatile__( \
"@ down_op_write\n" \
" mov ip, pc\n" \
" orr lr, ip, #0x08000000\n" \
" teqp lr, #0\n" \
" and ip, ip, #0x0c000003\n" \
\
" ldr lr, [%0]\n" \
" subs lr, lr, %1\n" \
" str lr, [%0]\n" \
\
" orreq ip, ip, #0x40000000 @ set Z \n"\
" teqp ip, #0\n" \
" movne ip, %0\n" \
" blne " #fail \
: \
: "r" (ptr), "I" (RW_LOCK_BIAS) \
: "ip", "lr", "cc"); \
})
/* Increments by RW_LOCK_BIAS, wakes if value >= 0 */
#define __up_op_write(ptr,wake) \
({ \
__asm__ __volatile__( \
"@ up_op_read\n" \
" mov ip, pc\n" \
" orr lr, ip, #0x08000000\n" \
" teqp lr, #0\n" \
\
" ldr lr, [%0]\n" \
" and ip, ip, #0x0c000003\n" \
" adds lr, lr, %1\n" \
" str lr, [%0]\n" \
\
" orrcs ip, ip, #0x20000000 @ set C\n" \
" teqp ip, #0\n" \
" movcs ip, %0\n" \
" blcs " #wake \
: \
: "r" (ptr), "I" (RW_LOCK_BIAS) \
: "ip", "lr", "cc"); \
})
#define __down_op_read(ptr,fail) \
__down_op(ptr, fail)
#define __up_op_read(ptr,wake) \
({ \
__asm__ __volatile__( \
"@ up_op_read\n" \
" mov ip, pc\n" \
" orr lr, ip, #0x08000000\n" \
" teqp lr, #0\n" \
\
" ldr lr, [%0]\n" \
" and ip, ip, #0x0c000003\n" \
" adds lr, lr, %1\n" \
" str lr, [%0]\n" \
\
" orreq ip, ip, #0x40000000 @ Set Z \n" \
" teqp ip, #0\n" \
" moveq ip, %0\n" \
" bleq " #wake \
: \
: "r" (ptr), "I" (1) \
: "ip", "lr", "cc"); \
})
#endif
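A worked view of the RW_LOCK_BIAS arithmetic described in the comment above (illustrative; the counts mirror what __down_op and __down_op_write operate on): a free lock holds 0x01000000, each reader takes 1, a writer takes the whole bias and only succeeds when the result is exactly zero.

/* Illustrative only: the counter values the macros above manipulate. */
void example_rwlock_counts(void)
{
	long count = RW_LOCK_BIAS;	/* unlocked                          */

	count -= 1;			/* one reader: 0x00ffffff, still > 0 */
	count += 1;			/* reader released                   */
	count -= RW_LOCK_BIAS;		/* writer: exactly 0 => lock taken   */
	count += RW_LOCK_BIAS;		/* writer released                   */
	(void)count;
}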
include/asm-arm/proc-armo/page.h  deleted 100644 → 0
/*
* linux/include/asm-arm/proc-armo/page.h
*
* Copyright (C) 1995-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_PROC_PAGE_H
#define __ASM_PROC_PAGE_H
#include <linux/config.h>
/* PAGE_SHIFT determines the page size. This is configurable. */
#if defined(CONFIG_PAGESIZE_16)
#define PAGE_SHIFT 14
/* 16K */
#else
/* default */
#define PAGE_SHIFT 15
/* 32K */
#endif
#define EXEC_PAGESIZE 32768
#ifndef __ASSEMBLY__
#ifdef STRICT_MM_TYPECHECKS
typedef struct { unsigned long pgd; } pgd_t;
#define pgd_val(x) ((x).pgd)
#else
typedef unsigned long pgd_t;
#define pgd_val(x)	(x)
#endif
#endif	/* __ASSEMBLY__ */
#endif	/* __ASM_PROC_PAGE_H */
include/asm-arm/proc-armo/pgalloc.h  deleted 100644 → 0
/*
* linux/include/asm-arm/proc-armo/pgalloc.h
*
* Copyright (C) 2001-2002 Russell King
*
* Page table allocation/freeing primitives for 26-bit ARM processors.
*/
#include <linux/slab.h>
extern kmem_cache_t *pte_cache;
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
{
	return kmem_cache_alloc(pte_cache, GFP_KERNEL);
}

static inline void pte_free_kernel(pte_t *pte)
{
	if (pte)
		kmem_cache_free(pte_cache, pte);
}
/*
* Populate the pmdp entry with a pointer to the pte. This pmd is part
* of the mm address space.
*
* If 'mm' is the init tasks mm, then we are doing a vmalloc, and we
* need to set stuff up correctly for it.
*/
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
{
	set_pmd(pmdp, __mk_pmd(ptep, _PAGE_TABLE));
}
/*
* We use the old 2.5.5-rmk1 hack for this.
* This is not truly correct, but should be functional.
*/
#define pte_alloc_one(mm,addr) ((struct page *)pte_alloc_one_kernel(mm,addr))
#define pte_free(pte) pte_free_kernel((pte_t *)pte)
#define pmd_populate(mm,pmdp,ptep) pmd_populate_kernel(mm,pmdp,(pte_t *)ptep)
include/asm-arm/proc-armo/pgtable.h  deleted 100644 → 0
/*
* linux/include/asm-arm/proc-armo/pgtable.h
*
* Copyright (C) 1995-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* 18-Oct-1997 RMK Now two-level (32x32)
*/
#ifndef __ASM_PROC_PGTABLE_H
#define __ASM_PROC_PGTABLE_H
/*
* entries per page directory level: they are two-level, so
* we don't really have any PMD directory.
*/
#define PTRS_PER_PTE 32
#define PTRS_PER_PMD 1
#define PTRS_PER_PGD 32
/*
* The vmalloc() routines leaves a hole of 4kB between each vmalloced
* area for the same reason. ;)
*/
#define VMALLOC_START 0x01a00000
#define VMALLOC_VMADDR(x) ((unsigned long)(x))
#define VMALLOC_END 0x01c00000
#define _PAGE_TABLE (0x01)
#define pmd_bad(pmd) ((pmd_val(pmd) & 0xfc000002))
#define set_pmd(pmdp,pmd) ((*(pmdp)) = (pmd))
#define pmd_clear(pmdp) set_pmd(pmdp, __pmd(0))
static inline pmd_t __mk_pmd(pte_t *ptep, unsigned long prot)
{
	unsigned long pte_ptr = (unsigned long)ptep;
	pmd_t pmd;

	pmd_val(pmd) = __virt_to_phys(pte_ptr) | prot;

	return pmd;
}

static inline unsigned long pmd_page(pmd_t pmd)
{
	return __phys_to_virt(pmd_val(pmd) & ~_PAGE_TABLE);
}
#define pte_offset_kernel(dir,addr) (pmd_page_kernel(*(dir)) + __pte_index(addr))
#define pte_offset_map(dir,addr) (pmd_page_kernel(*(dir)) + __pte_index(addr))
#define pte_offset_map_nested(dir,addr) (pmd_page_kernel(*(dir)) + __pte_index(addr))
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
#define _PAGE_PRESENT 0x01
#define _PAGE_READONLY 0x02
#define _PAGE_NOT_USER 0x04
#define _PAGE_OLD 0x08
#define _PAGE_CLEAN 0x10
/* -- present -- -- !dirty -- --- !write --- ---- !user --- */
#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_CLEAN | _PAGE_READONLY | _PAGE_NOT_USER)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_CLEAN )
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_CLEAN | _PAGE_READONLY )
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_CLEAN | _PAGE_READONLY )
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_NOT_USER)
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_OLD | _PAGE_CLEAN)
/*
* The following only work if pte_present() is true.
* Undefined behaviour if not..
*/
#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
#define pte_read(pte) (!(pte_val(pte) & _PAGE_NOT_USER))
#define pte_write(pte) (!(pte_val(pte) & _PAGE_READONLY))
#define pte_exec(pte) (!(pte_val(pte) & _PAGE_NOT_USER))
#define pte_dirty(pte) (!(pte_val(pte) & _PAGE_CLEAN))
#define pte_young(pte) (!(pte_val(pte) & _PAGE_OLD))
static inline pte_t pte_wrprotect(pte_t pte)	{ pte_val(pte) |= _PAGE_READONLY;  return pte; }
static inline pte_t pte_rdprotect(pte_t pte)	{ pte_val(pte) |= _PAGE_NOT_USER;  return pte; }
static inline pte_t pte_exprotect(pte_t pte)	{ pte_val(pte) |= _PAGE_NOT_USER;  return pte; }
static inline pte_t pte_mkclean(pte_t pte)	{ pte_val(pte) |= _PAGE_CLEAN;     return pte; }
static inline pte_t pte_mkold(pte_t pte)	{ pte_val(pte) |= _PAGE_OLD;       return pte; }

static inline pte_t pte_mkwrite(pte_t pte)	{ pte_val(pte) &= ~_PAGE_READONLY; return pte; }
static inline pte_t pte_mkread(pte_t pte)	{ pte_val(pte) &= ~_PAGE_NOT_USER; return pte; }
static inline pte_t pte_mkexec(pte_t pte)	{ pte_val(pte) &= ~_PAGE_NOT_USER; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)	{ pte_val(pte) &= ~_PAGE_CLEAN;    return pte; }
static inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) &= ~_PAGE_OLD;      return pte; }
/*
* We don't store cache state bits in the page table here.
*/
#define pgprot_noncached(prot) (prot)
extern void pgtable_cache_init(void);

#endif	/* __ASM_PROC_PGTABLE_H */
include/asm-arm/proc-armo/processor.h  deleted 100644 → 0
/*
* linux/include/asm-arm/proc-armo/processor.h
*
* Copyright (C) 1996 Russell King.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Changelog:
* 27-06-1996 RMK Created
* 10-10-1996 RMK Brought up to date with SA110
* 26-09-1996 RMK Added 'EXTRA_THREAD_STRUCT*'
* 28-09-1996 RMK Moved start_thread into the processor dependencies
* 11-01-1998 RMK Added new uaccess_t
* 09-09-1998 PJB Delete redundant `wp_works_ok'
* 30-05-1999 PJB Save sl across context switches
*/
#ifndef __ASM_PROC_PROCESSOR_H
#define __ASM_PROC_PROCESSOR_H
#include <linux/string.h>
#define KERNEL_STACK_SIZE 4096
typedef struct {
	void (*put_byte)(void);			/* Special calling convention */
	void (*get_byte)(void);			/* Special calling convention */
	void (*put_half)(void);			/* Special calling convention */
	void (*get_half)(void);			/* Special calling convention */
	void (*put_word)(void);			/* Special calling convention */
	void (*get_word)(void);			/* Special calling convention */
	unsigned long (*copy_from_user)(void *to, const void *from, unsigned long sz);
	unsigned long (*copy_to_user)(void *to, const void *from, unsigned long sz);
	unsigned long (*clear_user)(void *addr, unsigned long sz);
	unsigned long (*strncpy_from_user)(char *to, const char *from, unsigned long sz);
	unsigned long (*strnlen_user)(const char *s, long n);
} uaccess_t;

extern uaccess_t uaccess_user, uaccess_kernel;
#define EXTRA_THREAD_STRUCT \
	uaccess_t	*uaccess;	/* User access functions*/
#define EXTRA_THREAD_STRUCT_INIT \
.uaccess = &uaccess_kernel,
#define start_thread(regs,pc,sp) \
({ \
unsigned long *stack = (unsigned long *)sp; \
set_fs(USER_DS); \
memzero(regs->uregs, sizeof (regs->uregs)); \
	regs->ARM_pc = pc;		/* pc */			\
	regs->ARM_sp = sp;		/* sp */			\
	regs->ARM_r2 = stack[2];	/* r2 (envp) */			\
	regs->ARM_r1 = stack[1];	/* r1 (argv) */			\
	regs->ARM_r0 = stack[0];	/* r0 (argc) */			\
})
#define KSTK_EIP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)))[1020])
#define KSTK_ESP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)))[1018])
#endif
include/asm-arm/proc-armo/ptrace.h  deleted 100644 → 0
/*
* linux/include/asm-arm/proc-armo/ptrace.h
*
* Copyright (C) 1996-2001 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_PROC_PTRACE_H
#define __ASM_PROC_PTRACE_H
#define USR26_MODE 0x00000000
#define FIQ26_MODE 0x00000001
#define IRQ26_MODE 0x00000002
#define SVC26_MODE 0x00000003
#define USR_MODE USR26_MODE
#define FIQ_MODE FIQ26_MODE
#define IRQ_MODE IRQ26_MODE
#define SVC_MODE SVC26_MODE
#define MODE_MASK 0x00000003
#define PSR_F_BIT 0x04000000
#define PSR_I_BIT 0x08000000
#define PSR_V_BIT 0x10000000
#define PSR_C_BIT 0x20000000
#define PSR_Z_BIT 0x40000000
#define PSR_N_BIT 0x80000000
#define PCMASK 0xfc000003
#ifndef __ASSEMBLY__
/* this struct defines the way the registers are stored on the
stack during a system call. */
struct
pt_regs
{
long
uregs
[
17
];
};
#define ARM_pc uregs[15]
#define ARM_lr uregs[14]
#define ARM_sp uregs[13]
#define ARM_ip uregs[12]
#define ARM_fp uregs[11]
#define ARM_r10 uregs[10]
#define ARM_r9 uregs[9]
#define ARM_r8 uregs[8]
#define ARM_r7 uregs[7]
#define ARM_r6 uregs[6]
#define ARM_r5 uregs[5]
#define ARM_r4 uregs[4]
#define ARM_r3 uregs[3]
#define ARM_r2 uregs[2]
#define ARM_r1 uregs[1]
#define ARM_r0 uregs[0]
#define ARM_ORIG_r0 uregs[16]
#ifdef __KERNEL__
#define processor_mode(regs) \
((regs)->ARM_pc & MODE_MASK)
#define user_mode(regs) \
(processor_mode(regs) == USR26_MODE)
#define thumb_mode(regs) (0)
#define interrupts_enabled(regs) \
(!((regs)->ARM_pc & PSR_I_BIT))
#define fast_interrupts_enabled(regs) \
(!((regs)->ARM_pc & PSR_F_BIT))
#define condition_codes(regs) \
((regs)->ARM_pc & (PSR_V_BIT|PSR_C_BIT|PSR_Z_BIT|PSR_N_BIT))
/* Are the current registers suitable for user mode?
* (used to maintain security in signal handlers)
*/
static inline int valid_user_regs(struct pt_regs *regs)
{
	if (user_mode(regs) &&
	    (regs->ARM_pc & (PSR_F_BIT | PSR_I_BIT)) == 0)
		return 1;

	/*
	 * force it to be something sensible
	 */
	regs->ARM_pc &= ~(MODE_MASK | PSR_F_BIT | PSR_I_BIT);

	return 0;
}
#endif	/* __KERNEL__ */
#endif	/* __ASSEMBLY__ */
#endif
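For readers unfamiliar with the 26-bit layout the masks above describe (example_regs_in_user_mode is invented): the PSR shares r15 with the program counter, so mode, interrupt and condition bits are all recovered from the saved ARM_pc.

/* Illustrative only. */
static int example_regs_in_user_mode(struct pt_regs *regs)
{
	unsigned long mode = processor_mode(regs);	/* ARM_pc & MODE_MASK     */
	unsigned long cc   = condition_codes(regs);	/* N, Z, C, V from ARM_pc */

	(void)cc;
	return mode == USR26_MODE;
}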
include/asm-arm/proc-armo/shmparam.h  deleted 100644 → 0
/*
* linux/include/asm-arm/proc-armo/shmparam.h
*
* Copyright (C) 1996 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* definitions for the shared process memory on the ARM3
*/
#ifndef __ASM_PROC_SHMPARAM_H
#define __ASM_PROC_SHMPARAM_H
#ifndef SHMMAX
#define SHMMAX 0x003fa000
#endif
#endif
include/asm-arm/proc-armo/system.h  deleted 100644 → 0
/*
* linux/include/asm-arm/proc-armo/system.h
*
* Copyright (C) 1995, 1996 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_PROC_SYSTEM_H
#define __ASM_PROC_SYSTEM_H
#define vectors_base() (0)
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);

	switch (size) {
		case 1:	return cpu_xchg_1(x, ptr);
		case 4:	return cpu_xchg_4(x, ptr);
		default: __bad_xchg(ptr, size);
	}
	return 0;
}
/*
* We need to turn the caches off before calling the reset vector - RiscOS
* messes up if we don't
*/
#define proc_hard_reset() cpu_proc_fin()
/*
* A couple of speedups for the ARM
*/
/*
* Save the current interrupt enable state & disable IRQs
*/
#define local_save_flags_cli(x) \
do { \
unsigned long temp; \
__asm__ __volatile__( \
" mov %0, pc @ save_flags_cli\n" \
" orr %1, %0, #0x08000000\n" \
" and %0, %0, #0x0c000000\n" \
" teqp %1, #0\n" \
: "=r" (x), "=r" (temp) \
: \
: "memory"); \
} while (0)
/*
* Enable IRQs
*/
#define local_irq_enable() \
do { \
unsigned long temp; \
__asm__ __volatile__( \
" mov %0, pc @ sti\n" \
" bic %0, %0, #0x08000000\n" \
" teqp %0, #0\n" \
: "=r" (temp) \
: \
: "memory"); \
} while(0)
/*
* Disable IRQs
*/
#define local_irq_disable() \
do { \
unsigned long temp; \
__asm__ __volatile__( \
" mov %0, pc @ cli\n" \
" orr %0, %0, #0x08000000\n" \
" teqp %0, #0\n" \
: "=r" (temp) \
: \
: "memory"); \
} while(0)
#define __clf() do { \
unsigned long temp; \
__asm__ __volatile__( \
" mov %0, pc @ clf\n" \
" orr %0, %0, #0x04000000\n" \
" teqp %0, #0\n" \
: "=r" (temp)); \
} while(0)
#define __stf() do { \
unsigned long temp; \
__asm__ __volatile__( \
" mov %0, pc @ stf\n" \
" bic %0, %0, #0x04000000\n" \
" teqp %0, #0\n" \
: "=r" (temp)); \
} while(0)
/*
* save current IRQ & FIQ state
*/
#define local_save_flags(x) \
do { \
__asm__ __volatile__( \
" mov %0, pc @ save_flags\n" \
" and %0, %0, #0x0c000000\n" \
: "=r" (x)); \
} while (0)
/*
* restore saved IRQ & FIQ state
*/
#define local_irq_restore(x) \
do { \
unsigned long temp; \
__asm__ __volatile__( \
" mov %0, pc @ restore_flags\n" \
" bic %0, %0, #0x0c000000\n" \
" orr %0, %0, %1\n" \
" teqp %0, #0\n" \
: "=&r" (temp) \
: "r" (x) \
: "memory"); \
} while (0)
#endif
include/asm-arm/proc-armo/tlbflush.h  deleted 100644 → 0
/*
* TLB flushing:
*
* - flush_tlb_all() flushes all processes TLBs
* - flush_tlb_mm(mm) flushes the specified mm context TLB's
* - flush_tlb_page(vma, vmaddr) flushes one page
* - flush_tlb_range(vma, start, end) flushes a range of pages
*/
#define flush_tlb_all() memc_update_all()
#define flush_tlb_mm(mm) memc_update_mm(mm)
#define flush_tlb_range(vma,start,end) \
do { memc_update_mm(vma->vm_mm); (void)(start); (void)(end); } while (0)
#define flush_tlb_page(vma, vmaddr) do { } while (0)
/*
* The following handle the weird MEMC chip
*/
static inline void memc_update_all(void)
{
	struct task_struct *p;

	cpu_memc_update_all(init_mm.pgd);
	for_each_task(p) {
		if (!p->mm)
			continue;
		cpu_memc_update_all(p->mm->pgd);
	}
	processor._set_pgd(current->active_mm->pgd);
}

static inline void memc_update_mm(struct mm_struct *mm)
{
	cpu_memc_update_all(mm->pgd);

	if (mm == current->active_mm)
		processor._set_pgd(mm->pgd);
}

static inline void
memc_clear(struct mm_struct *mm, struct page *page)
{
	cpu_memc_update_entry(mm->pgd, (unsigned long)page_address(page), 0);

	if (mm == current->active_mm)
		processor._set_pgd(mm->pgd);
}

static inline void
memc_update_addr(struct mm_struct *mm, pte_t pte, unsigned long vaddr)
{
	cpu_memc_update_entry(mm->pgd, pte_val(pte), vaddr);

	if (mm == current->active_mm)
		processor._set_pgd(mm->pgd);
}

static inline void
update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	struct mm_struct *mm = vma->vm_mm;

	memc_update_addr(mm, pte, addr);
}
include/asm-arm/proc-armo/uaccess.h  deleted 100644 → 0
/*
* linux/include/asm-arm/proc-armo/segment.h
*
* Copyright (C) 1996 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/*
* The fs functions are implemented on the ARM2 and ARM3 architectures
* manually.
* Use *_user functions to access user memory with faulting behaving
* as though the user is accessing the memory.
* Use set_fs(get_ds()) and then the *_user functions to allow them to
* access kernel memory.
*/
/*
* These are the values used to represent the user `fs' and the kernel `ds'
*/
#define KERNEL_DS 0x03000000
#define USER_DS 0x02000000
extern uaccess_t uaccess_user, uaccess_kernel;
static inline void set_fs (mm_segment_t fs)
{
	current->addr_limit = fs;
	current->thread.uaccess = fs == USER_DS ? &uaccess_user : &uaccess_kernel;
}
#define __range_ok(addr,size) ({ \
unsigned long flag, sum; \
__asm__ __volatile__("subs %1, %0, %3; cmpcs %1, %2; movcs %0, #0" \
: "=&r" (flag), "=&r" (sum) \
: "r" (addr), "Ir" (size), "0" (current->addr_limit) \
: "cc"); \
flag; })
#define __addr_ok(addr) ({ \
unsigned long flag; \
__asm__ __volatile__("cmp %2, %0; movlo %0, #0" \
: "=&r" (flag) \
: "0" (current->addr_limit), "r" (addr) \
: "cc"); \
(flag == 0); })
#define __put_user_asm_byte(x,addr,err) \
__asm__ __volatile__( \
" mov r0, %1\n" \
" mov r1, %2\n" \
" mov r2, %0\n" \
" mov lr, pc\n" \
" mov pc, %3\n" \
" mov %0, r2\n" \
: "=r" (err) \
: "r" (x), "r" (addr), "r" (current->thread.uaccess->put_byte), \
"0" (err) \
: "r0", "r1", "r2", "lr")
#define __put_user_asm_half(x,addr,err) \
__asm__ __volatile__( \
" mov r0, %1\n" \
" mov r1, %2\n" \
" mov r2, %0\n" \
" mov lr, pc\n" \
" mov pc, %3\n" \
" mov %0, r2\n" \
: "=r" (err) \
: "r" (x), "r" (addr), "r" (current->thread.uaccess->put_half), \
"0" (err) \
: "r0", "r1", "r2", "lr")
#define __put_user_asm_word(x,addr,err) \
__asm__ __volatile__( \
" mov r0, %1\n" \
" mov r1, %2\n" \
" mov r2, %0\n" \
" mov lr, pc\n" \
" mov pc, %3\n" \
" mov %0, r2\n" \
: "=r" (err) \
: "r" (x), "r" (addr), "r" (current->thread.uaccess->put_word), \
"0" (err) \
: "r0", "r1", "r2", "lr")
#define __get_user_asm_byte(x,addr,err) \
__asm__ __volatile__( \
" mov r0, %2\n" \
" mov r1, %0\n" \
" mov lr, pc\n" \
" mov pc, %3\n" \
" mov %0, r1\n" \
" mov %1, r0\n" \
: "=r" (err), "=r" (x) \
: "r" (addr), "r" (current->thread.uaccess->get_byte), "0" (err) \
: "r0", "r1", "r2", "lr")
#define __get_user_asm_half(x,addr,err) \
__asm__ __volatile__( \
" mov r0, %2\n" \
" mov r1, %0\n" \
" mov lr, pc\n" \
" mov pc, %3\n" \
" mov %0, r1\n" \
" mov %1, r0\n" \
: "=r" (err), "=r" (x) \
: "r" (addr), "r" (current->thread.uaccess->get_half), "0" (err) \
: "r0", "r1", "r2", "lr")
#define __get_user_asm_word(x,addr,err) \
__asm__ __volatile__( \
" mov r0, %2\n" \
" mov r1, %0\n" \
" mov lr, pc\n" \
" mov pc, %3\n" \
" mov %0, r1\n" \
" mov %1, r0\n" \
: "=r" (err), "=r" (x) \
: "r" (addr), "r" (current->thread.uaccess->get_word), "0" (err) \
: "r0", "r1", "r2", "lr")
#define __do_copy_from_user(to,from,n) \
(n) = current->thread.uaccess->copy_from_user((to),(from),(n))
#define __do_copy_to_user(to,from,n) \
(n) = current->thread.uaccess->copy_to_user((to),(from),(n))
#define __do_clear_user(addr,sz) \
(sz) = current->thread.uaccess->clear_user((addr),(sz))
#define __do_strncpy_from_user(dst,src,count,res) \
(res) = current->thread.uaccess->strncpy_from_user(dst,src,count)
#define __do_strnlen_user(s,n,res) \
(res) = current->thread.uaccess->strnlen_user(s,n)
include/asm-arm/proc-armv/assembler.h  deleted 100644 → 0
/*
* linux/asm-arm/proc-armv/assembler.h
*
* Copyright (C) 1996-2000 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This file contains ARM processor specifics for
* the ARM6 and better processors.
*/
#define MODE_USR USR_MODE
#define MODE_FIQ FIQ_MODE
#define MODE_IRQ IRQ_MODE
#define MODE_SVC SVC_MODE
#define DEFAULT_FIQ MODE_FIQ
/*
* LOADREGS - ldm with PC in register list (eg, ldmfd sp!, {pc})
*/
#ifdef __STDC__
#define LOADREGS(cond, base, reglist...)\
ldm##cond base,reglist
#else
#define LOADREGS(cond, base, reglist...)\
	ldm/**/cond base,reglist
#endif
/*
* Build a return instruction for this processor type.
*/
#define RETINSTR(instr, regs...)\
instr regs
/*
* Save the current IRQ state and disable IRQs. Note that this macro
* assumes FIQs are enabled, and that the processor is in SVC mode.
*/
	.macro	save_and_disable_irqs, oldcpsr, temp
	mrs	\oldcpsr, cpsr
	mov	\temp, #PSR_I_BIT | MODE_SVC
	msr	cpsr_c, \temp
	.endm
/*
* Restore interrupt state previously stored in a register. We don't
* guarantee that this will preserve the flags.
*/
	.macro	restore_irqs, oldcpsr
	msr	cpsr_c, \oldcpsr
	.endm
/*
* These two are used to save LR/restore PC over a user-based access.
* The old 26-bit architecture requires that we do. On 32-bit
* architecture, we can safely ignore this requirement.
*/
	.macro	save_lr
	.endm

	.macro	restore_pc
	mov	pc, lr
	.endm
#define USER(x...) \
9999: x; \
.section __ex_table,"a"; \
.align 3; \
.long 9999b,9001f; \
.previous
include/asm-arm/proc-armv/cache.h  deleted 100644 → 0
/*
* linux/include/asm-arm/proc-armv/cache.h
*
* Copyright (C) 1999-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <asm/mman.h>
#include <asm/glue.h>
/*
* Cache Model
* ===========
*/
#undef _CACHE
#undef MULTI_CACHE
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE v3
# endif
#endif
#if defined(CONFIG_CPU_ARM720T)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE v4
# endif
#endif
#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
defined(CONFIG_CPU_ARM1020)
# define MULTI_CACHE 1
#endif
#if defined(CONFIG_CPU_ARM926T)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE arm926
# endif
#endif
#if defined(CONFIG_CPU_SA110) || defined(CONFIG_CPU_SA1100)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE v4wb
# endif
#endif
#if defined(CONFIG_CPU_XSCALE)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE xscale
# endif
#endif
#if !defined(_CACHE) && !defined(MULTI_CACHE)
#error Unknown cache maintainence model
#endif
/*
* This flag is used to indicate that the page pointed to by a pte
* is dirty and requires cleaning before returning it to the user.
*/
#define PG_dcache_dirty PG_arch_1
/*
* MM Cache Management
* ===================
*
* The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
* implement these methods.
*
* Start addresses are inclusive and end addresses are exclusive;
* start addresses should be rounded down, end addresses up.
*
* See linux/Documentation/cachetlb.txt for more information.
* Please note that the implementation of these, and the required
* effects are cache-type (VIVT/VIPT/PIPT) specific.
*
* flush_cache_kern_all()
*
* Unconditionally clean and invalidate the entire cache.
*
* flush_cache_user_mm(mm)
*
* Clean and invalidate all user space cache entries
* before a change of page tables.
*
* flush_cache_user_range(start, end, flags)
*
* Clean and invalidate a range of cache entries in the
* specified address space before a change of page tables.
* - start - user start address (inclusive, page aligned)
* - end - user end address (exclusive, page aligned)
* - flags - vma->vm_flags field
*
* coherent_kern_range(start, end)
*
* Ensure coherency between the Icache and the Dcache in the
* region described by start, end. If you have non-snooping
* Harvard caches, you need to implement this function.
* - start - virtual start address
* - end - virtual end address
*
* DMA Cache Coherency
* ===================
*
* dma_inv_range(start, end)
*
* Invalidate (discard) the specified virtual address range.
* May not write back any entries. If 'start' or 'end'
* are not cache line aligned, those lines must be written
* back.
* - start - virtual start address
* - end - virtual end address
*
* dma_clean_range(start, end)
*
* Clean (write back) the specified virtual address range.
* - start - virtual start address
* - end - virtual end address
*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
* - start - virtual start address
* - end - virtual end address
*/
struct cpu_cache_fns {
	void (*flush_kern_all)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	void (*coherent_kern_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_page)(void *);

	void (*dma_inv_range)(unsigned long, unsigned long);
	void (*dma_clean_range)(unsigned long, unsigned long);
	void (*dma_flush_range)(unsigned long, unsigned long);
};
/*
* Select the calling method
*/
#ifdef MULTI_CACHE
extern
struct
cpu_cache_fns
cpu_cache
;
#define __cpuc_flush_kern_all cpu_cache.flush_kern_all
#define __cpuc_flush_user_all cpu_cache.flush_user_all
#define __cpuc_flush_user_range cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range cpu_cache.coherent_kern_range
#define __cpuc_flush_dcache_page cpu_cache.flush_kern_dcache_page
/*
* These are private to the dma-mapping API. Do not use directly.
* Their sole purpose is to ensure that data held in the cache
* is visible to DMA, or data written by DMA to system memory is
* visible to the CPU.
*/
#define dmac_inv_range cpu_cache.dma_inv_range
#define dmac_clean_range cpu_cache.dma_clean_range
#define dmac_flush_range cpu_cache.dma_flush_range
#else
#define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all)
#define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range)
#define __cpuc_flush_dcache_page __glue(_CACHE,_flush_kern_dcache_page)
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_page(void *);
/*
* These are private to the dma-mapping API. Do not use directly.
* Their sole purpose is to ensure that data held in the cache
* is visible to DMA, or data written by DMA to system memory is
* visible to the CPU.
*/
#define dmac_inv_range __glue(_CACHE,_dma_inv_range)
#define dmac_clean_range __glue(_CACHE,_dma_clean_range)
#define dmac_flush_range __glue(_CACHE,_dma_flush_range)
extern void dmac_inv_range(unsigned long, unsigned long);
extern void dmac_clean_range(unsigned long, unsigned long);
extern void dmac_flush_range(unsigned long, unsigned long);
#endif
/*
* Convert calls to our calling convention.
*/
#define flush_cache_all() __cpuc_flush_kern_all()
static inline void flush_cache_mm(struct mm_struct *mm)
{
	if (current->active_mm == mm)
		__cpuc_flush_user_all();
}

static inline void
flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (current->active_mm == vma->vm_mm)
		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
}

static inline void
flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr)
{
	if (current->active_mm == vma->vm_mm) {
		unsigned long addr = user_addr & PAGE_MASK;
		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
	}
}
/*
* Perform necessary cache operations to ensure that data previously
* stored within this range of addresses can be executed by the CPU.
*/
#define flush_icache_range(s,e) __cpuc_coherent_kern_range(s,e)
/*
* Perform necessary cache operations to ensure that the TLB will
* see data written in the specified area.
*/
#define clean_dcache_area(start,size) cpu_dcache_clean_area(start, size)
/*
* flush_dcache_page is used when the kernel has written to the page
* cache page at virtual address page->virtual.
*
* If this page isn't mapped (ie, page->mapping = NULL), or it has
* userspace mappings (page->mapping->i_mmap or page->mapping->i_mmap_shared)
* then we _must_ always clean + invalidate the dcache entries associated
* with the kernel mapping.
*
* Otherwise we can defer the operation, and clean the cache when we are
* about to change to user space. This is the same method as used on SPARC64.
* See update_mmu_cache for the user space part.
*/
#define mapping_mapped(map) (!list_empty(&(map)->i_mmap) || \
!list_empty(&(map)->i_mmap_shared))
extern void __flush_dcache_page(struct page *);

static inline void flush_dcache_page(struct page *page)
{
	if (page->mapping && !mapping_mapped(page->mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else
		__flush_dcache_page(page);
}
#define flush_icache_user_range(vma,page,addr,len) \
flush_dcache_page(page)
/*
* We don't appear to need to do anything here. In fact, if we did, we'd
* duplicate cache flushing elsewhere performed by flush_dcache_page().
*/
#define flush_icache_page(vma,page) do { } while (0)
include/asm-arm/proc-armv/elf.h  deleted 100644 → 0
/*
* ELF definitions for 32-bit CPUs
*/
#define ELF_EXEC_PAGESIZE 4096
#ifdef __KERNEL__
/*
* 32-bit code is always OK. Some cpus can do 26-bit, some can't.
*/
#define ELF_PROC_OK(x) (ELF_THUMB_OK(x) && ELF_26BIT_OK(x))
#define ELF_THUMB_OK(x) \
(( (elf_hwcap & HWCAP_THUMB) && ((x)->e_entry & 1) == 1) || \
((x)->e_entry & 3) == 0)
#define ELF_26BIT_OK(x) \
(( (elf_hwcap & HWCAP_26BIT) && (x)->e_flags & EF_ARM_APCS26) || \
((x)->e_flags & EF_ARM_APCS26) == 0)
/* Old NetWinder binaries were compiled in such a way that the iBCS
heuristic always trips on them. Until these binaries become uncommon
enough not to care, don't trust the `ibcs' flag here. In any case
there is no other ELF system currently supported by iBCS.
@@ Could print a warning message to encourage users to upgrade. */
#define SET_PERSONALITY(ex,ibcs2) \
set_personality(((ex).e_flags&EF_ARM_APCS26 ?PER_LINUX :PER_LINUX_32BIT))
#endif
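Read together, the three macros amount to the following check; this is a hedged illustration only (the helper name is hypothetical, the header pointer type is struct elf32_hdr from linux/elf.h):
/*
 * Illustrative only: a Thumb entry point needs HWCAP_THUMB, an APCS-26
 * binary needs HWCAP_26BIT, and a plain 32-bit ARM binary always passes.
 */
static int example_elf_proc_ok(struct elf32_hdr *ex)
{
	if (!ELF_THUMB_OK(ex))
		return 0;
	if (!ELF_26BIT_OK(ex))
		return 0;
	return 1;		/* same result as ELF_PROC_OK(ex) */
}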
include/asm-arm/proc-armv/page.h deleted 100644 → 0 View file @ c5164261
/*
* linux/include/asm-arm/proc-armv/page.h
*
* Copyright (C) 1995-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_PROC_PAGE_H
#define __ASM_PROC_PAGE_H
/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
#define EXEC_PAGESIZE 4096
#ifndef __ASSEMBLY__
#ifdef STRICT_MM_TYPECHECKS
typedef struct {
	unsigned long pgd0;
	unsigned long pgd1;
} pgd_t;
#define pgd_val(x) ((x).pgd0)
#else
typedef unsigned long pgd_t[2];
#define pgd_val(x) ((x)[0])
#endif
#endif
/* __ASSEMBLY__ */
#endif
/* __ASM_PROC_PAGE_H */
include/asm-arm/proc-armv/pgalloc.h deleted 100644 → 0 View file @ c5164261
/*
* linux/include/asm-arm/proc-armv/pgalloc.h
*
* Copyright (C) 2001-2002 Russell King
*
* Page table allocation/freeing primitives for 32-bit ARM processors.
*/
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include "pgtable.h"
/*
* Allocate one PTE table.
*
* This actually allocates two hardware PTE tables, but we wrap this up
* into one table thus:
*
* +------------+
* | h/w pt 0 |
* +------------+
* | h/w pt 1 |
* +------------+
* | Linux pt 0 |
* +------------+
* | Linux pt 1 |
* +------------+
*/
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
{
	pte_t *pte;

	pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
	if (pte) {
		clear_page(pte);
		clean_dcache_area(pte, sizeof(pte_t) * PTRS_PER_PTE);
		pte += PTRS_PER_PTE;
	}

	return pte;
}
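Note that the returned pointer addresses the "Linux" half of the page; a hedged sketch of the reverse mapping (the helper is hypothetical), which pmd_populate_kernel() below performs in bytes:
/*
 * Illustrative only: with PTRS_PER_PTE == 512 and 4-byte PTEs, stepping
 * back PTRS_PER_PTE entries (2048 bytes) from the "Linux" tables lands
 * on the two hardware PTE tables at the start of the same page.
 */
static inline pte_t *example_hw_pte_table(pte_t *linux_pte)
{
	return linux_pte - PTRS_PER_PTE;
}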
static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	struct page *pte;

	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
	if (pte) {
		void *page = page_address(pte);
		clear_page(page);
		clean_dcache_area(page, sizeof(pte_t) * PTRS_PER_PTE);
	}

	return pte;
}
/*
 * Free one PTE table.
 */
static inline void pte_free_kernel(pte_t *pte)
{
	if (pte) {
		pte -= PTRS_PER_PTE;
		free_page((unsigned long)pte);
	}
}
static inline void pte_free(struct page *pte)
{
	__free_page(pte);
}
/*
* Populate the pmdp entry with a pointer to the pte. This pmd is part
* of the mm address space.
*
* Ensure that we always set both PMD entries.
*/
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
{
	unsigned long pte_ptr = (unsigned long)ptep;
	unsigned long pmdval;

	BUG_ON(mm != &init_mm);

	/*
	 * The pmd must be loaded with the physical
	 * address of the PTE table
	 */
	pte_ptr -= PTRS_PER_PTE * sizeof(void *);
	pmdval = __pa(pte_ptr) | _PAGE_KERNEL_TABLE;
	pmdp[0] = __pmd(pmdval);
	pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
	flush_pmd_entry(pmdp);
}
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
{
	unsigned long pmdval;

	BUG_ON(mm == &init_mm);

	pmdval = page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE;
	pmdp[0] = __pmd(pmdval);
	pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
	flush_pmd_entry(pmdp);
}
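The `256 * sizeof(pte_t)` offset is simply the size of one hardware PTE table; a worked sketch of the arithmetic (illustrative only):
/*
 * Illustrative arithmetic: each hardware PTE table holds 256 four-byte
 * entries, i.e. 1 KB, so pmdp[1] points 1 KB past pmdp[0]'s table and the
 * pair maps the full 2 MB covered by one "Linux" PGD entry.
 */
#define EXAMPLE_HW_PTE_TABLE_SIZE	(256 * sizeof(pte_t))	/* 1024 bytes */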
include/asm-arm/proc-armv/pgtable.h deleted 100644 → 0 View file @ c5164261
/*
* linux/include/asm-arm/proc-armv/pgtable.h
*
* Copyright (C) 1995-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* 12-Jan-1997 RMK Altered flushing routines to use function pointers
* now possible to combine ARM6, ARM7 and StrongARM versions.
* 17-Apr-1999 RMK Now pass an area size to clean_cache_area and
* flush_icache_area.
*/
#ifndef __ASM_PROC_PGTABLE_H
#define __ASM_PROC_PGTABLE_H
/*
* We pull a couple of tricks here:
* 1. We wrap the PMD into the PGD.
* 2. We lie about the size of the PTE and PGD.
* Even though we have 256 PTE entries and 4096 PGD entries, we tell
* Linux that we actually have 512 PTE entries and 2048 PGD entries.
* Each "Linux" PGD entry is made up of two hardware PGD entries, and
* each PTE table is actually two hardware PTE tables.
*/
#define PTRS_PER_PTE 512
#define PTRS_PER_PMD 1
#define PTRS_PER_PGD 2048
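The numbers behind this (a worked illustration, using the 4 KB page size defined elsewhere in this patch):
/*
 * Worked arithmetic, illustrative only:
 *   hardware view:  4096 level-1 entries x 1 MB section           = 4 GB
 *   Linux view:     2048 PGD entries x (512 PTEs x 4 KB pages)    = 4 GB
 * so one "Linux" PGD entry spans two hardware level-1 entries, and one
 * "Linux" PTE table is really two 256-entry hardware tables.
 */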
/*
* Hardware page table definitions.
*
* + Level 1 descriptor (PMD)
* - common
*/
#define PMD_TYPE_MASK (3 << 0)
#define PMD_TYPE_FAULT (0 << 0)
#define PMD_TYPE_TABLE (1 << 0)
#define PMD_TYPE_SECT (2 << 0)
#define PMD_BIT4 (1 << 4)
#define PMD_DOMAIN(x) ((x) << 5)
#define PMD_PROTECTION (1 << 9) /* v5 */
/*
* - section
*/
#define PMD_SECT_BUFFERABLE (1 << 2)
#define PMD_SECT_CACHEABLE (1 << 3)
#define PMD_SECT_AP_WRITE (1 << 10)
#define PMD_SECT_AP_READ (1 << 11)
#define PMD_SECT_TEX(x) ((x) << 12) /* v5 */
#define PMD_SECT_APX (1 << 15) /* v6 */
#define PMD_SECT_S (1 << 16) /* v6 */
#define PMD_SECT_nG (1 << 17) /* v6 */
#define PMD_SECT_UNCACHED (0)
#define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
#define PMD_SECT_WT (PMD_SECT_CACHEABLE)
#define PMD_SECT_WB (PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
#define PMD_SECT_MINICACHE (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE)
#define PMD_SECT_WBWA (PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
/*
* - coarse table (not used)
*/
/*
* + Level 2 descriptor (PTE)
* - common
*/
#define PTE_TYPE_MASK (3 << 0)
#define PTE_TYPE_FAULT (0 << 0)
#define PTE_TYPE_LARGE (1 << 0)
#define PTE_TYPE_SMALL (2 << 0)
#define PTE_TYPE_EXT (3 << 0) /* v5 */
#define PTE_BUFFERABLE (1 << 2)
#define PTE_CACHEABLE (1 << 3)
/*
* - extended small page/tiny page
*/
#define PTE_EXT_AP_UNO_SRO (0 << 4)
#define PTE_EXT_AP_UNO_SRW (1 << 4)
#define PTE_EXT_AP_URO_SRW (2 << 4)
#define PTE_EXT_AP_URW_SRW (3 << 4)
#define PTE_EXT_TEX(x) ((x) << 6) /* v5 */
/*
* - small page
*/
#define PTE_SMALL_AP_UNO_SRO (0x00 << 4)
#define PTE_SMALL_AP_UNO_SRW (0x55 << 4)
#define PTE_SMALL_AP_URO_SRW (0xaa << 4)
#define PTE_SMALL_AP_URW_SRW (0xff << 4)
#define PTE_AP_READ PTE_SMALL_AP_URO_SRW
#define PTE_AP_WRITE PTE_SMALL_AP_UNO_SRW
/*
* "Linux" PTE definitions.
*
* We keep two sets of PTEs - the hardware and the linux version.
* This allows greater flexibility in the way we map the Linux bits
* onto the hardware tables, and allows us to have YOUNG and DIRTY
* bits.
*
* The PTE table pointer refers to the hardware entries; the "Linux"
* entries are stored 1024 bytes below.
*/
#define L_PTE_PRESENT (1 << 0)
#define L_PTE_FILE (1 << 1) /* only when !PRESENT */
#define L_PTE_YOUNG (1 << 1)
#define L_PTE_BUFFERABLE (1 << 2) /* matches PTE */
#define L_PTE_CACHEABLE (1 << 3) /* matches PTE */
#define L_PTE_USER (1 << 4)
#define L_PTE_WRITE (1 << 5)
#define L_PTE_EXEC (1 << 6)
#define L_PTE_DIRTY (1 << 7)
#ifndef __ASSEMBLY__
#include <asm/proc/domain.h>
#define _PAGE_USER_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER))
#define _PAGE_KERNEL_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL))
#define pmd_bad(pmd) (pmd_val(pmd) & 2)
#define set_pmd(pmdp,pmd) \
do { \
*pmdp = pmd; \
flush_pmd_entry(pmdp); \
} while (0)
#define pmd_clear(pmdp) \
do { \
pmdp[0] = __pmd(0); \
pmdp[1] = __pmd(0); \
clean_pmd_entry(pmdp); \
} while (0)
static inline pte_t *pmd_page_kernel(pmd_t pmd)
{
	unsigned long ptr;

	ptr = pmd_val(pmd) & ~(PTRS_PER_PTE * sizeof(void *) - 1);
	ptr += PTRS_PER_PTE * sizeof(void *);

	return __va(ptr);
}
#define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd)))
#define pte_offset_kernel(dir,addr) (pmd_page_kernel(*(dir)) + __pte_index(addr))
#define pte_offset_map(dir,addr) (pmd_page_kernel(*(dir)) + __pte_index(addr))
#define pte_offset_map_nested(dir,addr) (pmd_page_kernel(*(dir)) + __pte_index(addr))
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
#define set_pte(ptep, pte) cpu_set_pte(ptep,pte)
/*
* The following macros handle the cache and bufferable bits...
*/
#define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
#define _L_PTE_READ L_PTE_USER | L_PTE_EXEC | L_PTE_CACHEABLE | L_PTE_BUFFERABLE
#define PAGE_NONE __pgprot(_L_PTE_DEFAULT)
#define PAGE_COPY __pgprot(_L_PTE_DEFAULT | _L_PTE_READ)
#define PAGE_SHARED __pgprot(_L_PTE_DEFAULT | _L_PTE_READ | L_PTE_WRITE)
#define PAGE_READONLY __pgprot(_L_PTE_DEFAULT | _L_PTE_READ)
#define PAGE_KERNEL __pgprot(_L_PTE_DEFAULT | L_PTE_CACHEABLE | L_PTE_BUFFERABLE | L_PTE_DIRTY | L_PTE_WRITE | L_PTE_EXEC)
#define _PAGE_CHG_MASK (PAGE_MASK | L_PTE_DIRTY | L_PTE_YOUNG)
/*
* The following only work if pte_present() is true.
* Undefined behaviour if not..
*/
#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT)
#define pte_read(pte) (pte_val(pte) & L_PTE_USER)
#define pte_write(pte) (pte_val(pte) & L_PTE_WRITE)
#define pte_exec(pte) (pte_val(pte) & L_PTE_EXEC)
#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY)
#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG)
#define pte_file(pte) (pte_val(pte) & L_PTE_FILE)
#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
/*PTE_BIT_FUNC(rdprotect, &= ~L_PTE_USER);*/
/*PTE_BIT_FUNC(mkread, |= L_PTE_USER);*/
PTE_BIT_FUNC(wrprotect, &= ~L_PTE_WRITE);
PTE_BIT_FUNC(mkwrite, |= L_PTE_WRITE);
PTE_BIT_FUNC(exprotect, &= ~L_PTE_EXEC);
PTE_BIT_FUNC(mkexec, |= L_PTE_EXEC);
PTE_BIT_FUNC(mkclean, &= ~L_PTE_DIRTY);
PTE_BIT_FUNC(mkdirty, |= L_PTE_DIRTY);
PTE_BIT_FUNC(mkold, &= ~L_PTE_YOUNG);
PTE_BIT_FUNC(mkyoung, |= L_PTE_YOUNG);
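For reference, the first invocation above expands mechanically to:
/* Expansion of PTE_BIT_FUNC(wrprotect, &= ~L_PTE_WRITE), shown for illustration: */
static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~L_PTE_WRITE; return pte; }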
/*
* Mark the prot value as uncacheable and unbufferable.
*/
#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) & ~(L_PTE_CACHEABLE | L_PTE_BUFFERABLE))
#define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~L_PTE_CACHEABLE)
#define pgtable_cache_init() do { } while (0)
#define pte_to_pgoff(x) (pte_val(x) >> 2)
#define pgoff_to_pte(x) __pte(((x) << 2) | L_PTE_FILE)
#define PTE_FILE_MAX_BITS 30
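A hedged round-trip sketch of the file-PTE encoding (the helper name is hypothetical): the page offset is shifted up two bits, clear of the present bit and the L_PTE_FILE marker.
/*
 * Illustrative only: encode a pagecache offset into a non-present file
 * PTE and decode it again using the macros above; holds for any
 * pgoff below 1 << PTE_FILE_MAX_BITS.
 */
static inline int example_file_pte_roundtrip(unsigned long pgoff)
{
	pte_t pte = pgoff_to_pte(pgoff);	/* ((pgoff << 2) | L_PTE_FILE) */

	return pte_file(pte) && pte_to_pgoff(pte) == pgoff;
}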
#endif
/* __ASSEMBLY__ */
#endif
/* __ASM_PROC_PGTABLE_H */
include/asm-arm/proc-armv/processor.h deleted 100644 → 0 View file @ c5164261
/*
* linux/include/asm-arm/proc-armv/processor.h
*
* Copyright (C) 1996-1999 Russell King.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Changelog:
* 20-09-1996 RMK Created
* 26-09-1996 RMK Added 'EXTRA_THREAD_STRUCT*'
* 28-09-1996 RMK Moved start_thread into the processor dependencies
* 09-09-1998 PJB Delete redundant `wp_works_ok'
* 30-05-1999 PJB Save sl across context switches
* 31-07-1999 RMK Added 'domain' stuff
*/
#ifndef __ASM_PROC_PROCESSOR_H
#define __ASM_PROC_PROCESSOR_H
#include <asm/proc/domain.h>
#define KERNEL_STACK_SIZE PAGE_SIZE
#define INIT_EXTRA_THREAD_INFO \
.cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
domain_val(DOMAIN_IO, DOMAIN_CLIENT)
#define start_thread(regs,pc,sp) \
({ \
unsigned long *stack = (unsigned long *)sp; \
set_fs(USER_DS); \
memzero(regs->uregs, sizeof(regs->uregs)); \
if (current->personality & ADDR_LIMIT_32BIT) \
regs->ARM_cpsr = USR_MODE; \
else \
regs->ARM_cpsr = USR26_MODE; \
if (elf_hwcap & HWCAP_THUMB && pc & 1) \
regs->ARM_cpsr |= PSR_T_BIT; \
	regs->ARM_pc = pc & ~1;		/* pc */	\
	regs->ARM_sp = sp;		/* sp */	\
	regs->ARM_r2 = stack[2];	/* r2 (envp) */	\
	regs->ARM_r1 = stack[1];	/* r1 (argv) */	\
	regs->ARM_r0 = stack[0];	/* r0 (argc) */	\
})
#define KSTK_EIP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)->thread_info))[1019])
#define KSTK_ESP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)->thread_info))[1017])
#endif
include/asm-arm/proc-armv/ptrace.h deleted 100644 → 0 View file @ c5164261
/*
* linux/include/asm-arm/proc-armv/ptrace.h
*
* Copyright (C) 1996-1999 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_PROC_PTRACE_H
#define __ASM_PROC_PTRACE_H
#include <linux/config.h>
/*
* PSR bits
*/
#define USR26_MODE 0x00000000
#define FIQ26_MODE 0x00000001
#define IRQ26_MODE 0x00000002
#define SVC26_MODE 0x00000003
#define USR_MODE 0x00000010
#define FIQ_MODE 0x00000011
#define IRQ_MODE 0x00000012
#define SVC_MODE 0x00000013
#define ABT_MODE 0x00000017
#define UND_MODE 0x0000001b
#define SYSTEM_MODE 0x0000001f
#define MODE32_BIT 0x00000010
#define MODE_MASK 0x0000001f
#define PSR_T_BIT 0x00000020
#define PSR_F_BIT 0x00000040
#define PSR_I_BIT 0x00000080
#define PSR_J_BIT 0x01000000
#define PSR_Q_BIT 0x08000000
#define PSR_V_BIT 0x10000000
#define PSR_C_BIT 0x20000000
#define PSR_Z_BIT 0x40000000
#define PSR_N_BIT 0x80000000
#define PCMASK 0
/*
* Groups of PSR bits
*/
#define PSR_f 0xff000000 /* Flags */
#define PSR_s 0x00ff0000 /* Status */
#define PSR_x 0x0000ff00 /* Extension */
#define PSR_c 0x000000ff /* Control */
/*
* CR1 bits
*/
#define CR1_M 0x00000001 /* MMU */
#define CR1_A 0x00000002 /* Alignment fault */
#define CR1_C 0x00000004 /* Dcache */
#define CR1_W 0x00000008 /* Write buffer */
#define CR1_P 0x00000010 /* Prog32 */
#define CR1_D 0x00000020 /* Data32 */
#define CR1_L 0x00000040 /* Late abort */
#define CR1_B 0x00000080 /* Big endian */
#define CR1_S 0x00000100 /* System protection */
#define CR1_R 0x00000200 /* ROM protection */
#define CR1_F 0x00000400
#define CR1_Z 0x00000800 /* BTB enable */
#define CR1_I 0x00001000 /* Icache */
#define CR1_V 0x00002000 /* Vector relocation */
#define CR1_RR 0x00004000 /* Round Robin */
#ifndef __ASSEMBLY__
/* this struct defines the way the registers are stored on the
stack during a system call. */
struct pt_regs {
	long uregs[18];
};
#define ARM_cpsr uregs[16]
#define ARM_pc uregs[15]
#define ARM_lr uregs[14]
#define ARM_sp uregs[13]
#define ARM_ip uregs[12]
#define ARM_fp uregs[11]
#define ARM_r10 uregs[10]
#define ARM_r9 uregs[9]
#define ARM_r8 uregs[8]
#define ARM_r7 uregs[7]
#define ARM_r6 uregs[6]
#define ARM_r5 uregs[5]
#define ARM_r4 uregs[4]
#define ARM_r3 uregs[3]
#define ARM_r2 uregs[2]
#define ARM_r1 uregs[1]
#define ARM_r0 uregs[0]
#define ARM_ORIG_r0 uregs[17]
#ifdef __KERNEL__
#define user_mode(regs) \
(((regs)->ARM_cpsr & 0xf) == 0)
#ifdef CONFIG_ARM_THUMB
#define thumb_mode(regs) \
(((regs)->ARM_cpsr & PSR_T_BIT))
#else
#define thumb_mode(regs) (0)
#endif
#define processor_mode(regs) \
((regs)->ARM_cpsr & MODE_MASK)
#define interrupts_enabled(regs) \
(!((regs)->ARM_cpsr & PSR_I_BIT))
#define fast_interrupts_enabled(regs) \
(!((regs)->ARM_cpsr & PSR_F_BIT))
#define condition_codes(regs) \
((regs)->ARM_cpsr & (PSR_V_BIT|PSR_C_BIT|PSR_Z_BIT|PSR_N_BIT))
/* Are the current registers suitable for user mode?
* (used to maintain security in signal handlers)
*/
static inline int valid_user_regs(struct pt_regs *regs)
{
	if (user_mode(regs) &&
	    (regs->ARM_cpsr & (PSR_F_BIT|PSR_I_BIT)) == 0)
		return 1;

	/*
	 * Force CPSR to something logical...
	 */
	regs->ARM_cpsr &= PSR_f | PSR_s | PSR_x | PSR_T_BIT | MODE32_BIT;

	return 0;
}
#endif
/* __KERNEL__ */
#endif
/* __ASSEMBLY__ */
#endif
include/asm-arm/proc-armv/shmparam.h deleted 100644 → 0 View file @ c5164261
/*
* linux/include/asm-arm/proc-armv/shmparam.h
*
* Copyright (C) 1996 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* definitions for the shared process memory on ARM v3 or v4
* processors
*/
#ifndef __ASM_PROC_SHMPARAM_H
#define __ASM_PROC_SHMPARAM_H
#ifndef SHMMAX
#define SHMMAX 0x01000000
#endif
#endif
include/asm-arm/proc-armv/system.h deleted 100644 → 0 View file @ c5164261
/*
* linux/include/asm-arm/proc-armv/system.h
*
* Copyright (C) 1996 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_PROC_SYSTEM_H
#define __ASM_PROC_SYSTEM_H
#include <linux/config.h>
#define set_cr(x) \
__asm__ __volatile__( \
"mcr p15, 0, %0, c1, c0, 0 @ set CR" \
: : "r" (x) : "cc")
#define get_cr() \
({ \
unsigned int __val; \
__asm__ __volatile__( \
"mrc p15, 0, %0, c1, c0, 0 @ get CR" \
: "=r" (__val) : : "cc"); \
__val; \
})
#define CR_M (1 << 0) /* MMU enable */
#define CR_A (1 << 1) /* Alignment abort enable */
#define CR_C (1 << 2) /* Dcache enable */
#define CR_W (1 << 3) /* Write buffer enable */
#define CR_P (1 << 4) /* 32-bit exception handler */
#define CR_D (1 << 5) /* 32-bit data address range */
#define CR_L (1 << 6) /* Implementation defined */
#define CR_B (1 << 7) /* Big endian */
#define CR_S (1 << 8) /* System MMU protection */
#define CR_R (1 << 9) /* ROM MMU protection */
#define CR_F (1 << 10) /* Implementation defined */
#define CR_Z (1 << 11) /* Implementation defined */
#define CR_I (1 << 12) /* Icache enable */
#define CR_V (1 << 13) /* Vectors relocated to 0xffff0000 */
#define CR_RR (1 << 14) /* Round Robin cache replacement */
#define CR_L4 (1 << 15) /* LDR pc can set T bit */
#define CR_DT (1 << 16)
#define CR_IT (1 << 18)
#define CR_ST (1 << 19)
#define CR_FI (1 << 21)
#define CR_U (1 << 22) /* Unaligned access operation */
#define CR_XP (1 << 23) /* Extended page tables */
#define CR_VE (1 << 24) /* Vectored interrupts */
extern unsigned long cr_no_alignment; /* defined in entry-armv.S */
extern unsigned long cr_alignment; /* defined in entry-armv.S */
#if __LINUX_ARM_ARCH__ >= 4
#define vectors_base() ((cr_alignment & CR_V) ? 0xffff0000 : 0)
#else
#define vectors_base() (0)
#endif
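A usage sketch for the accessors above (illustrative only, not a call made by this header): control-register bits are changed with a read-modify-write of CP15 register c1.
/* Illustrative only: enable the instruction cache via get_cr()/set_cr(). */
static inline void example_enable_icache(void)
{
	set_cr(get_cr() | CR_I);
}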
/*
* Save the current interrupt enable state & disable IRQs
*/
#define local_irq_save(x) \
({ \
unsigned long temp; \
(void) (&temp == &x); \
__asm__ __volatile__( \
"mrs %0, cpsr @ local_irq_save\n" \
" orr %1, %0, #128\n" \
" msr cpsr_c, %1" \
: "=r" (x), "=r" (temp) \
: \
: "memory", "cc"); \
})
/*
* Enable IRQs
*/
#define local_irq_enable() \
({ \
unsigned long temp; \
__asm__ __volatile__( \
"mrs %0, cpsr @ local_irq_enable\n" \
" bic %0, %0, #128\n" \
" msr cpsr_c, %0" \
: "=r" (temp) \
: \
: "memory", "cc"); \
})
/*
* Disable IRQs
*/
#define local_irq_disable() \
({ \
unsigned long temp; \
__asm__ __volatile__( \
"mrs %0, cpsr @ local_irq_disable\n" \
" orr %0, %0, #128\n" \
" msr cpsr_c, %0" \
: "=r" (temp) \
: \
: "memory", "cc"); \
})
/*
* Enable FIQs
*/
#define __stf() \
({ \
unsigned long temp; \
__asm__ __volatile__( \
"mrs %0, cpsr @ stf\n" \
" bic %0, %0, #64\n" \
" msr cpsr_c, %0" \
: "=r" (temp) \
: \
: "memory", "cc"); \
})
/*
* Disable FIQs
*/
#define __clf() \
({ \
unsigned long temp; \
__asm__ __volatile__( \
"mrs %0, cpsr @ clf\n" \
" orr %0, %0, #64\n" \
" msr cpsr_c, %0" \
: "=r" (temp) \
: \
: "memory", "cc"); \
})
/*
* Save the current interrupt enable state.
*/
#define local_save_flags(x) \
({ \
__asm__ __volatile__( \
"mrs %0, cpsr @ local_save_flags" \
: "=r" (x) : : "memory", "cc"); \
})
/*
* restore saved IRQ & FIQ state
*/
#define local_irq_restore(x) \
__asm__ __volatile__( \
"msr cpsr_c, %0 @ local_irq_restore\n" \
: \
: "r" (x) \
: "memory", "cc")
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
* On the StrongARM, "swp" is terminally broken since it bypasses the
* cache totally. This means that the cache becomes inconsistent, and,
* since we use normal loads/stores as well, this is really bad.
* Typically, this causes oopsen in filp_close, but could have other,
* more disastrous effects. There are two work-arounds:
* 1. Disable interrupts and emulate the atomic swap
* 2. Clean the cache, perform atomic swap, flush the cache
*
* We choose (1) since its the "easiest" to achieve here and is not
* dependent on the processor type.
*/
#define swp_is_buggy
#endif
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif

	switch (size) {
#ifdef swp_is_buggy
	case 1:
		local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		local_irq_restore(flags);
		break;

	case 4:
		local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		local_irq_restore(flags);
		break;
#else
	case 1:
		__asm__ __volatile__("swpb %0, %1, [%2]"
					: "=&r" (ret)
					: "r" (x), "r" (ptr)
					: "memory", "cc");
		break;
	case 4:
		__asm__ __volatile__("swp %0, %1, [%2]"
					: "=&r" (ret)
					: "r" (x), "r" (ptr)
					: "memory", "cc");
		break;
#endif
	default:
		__bad_xchg(ptr, size), ret = 0;
	}

	return ret;
}
#endif
include/asm-arm/proc-armv/tlbflush.h deleted 100644 → 0 View file @ c5164261
/*
* linux/include/asm-arm/proc-armv/tlbflush.h
*
* Copyright (C) 1999-2003 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/config.h>
#include <asm/glue.h>
#define TLB_V3_PAGE (1 << 0)
#define TLB_V4_U_PAGE (1 << 1)
#define TLB_V4_D_PAGE (1 << 2)
#define TLB_V4_I_PAGE (1 << 3)
#define TLB_V6_U_PAGE (1 << 4)
#define TLB_V6_D_PAGE (1 << 5)
#define TLB_V6_I_PAGE (1 << 6)
#define TLB_V3_FULL (1 << 8)
#define TLB_V4_U_FULL (1 << 9)
#define TLB_V4_D_FULL (1 << 10)
#define TLB_V4_I_FULL (1 << 11)
#define TLB_V6_U_FULL (1 << 12)
#define TLB_V6_D_FULL (1 << 13)
#define TLB_V6_I_FULL (1 << 14)
#define TLB_V6_U_ASID (1 << 16)
#define TLB_V6_D_ASID (1 << 17)
#define TLB_V6_I_ASID (1 << 18)
#define TLB_DCLEAN (1 << 30)
#define TLB_WB (1 << 31)
/*
* MMU TLB Model
* =============
*
* We have the following to choose from:
* v3 - ARMv3
* v4 - ARMv4 without write buffer
* v4wb - ARMv4 with write buffer without I TLB flush entry instruction
* v4wbi - ARMv4 with write buffer with I TLB flush entry instruction
* v6wbi - ARMv6 with write buffer with I TLB flush entry instruction
*/
#undef _TLB
#undef MULTI_TLB
#define v3_tlb_flags (TLB_V3_FULL | TLB_V3_PAGE)
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
# define v3_possible_flags v3_tlb_flags
# define v3_always_flags v3_tlb_flags
# ifdef _TLB
# define MULTI_TLB 1
# else
# define _TLB v3
# endif
#else
# define v3_possible_flags 0
# define v3_always_flags (-1UL)
#endif
#define v4_tlb_flags (TLB_V4_U_FULL | TLB_V4_U_PAGE)
#if defined(CONFIG_CPU_ARM720T)
# define v4_possible_flags v4_tlb_flags
# define v4_always_flags v4_tlb_flags
# ifdef _TLB
# define MULTI_TLB 1
# else
# define _TLB v4
# endif
#else
# define v4_possible_flags 0
# define v4_always_flags (-1UL)
#endif
#define v4wbi_tlb_flags (TLB_WB | TLB_DCLEAN | \
TLB_V4_I_FULL | TLB_V4_D_FULL | \
TLB_V4_I_PAGE | TLB_V4_D_PAGE)
#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
defined(CONFIG_CPU_ARM926T) || defined(CONFIG_CPU_ARM1020) || \
defined(CONFIG_CPU_XSCALE)
# define v4wbi_possible_flags v4wbi_tlb_flags
# define v4wbi_always_flags v4wbi_tlb_flags
# ifdef _TLB
# define MULTI_TLB 1
# else
# define _TLB v4wbi
# endif
#else
# define v4wbi_possible_flags 0
# define v4wbi_always_flags (-1UL)
#endif
#define v4wb_tlb_flags (TLB_WB | TLB_DCLEAN | \
TLB_V4_I_FULL | TLB_V4_D_FULL | \
TLB_V4_D_PAGE)
#if defined(CONFIG_CPU_SA110) || defined(CONFIG_CPU_SA1100)
# define v4wb_possible_flags v4wb_tlb_flags
# define v4wb_always_flags v4wb_tlb_flags
# ifdef _TLB
# define MULTI_TLB 1
# else
# define _TLB v4wb
# endif
#else
# define v4wb_possible_flags 0
# define v4wb_always_flags (-1UL)
#endif
#define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | \
TLB_V6_I_FULL | TLB_V6_D_FULL | \
TLB_V6_I_PAGE | TLB_V6_D_PAGE | \
TLB_V6_I_ASID | TLB_V6_D_ASID)
#if defined(CONFIG_CPU_V6)
# define v6wbi_possible_flags v6wbi_tlb_flags
# define v6wbi_always_flags v6wbi_tlb_flags
# ifdef _TLB
# define MULTI_TLB 1
# else
# define _TLB v6wbi
# endif
#else
# define v6wbi_possible_flags 0
# define v6wbi_always_flags (-1UL)
#endif
#ifndef _TLB
#error Unknown TLB model
#endif
#ifndef __ASSEMBLY__
struct cpu_tlb_fns {
	void (*flush_user_range)(unsigned long, unsigned long, struct vm_area_struct *);
	void (*flush_kern_range)(unsigned long, unsigned long);
	unsigned long tlb_flags;
};
/*
* Select the calling method
*/
#ifdef MULTI_TLB
#define __cpu_flush_user_tlb_range cpu_tlb.flush_user_range
#define __cpu_flush_kern_tlb_range cpu_tlb.flush_kern_range
#else
#define __cpu_flush_user_tlb_range __glue(_TLB,_flush_user_tlb_range)
#define __cpu_flush_kern_tlb_range __glue(_TLB,_flush_kern_tlb_range)
extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long);
#endif
extern struct cpu_tlb_fns cpu_tlb;
#define __cpu_tlb_flags cpu_tlb.tlb_flags
/*
* TLB Management
* ==============
*
* The arch/arm/mm/tlb-*.S files implement these methods.
*
* The TLB specific code is expected to perform whatever tests it
* needs to determine if it should invalidate the TLB for each
* call. Start addresses are inclusive and end addresses are
* exclusive; it is safe to round these addresses down.
*
* flush_tlb_all()
*
* Invalidate the entire TLB.
*
* flush_tlb_mm(mm)
*
* Invalidate all TLB entries in a particular address
* space.
* - mm - mm_struct describing address space
*
* flush_tlb_range(mm,start,end)
*
* Invalidate a range of TLB entries in the specified
* address space.
* - mm - mm_struct describing address space
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
*
* flush_tlb_page(vaddr,vma)
*
* Invalidate the specified page in the specified address range.
* - vaddr - virtual address (may not be aligned)
* - vma - vma_struct describing address range
*
* flush_kern_tlb_page(kaddr)
*
* Invalidate the TLB entry for the specified page. The address
* will be in the kernels virtual memory space. Current uses
* only require the D-TLB to be invalidated.
* - kaddr - Kernel virtual memory address
*/
/*
* We optimise the code below by:
* - building a set of TLB flags that might be set in __cpu_tlb_flags
* - building a set of TLB flags that will always be set in __cpu_tlb_flags
* - if we're going to need __cpu_tlb_flags, access it once and only once
*
* This allows us to build optimal assembly for the single-CPU type case,
* and as close to optimal given the compiler constraints for multi-CPU
* case. We could do better for the multi-CPU case if the compiler
* implemented the "%?" method, but this has been discontinued due to too
* many people getting it wrong.
*/
#define possible_tlb_flags (v3_possible_flags | \
v4_possible_flags | \
v4wbi_possible_flags | \
v4wb_possible_flags | \
v6wbi_possible_flags)
#define always_tlb_flags (v3_always_flags & \
v4_always_flags & \
v4wbi_always_flags & \
v4wb_always_flags & \
v6wbi_always_flags)
#define tlb_flag(f) ((always_tlb_flags & (f)) || (__tlb_flag & possible_tlb_flags & (f)))
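To see why this helps, consider a kernel configured for a single TLB model, say v4wbi only (a hedged illustration): both flag sets are then compile-time constants, each tlb_flag() test folds to 0 or 1, and the unused mcr instructions vanish.
/*
 * Illustrative folding only, for a v4wbi-only build:
 *   possible_tlb_flags == always_tlb_flags == v4wbi_tlb_flags
 *   tlb_flag(TLB_V4_D_PAGE) -> 1  (flag always set; no runtime test)
 *   tlb_flag(TLB_V3_PAGE)   -> 0  (flag impossible; the asm is elided)
 */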
static inline void flush_tlb_all(void)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_WB))
		asm("mcr%? p15, 0, %0, c7, c10, 4" : : "r" (zero));

	if (tlb_flag(TLB_V3_FULL))
		asm("mcr%? p15, 0, %0, c6, c0, 0" : : "r" (zero));
	if (tlb_flag(TLB_V4_U_FULL | TLB_V6_U_FULL))
		asm("mcr%? p15, 0, %0, c8, c7, 0" : : "r" (zero));
	if (tlb_flag(TLB_V4_D_FULL | TLB_V6_D_FULL))
		asm("mcr%? p15, 0, %0, c8, c6, 0" : : "r" (zero));
	if (tlb_flag(TLB_V4_I_FULL | TLB_V6_I_FULL))
		asm("mcr%? p15, 0, %0, c8, c5, 0" : : "r" (zero));
}
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	const int zero = 0;
	const int asid = ASID(mm);
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_WB))
		asm("mcr%? p15, 0, %0, c7, c10, 4" : : "r" (zero));

	if (mm == current->active_mm) {
		if (tlb_flag(TLB_V3_FULL))
			asm("mcr%? p15, 0, %0, c6, c0, 0" : : "r" (zero));
		if (tlb_flag(TLB_V4_U_FULL))
			asm("mcr%? p15, 0, %0, c8, c7, 0" : : "r" (zero));
		if (tlb_flag(TLB_V4_D_FULL))
			asm("mcr%? p15, 0, %0, c8, c6, 0" : : "r" (zero));
		if (tlb_flag(TLB_V4_I_FULL))
			asm("mcr%? p15, 0, %0, c8, c5, 0" : : "r" (zero));
	}

	if (tlb_flag(TLB_V6_U_ASID))
		asm("mcr%? p15, 0, %0, c8, c7, 2" : : "r" (asid));
	if (tlb_flag(TLB_V6_D_ASID))
		asm("mcr%? p15, 0, %0, c8, c6, 2" : : "r" (asid));
	if (tlb_flag(TLB_V6_I_ASID))
		asm("mcr%? p15, 0, %0, c8, c5, 2" : : "r" (asid));
}
static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);

	if (tlb_flag(TLB_WB))
		asm("mcr%? p15, 0, %0, c7, c10, 4" : : "r" (zero));

	if (vma->vm_mm == current->active_mm) {
		if (tlb_flag(TLB_V3_PAGE))
			asm("mcr%? p15, 0, %0, c6, c0, 0" : : "r" (uaddr));
		if (tlb_flag(TLB_V4_U_PAGE))
			asm("mcr%? p15, 0, %0, c8, c7, 1" : : "r" (uaddr));
		if (tlb_flag(TLB_V4_D_PAGE))
			asm("mcr%? p15, 0, %0, c8, c6, 1" : : "r" (uaddr));
		if (tlb_flag(TLB_V4_I_PAGE))
			asm("mcr%? p15, 0, %0, c8, c5, 1" : : "r" (uaddr));
		if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
			asm("mcr%? p15, 0, %0, c8, c5, 0" : : "r" (zero));
	}

	if (tlb_flag(TLB_V6_U_PAGE))
		asm("mcr%? p15, 0, %0, c8, c7, 1" : : "r" (uaddr));
	if (tlb_flag(TLB_V6_D_PAGE))
		asm("mcr%? p15, 0, %0, c8, c6, 1" : : "r" (uaddr));
	if (tlb_flag(TLB_V6_I_PAGE))
		asm("mcr%? p15, 0, %0, c8, c5, 1" : : "r" (uaddr));
}
static inline void flush_tlb_kernel_page(unsigned long kaddr)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	kaddr &= PAGE_MASK;

	if (tlb_flag(TLB_WB))
		asm("mcr%? p15, 0, %0, c7, c10, 4" : : "r" (zero));

	if (tlb_flag(TLB_V3_PAGE))
		asm("mcr%? p15, 0, %0, c6, c0, 0" : : "r" (kaddr));
	if (tlb_flag(TLB_V4_U_PAGE))
		asm("mcr%? p15, 0, %0, c8, c7, 1" : : "r" (kaddr));
	if (tlb_flag(TLB_V4_D_PAGE))
		asm("mcr%? p15, 0, %0, c8, c6, 1" : : "r" (kaddr));
	if (tlb_flag(TLB_V4_I_PAGE))
		asm("mcr%? p15, 0, %0, c8, c5, 1" : : "r" (kaddr));
	if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
		asm("mcr%? p15, 0, %0, c8, c5, 0" : : "r" (zero));

	if (tlb_flag(TLB_V6_U_PAGE))
		asm("mcr%? p15, 0, %0, c8, c7, 1" : : "r" (kaddr));
	if (tlb_flag(TLB_V6_D_PAGE))
		asm("mcr%? p15, 0, %0, c8, c6, 1" : : "r" (kaddr));
	if (tlb_flag(TLB_V6_I_PAGE))
		asm("mcr%? p15, 0, %0, c8, c5, 1" : : "r" (kaddr));
}
/*
* flush_pmd_entry
*
* Flush a PMD entry (word aligned, or double-word aligned) to
* RAM if the TLB for the CPU we are running on requires this.
* This is typically used when we are creating PMD entries.
*
* clean_pmd_entry
*
* Clean (but don't drain the write buffer) if the CPU requires
* these operations. This is typically used when we are removing
* PMD entries.
*/
static inline void flush_pmd_entry(pmd_t *pmd)
{
	const unsigned int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_DCLEAN))
		asm("mcr%? p15, 0, %0, c7, c10, 1 @ flush_pmd" : : "r" (pmd));
	if (tlb_flag(TLB_WB))
		asm("mcr%? p15, 0, %0, c7, c10, 4 @ flush_pmd" : : "r" (zero));
}
static inline void clean_pmd_entry(pmd_t *pmd)
{
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_DCLEAN))
		asm("mcr%? p15, 0, %0, c7, c10, 1 @ flush_pmd" : : "r" (pmd));
}
#undef tlb_flag
#undef always_tlb_flags
#undef possible_tlb_flags
/*
* Convert calls to our calling convention.
*/
#define flush_tlb_range(vma,start,end) __cpu_flush_user_tlb_range(start,end,vma)
#define flush_tlb_kernel_range(s,e) __cpu_flush_kern_tlb_range(s,e)
/*
* if PG_dcache_dirty is set for the page, we need to ensure that any
* cache entries for the kernels virtual memory range are written
* back to the page.
*/
extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte);
/*
* ARM processors do not cache TLB tables in RAM.
*/
#define flush_tlb_pgtables(mm,start,end) do { } while (0)
/*
* Old ARM MEMC stuff. This supports the reversed mapping handling that
* we have on the older 26-bit machines. We don't have a MEMC chip, so...
*/
#define memc_update_all() do { } while (0)
#define memc_update_mm(mm) do { } while (0)
#define memc_update_addr(mm,pte,log) do { } while (0)
#define memc_clear(mm,physaddr) do { } while (0)
#endif
include/asm-arm/proc-armv/uaccess.h deleted 100644 → 0 View file @ c5164261
/*
* linux/include/asm-arm/proc-armv/uaccess.h
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <asm/arch/memory.h>
#include <asm/proc/domain.h>
/*
* Note that this is actually 0x1,0000,0000
*/
#define KERNEL_DS 0x00000000
#define USER_DS TASK_SIZE
static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;
	modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
}
/* We use 33-bit arithmetic here... */
#define __range_ok(addr,size) ({ \
unsigned long flag, sum; \
__asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \
: "=&r" (flag), "=&r" (sum) \
: "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \
: "cc"); \
flag; })
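In plain C the check is equivalent to the following sketch (illustrative only; note the real macro yields 0 on success and keeps everything in two instructions without 64-bit arithmetic):
/*
 * Illustrative C equivalent of __range_ok(): the access [addr, addr+size)
 * is acceptable when the 33-bit sum addr + size stays within addr_limit,
 * where KERNEL_DS is stored as 0 but really means 1 << 32 (see above).
 */
static inline int example_range_ok(unsigned long addr, unsigned long size)
{
	unsigned long long limit = current_thread_info()->addr_limit;

	if (limit == 0)				/* KERNEL_DS */
		limit = 0x100000000ULL;

	return (unsigned long long)addr + size <= limit;	/* 1 = OK */
}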
#define __addr_ok(addr) ({ \
unsigned long flag; \
__asm__("cmp %2, %0; movlo %0, #0" \
: "=&r" (flag) \
: "0" (current_thread_info()->addr_limit), "r" (addr) \
: "cc"); \
(flag == 0); })
#define __put_user_asm_byte(x,__pu_addr,err) \
__asm__ __volatile__( \
"1: strbt %1,[%2],#0\n" \
"2:\n" \
" .section .fixup,\"ax\"\n" \
" .align 2\n" \
"3: mov %0, %3\n" \
" b 2b\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 1b, 3b\n" \
" .previous" \
: "+r" (err) \
: "r" (x), "r" (__pu_addr), "i" (-EFAULT) \
: "cc")
#ifndef __ARMEB__
#define __put_user_asm_half(x,__pu_addr,err) \
({ \
unsigned long __temp = (unsigned long)(x); \
__put_user_asm_byte(__temp, __pu_addr, err); \
__put_user_asm_byte(__temp >> 8, __pu_addr + 1, err); \
})
#else
#define __put_user_asm_half(x,__pu_addr,err) \
({ \
unsigned long __temp = (unsigned long)(x); \
__put_user_asm_byte(__temp >> 8, __pu_addr, err); \
__put_user_asm_byte(__temp, __pu_addr + 1, err); \
})
#endif
#define __put_user_asm_word(x,__pu_addr,err) \
__asm__ __volatile__( \
"1: strt %1,[%2],#0\n" \
"2:\n" \
" .section .fixup,\"ax\"\n" \
" .align 2\n" \
"3: mov %0, %3\n" \
" b 2b\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 1b, 3b\n" \
" .previous" \
: "+r" (err) \
: "r" (x), "r" (__pu_addr), "i" (-EFAULT) \
: "cc")
#ifndef __ARMEB__
#define __reg_oper0 "%R2"
#define __reg_oper1 "%Q2"
#else
#define __reg_oper0 "%Q2"
#define __reg_oper1 "%R2"
#endif
#define __put_user_asm_dword(x,__pu_addr,err) \
__asm__ __volatile__( \
"1: strt " __reg_oper1 ", [%1], #4\n" \
"2: strt " __reg_oper0 ", [%1], #0\n" \
"3:\n" \
" .section .fixup,\"ax\"\n" \
" .align 2\n" \
"4: mov %0, %3\n" \
" b 3b\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 1b, 4b\n" \
" .long 2b, 4b\n" \
" .previous" \
: "+r" (err), "+r" (__pu_addr) \
: "r" (x), "i" (-EFAULT) \
: "cc")
#define __get_user_asm_byte(x,addr,err) \
__asm__ __volatile__( \
"1: ldrbt %1,[%2],#0\n" \
"2:\n" \
" .section .fixup,\"ax\"\n" \
" .align 2\n" \
"3: mov %0, %3\n" \
" mov %1, #0\n" \
" b 2b\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 1b, 3b\n" \
" .previous" \
: "+r" (err), "=&r" (x) \
: "r" (addr), "i" (-EFAULT) \
: "cc")
#ifndef __ARMEB__
#define __get_user_asm_half(x,__gu_addr,err) \
({ \
unsigned long __b1, __b2; \
__get_user_asm_byte(__b1, __gu_addr, err); \
__get_user_asm_byte(__b2, __gu_addr + 1, err); \
(x) = __b1 | (__b2 << 8); \
})
#else
#define __get_user_asm_half(x,__gu_addr,err) \
({ \
unsigned long __b1, __b2; \
__get_user_asm_byte(__b1, __gu_addr, err); \
__get_user_asm_byte(__b2, __gu_addr + 1, err); \
(x) = (__b1 << 8) | __b2; \
})
#endif
#define __get_user_asm_word(x,addr,err) \
__asm__ __volatile__( \
"1: ldrt %1,[%2],#0\n" \
"2:\n" \
" .section .fixup,\"ax\"\n" \
" .align 2\n" \
"3: mov %0, %3\n" \
" mov %1, #0\n" \
" b 2b\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 1b, 3b\n" \
" .previous" \
: "+r" (err), "=&r" (x) \
: "r" (addr), "i" (-EFAULT) \
: "cc")
extern unsigned long __arch_copy_from_user(void *to, const void *from, unsigned long n);
#define __do_copy_from_user(to,from,n) \
	(n) = __arch_copy_from_user(to,from,n)
extern unsigned long __arch_copy_to_user(void *to, const void *from, unsigned long n);
#define __do_copy_to_user(to,from,n) \
	(n) = __arch_copy_to_user(to,from,n)
extern unsigned long __arch_clear_user(void *addr, unsigned long n);
#define __do_clear_user(addr,sz) \
	(sz) = __arch_clear_user(addr,sz)
extern unsigned long __arch_strncpy_from_user(char *to, const char *from, unsigned long count);
#define __do_strncpy_from_user(dst,src,count,res) \
	(res) = __arch_strncpy_from_user(dst,src,count)
extern unsigned long __arch_strnlen_user(const char *s, long n);
#define __do_strnlen_user(s,n,res) \
	(res) = __arch_strnlen_user(s,n)
include/asm-arm/proc-fns.h View file @ b5984adb
...
@@ -21,11 +21,6 @@
#undef MULTI_CPU
#undef CPU_NAME
#ifdef CONFIG_CPU_26
# define CPU_INCLUDE_NAME "asm/cpu-multi26.h"
# define MULTI_CPU
#endif
/*
 * CPU_NAME - the prefix for CPU related functions
 */
...
include/asm-arm/processor.h View file @ b5984adb
/*
 * linux/include/asm-arm/processor.h
 *
 * Copyright (C) 1995 Russell King
 * Copyright (C) 1995-1999 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
...
@@ -27,9 +27,10 @@
#include <asm/ptrace.h>
#include <asm/procinfo.h>
#include <asm/arch/memory.h>
#include <asm/proc/processor.h>
#include <asm/types.h>
#define KERNEL_STACK_SIZE PAGE_SIZE
union debug_insn {
	u32 arm;
	u16 thumb;
...
@@ -56,6 +57,24 @@ struct thread_struct {
#define INIT_THREAD { }
#define start_thread(regs,pc,sp) \
({ \
unsigned long *stack = (unsigned long *)sp; \
set_fs(USER_DS); \
memzero(regs->uregs, sizeof(regs->uregs)); \
if (current->personality & ADDR_LIMIT_32BIT) \
regs->ARM_cpsr = USR_MODE; \
else \
regs->ARM_cpsr = USR26_MODE; \
if (elf_hwcap & HWCAP_THUMB && pc & 1) \
regs->ARM_cpsr |= PSR_T_BIT; \
	regs->ARM_pc = pc & ~1;		/* pc */	\
	regs->ARM_sp = sp;		/* sp */	\
	regs->ARM_r2 = stack[2];	/* r2 (envp) */	\
	regs->ARM_r1 = stack[1];	/* r1 (argv) */	\
	regs->ARM_r0 = stack[0];	/* r0 (argc) */	\
})
/* Forward declaration, a strange C thing */
struct task_struct;
...
@@ -74,6 +93,9 @@ unsigned long get_wchan(struct task_struct *p);
 */
extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
#define KSTK_EIP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)->thread_info))[1019])
#define KSTK_ESP(tsk) (((unsigned long *)(4096+(unsigned long)(tsk)->thread_info))[1017])
/*
 * Prefetching support - only ARMv5.
 */
...
include/asm-arm/ptrace.h View file @ b5984adb
/*
* linux/include/asm-arm/ptrace.h
*
* Copyright (C) 1996-2003 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_ARM_PTRACE_H
#define __ASM_ARM_PTRACE_H
#include <linux/config.h>
#define PTRACE_GETREGS 12
#define PTRACE_SETREGS 13
#define PTRACE_GETFPREGS 14
...
@@ -8,9 +19,112 @@
#define PTRACE_OLDSETOPTIONS 21
#include <asm/proc/ptrace.h>
/*
* PSR bits
*/
#define USR26_MODE 0x00000000
#define FIQ26_MODE 0x00000001
#define IRQ26_MODE 0x00000002
#define SVC26_MODE 0x00000003
#define USR_MODE 0x00000010
#define FIQ_MODE 0x00000011
#define IRQ_MODE 0x00000012
#define SVC_MODE 0x00000013
#define ABT_MODE 0x00000017
#define UND_MODE 0x0000001b
#define SYSTEM_MODE 0x0000001f
#define MODE32_BIT 0x00000010
#define MODE_MASK 0x0000001f
#define PSR_T_BIT 0x00000020
#define PSR_F_BIT 0x00000040
#define PSR_I_BIT 0x00000080
#define PSR_J_BIT 0x01000000
#define PSR_Q_BIT 0x08000000
#define PSR_V_BIT 0x10000000
#define PSR_C_BIT 0x20000000
#define PSR_Z_BIT 0x40000000
#define PSR_N_BIT 0x80000000
#define PCMASK 0
/*
* Groups of PSR bits
*/
#define PSR_f 0xff000000 /* Flags */
#define PSR_s 0x00ff0000 /* Status */
#define PSR_x 0x0000ff00 /* Extension */
#define PSR_c 0x000000ff /* Control */
#ifndef __ASSEMBLY__
/* this struct defines the way the registers are stored on the
stack during a system call. */
struct pt_regs {
	long uregs[18];
};
#define ARM_cpsr uregs[16]
#define ARM_pc uregs[15]
#define ARM_lr uregs[14]
#define ARM_sp uregs[13]
#define ARM_ip uregs[12]
#define ARM_fp uregs[11]
#define ARM_r10 uregs[10]
#define ARM_r9 uregs[9]
#define ARM_r8 uregs[8]
#define ARM_r7 uregs[7]
#define ARM_r6 uregs[6]
#define ARM_r5 uregs[5]
#define ARM_r4 uregs[4]
#define ARM_r3 uregs[3]
#define ARM_r2 uregs[2]
#define ARM_r1 uregs[1]
#define ARM_r0 uregs[0]
#define ARM_ORIG_r0 uregs[17]
#ifdef __KERNEL__
#define user_mode(regs) \
(((regs)->ARM_cpsr & 0xf) == 0)
#ifdef CONFIG_ARM_THUMB
#define thumb_mode(regs) \
(((regs)->ARM_cpsr & PSR_T_BIT))
#else
#define thumb_mode(regs) (0)
#endif
#define processor_mode(regs) \
((regs)->ARM_cpsr & MODE_MASK)
#define interrupts_enabled(regs) \
(!((regs)->ARM_cpsr & PSR_I_BIT))
#define fast_interrupts_enabled(regs) \
(!((regs)->ARM_cpsr & PSR_F_BIT))
#define condition_codes(regs) \
((regs)->ARM_cpsr & (PSR_V_BIT|PSR_C_BIT|PSR_Z_BIT|PSR_N_BIT))
/* Are the current registers suitable for user mode?
* (used to maintain security in signal handlers)
*/
static inline int valid_user_regs(struct pt_regs *regs)
{
	if (user_mode(regs) &&
	    (regs->ARM_cpsr & (PSR_F_BIT|PSR_I_BIT)) == 0)
		return 1;

	/*
	 * Force CPSR to something logical...
	 */
	regs->ARM_cpsr &= PSR_f | PSR_s | PSR_x | PSR_T_BIT | MODE32_BIT;

	return 0;
}
#endif /* __KERNEL__ */
#define pc_pointer(v) \
	((v) & ~PCMASK)
...
include/asm-arm/semaphore.h View file @ b5984adb
...
@@ -10,7 +10,7 @@
#include <linux/rwsem.h>
#include <asm/atomic.h>
#include <asm/proc/locks.h>
#include <asm/locks.h>
struct semaphore {
	atomic_t count;
...
include/asm-arm/shmparam.h View file @ b5984adb
#ifndef _ASMARM_SHMPARAM_H
#define _ASMARM_SHMPARAM_H
#include <asm/proc/shmparam.h>
/*
 * This should be the size of the virtually indexed cache/ways,
 * or page size, whichever is greater since the cache aliases
...
include/asm-arm/system.h View file @ b5984adb
...
@@ -4,6 +4,45 @@
#ifdef __KERNEL__
#include <linux/config.h>
#define CPU_ARCH_UNKNOWN 0
#define CPU_ARCH_ARMv3 1
#define CPU_ARCH_ARMv4 2
#define CPU_ARCH_ARMv4T 3
#define CPU_ARCH_ARMv5 4
#define CPU_ARCH_ARMv5T 5
#define CPU_ARCH_ARMv5TE 6
#define CPU_ARCH_ARMv6 7
/*
* CR1 bits (CP#15 CR1)
*/
#define CR_M (1 << 0) /* MMU enable */
#define CR_A (1 << 1) /* Alignment abort enable */
#define CR_C (1 << 2) /* Dcache enable */
#define CR_W (1 << 3) /* Write buffer enable */
#define CR_P (1 << 4) /* 32-bit exception handler */
#define CR_D (1 << 5) /* 32-bit data address range */
#define CR_L (1 << 6) /* Implementation defined */
#define CR_B (1 << 7) /* Big endian */
#define CR_S (1 << 8) /* System MMU protection */
#define CR_R (1 << 9) /* ROM MMU protection */
#define CR_F (1 << 10) /* Implementation defined */
#define CR_Z (1 << 11) /* Implementation defined */
#define CR_I (1 << 12) /* Icache enable */
#define CR_V (1 << 13) /* Vectors relocated to 0xffff0000 */
#define CR_RR (1 << 14) /* Round Robin cache replacement */
#define CR_L4 (1 << 15) /* LDR pc can set T bit */
#define CR_DT (1 << 16)
#define CR_IT (1 << 18)
#define CR_ST (1 << 19)
#define CR_FI (1 << 21)
#define CR_U (1 << 22) /* Unaligned access operation */
#define CR_XP (1 << 23) /* Extended page tables */
#define CR_VE (1 << 24) /* Vectored interrupts */
#ifndef __ASSEMBLY__
#include <linux/kernel.h>
struct thread_info;
...
@@ -34,21 +73,30 @@ void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
extern asmlinkage void __backtrace(void);
#define CPU_ARCH_UNKNOWN 0
#define CPU_ARCH_ARMv3 1
#define CPU_ARCH_ARMv4 2
#define CPU_ARCH_ARMv4T 3
#define CPU_ARCH_ARMv5 4
#define CPU_ARCH_ARMv5T 5
#define CPU_ARCH_ARMv5TE 6
#define CPU_ARCH_ARMv6 7
extern int cpu_architecture(void);
/*
 * Include processor dependent parts
 */
#include <asm/proc/system.h>
#define set_cr(x) \
	__asm__ __volatile__( \
	"mcr p15, 0, %0, c1, c0, 0 @ set CR" \
	: : "r" (x) : "cc")
#define get_cr() \
	({ \
	unsigned int __val; \
	__asm__ __volatile__( \
	"mrc p15, 0, %0, c1, c0, 0 @ get CR" \
	: "=r" (__val) : : "cc"); \
	__val; \
	})
extern unsigned long cr_no_alignment; /* defined in entry-armv.S */
extern unsigned long cr_alignment; /* defined in entry-armv.S */
#if __LINUX_ARM_ARCH__ >= 4
#define vectors_base() ((cr_alignment & CR_V) ? 0xffff0000 : 0)
#else
#define vectors_base() (0)
#endif
#define mb() __asm__ __volatile__ ("" : : : "memory")
#define rmb() mb()
...
@@ -75,6 +123,102 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
	mb(); \
} while (0)
/*
* Save the current interrupt enable state & disable IRQs
*/
#define local_irq_save(x) \
({ \
unsigned long temp; \
(void) (&temp == &x); \
__asm__ __volatile__( \
"mrs %0, cpsr @ local_irq_save\n" \
" orr %1, %0, #128\n" \
" msr cpsr_c, %1" \
: "=r" (x), "=r" (temp) \
: \
: "memory", "cc"); \
})
/*
* Enable IRQs
*/
#define local_irq_enable() \
({ \
unsigned long temp; \
__asm__ __volatile__( \
"mrs %0, cpsr @ local_irq_enable\n" \
" bic %0, %0, #128\n" \
" msr cpsr_c, %0" \
: "=r" (temp) \
: \
: "memory", "cc"); \
})
/*
* Disable IRQs
*/
#define local_irq_disable() \
({ \
unsigned long temp; \
__asm__ __volatile__( \
"mrs %0, cpsr @ local_irq_disable\n" \
" orr %0, %0, #128\n" \
" msr cpsr_c, %0" \
: "=r" (temp) \
: \
: "memory", "cc"); \
})
/*
* Enable FIQs
*/
#define __stf() \
({ \
unsigned long temp; \
__asm__ __volatile__( \
"mrs %0, cpsr @ stf\n" \
" bic %0, %0, #64\n" \
" msr cpsr_c, %0" \
: "=r" (temp) \
: \
: "memory", "cc"); \
})
/*
* Disable FIQs
*/
#define __clf() \
({ \
unsigned long temp; \
__asm__ __volatile__( \
"mrs %0, cpsr @ clf\n" \
" orr %0, %0, #64\n" \
" msr cpsr_c, %0" \
: "=r" (temp) \
: \
: "memory", "cc"); \
})
/*
* Save the current interrupt enable state.
*/
#define local_save_flags(x) \
({ \
__asm__ __volatile__( \
"mrs %0, cpsr @ local_save_flags" \
: "=r" (x) : : "memory", "cc"); \
})
/*
* restore saved IRQ & FIQ state
*/
#define local_irq_restore(x) \
__asm__ __volatile__( \
"msr cpsr_c, %0 @ local_irq_restore\n" \
: \
: "r" (x) \
: "memory", "cc")
#ifdef CONFIG_SMP
#error SMP not supported
...
@@ -100,8 +244,67 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
	flags & PSR_I_BIT; \
})
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
* On the StrongARM, "swp" is terminally broken since it bypasses the
* cache totally. This means that the cache becomes inconsistent, and,
* since we use normal loads/stores as well, this is really bad.
* Typically, this causes oopsen in filp_close, but could have other,
* more disastrous effects. There are two work-arounds:
* 1. Disable interrupts and emulate the atomic swap
* 2. Clean the cache, perform atomic swap, flush the cache
*
* We choose (1) since its the "easiest" to achieve here and is not
* dependent on the processor type.
*/
#define swp_is_buggy
#endif
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif

	switch (size) {
#ifdef swp_is_buggy
	case 1:
		local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		local_irq_restore(flags);
		break;

	case 4:
		local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		local_irq_restore(flags);
		break;
#else
	case 1:
		__asm__ __volatile__("swpb %0, %1, [%2]"
					: "=&r" (ret)
					: "r" (x), "r" (ptr)
					: "memory", "cc");
		break;
	case 4:
		__asm__ __volatile__("swp %0, %1, [%2]"
					: "=&r" (ret)
					: "r" (x), "r" (ptr)
					: "memory", "cc");
		break;
#endif
	default:
		__bad_xchg(ptr, size), ret = 0;
	}

	return ret;
}
#endif /* CONFIG_SMP */
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif
include/asm-arm/thread_info.h View file @ b5984adb
...
@@ -18,9 +18,9 @@ struct task_struct;
struct exec_domain;
#include <asm/fpstate.h>
#include <asm/proc/processor.h>
#include <asm/ptrace.h>
#include <asm/types.h>
#include <asm/domain.h>
typedef unsigned long mm_segment_t;
...
@@ -55,17 +55,19 @@ struct thread_info {
	union fp_state fpstate;
#define INIT_THREAD_INFO(tsk) \
#define INIT_THREAD_INFO(tsk) \
{ \
{ \
.task = &tsk, \
.task = &tsk, \
.exec_domain = &default_exec_domain, \
.exec_domain = &default_exec_domain, \
.flags = 0, \
.flags = 0, \
.preempt_count = 1, \
.preempt_count = 1, \
.addr_limit = KERNEL_DS, \
.addr_limit = KERNEL_DS, \
.restart_block = { \
.cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
.fn = do_no_restart_syscall, \
domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
}, \
domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
INIT_EXTRA_THREAD_INFO, \
.restart_block = { \
.fn = do_no_restart_syscall, \
}, \
}
}
#define init_thread_info (init_thread_union.thread_info)
#define init_thread_info (init_thread_union.thread_info)
...
...
include/asm-arm/tlbflush.h View file @ b5984adb
/*
 * linux/include/asm-arm/tlbflush.h
 *
 * Copyright (C) 2000-2002 Russell King
 * Copyright (C) 1999-2003 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
...
@@ -10,6 +10,397 @@
#ifndef _ASMARM_TLBFLUSH_H
#define _ASMARM_TLBFLUSH_H
#include <asm-arm/proc/tlbflush.h>
#include <linux/config.h>
#include <asm/glue.h>
#define TLB_V3_PAGE (1 << 0)
#define TLB_V4_U_PAGE (1 << 1)
#define TLB_V4_D_PAGE (1 << 2)
#define TLB_V4_I_PAGE (1 << 3)
#define TLB_V6_U_PAGE (1 << 4)
#define TLB_V6_D_PAGE (1 << 5)
#define TLB_V6_I_PAGE (1 << 6)
#define TLB_V3_FULL (1 << 8)
#define TLB_V4_U_FULL (1 << 9)
#define TLB_V4_D_FULL (1 << 10)
#define TLB_V4_I_FULL (1 << 11)
#define TLB_V6_U_FULL (1 << 12)
#define TLB_V6_D_FULL (1 << 13)
#define TLB_V6_I_FULL (1 << 14)
#define TLB_V6_U_ASID (1 << 16)
#define TLB_V6_D_ASID (1 << 17)
#define TLB_V6_I_ASID (1 << 18)
#define TLB_DCLEAN (1 << 30)
#define TLB_WB (1 << 31)
/*
* MMU TLB Model
* =============
*
* We have the following to choose from:
* v3 - ARMv3
* v4 - ARMv4 without write buffer
* v4wb - ARMv4 with write buffer without I TLB flush entry instruction
* v4wbi - ARMv4 with write buffer with I TLB flush entry instruction
* v6wbi - ARMv6 with write buffer with I TLB flush entry instruction
*/
#undef _TLB
#undef MULTI_TLB
#define v3_tlb_flags (TLB_V3_FULL | TLB_V3_PAGE)
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
# define v3_possible_flags v3_tlb_flags
# define v3_always_flags v3_tlb_flags
# ifdef _TLB
# define MULTI_TLB 1
# else
# define _TLB v3
# endif
#else
# define v3_possible_flags 0
# define v3_always_flags (-1UL)
#endif
#define v4_tlb_flags (TLB_V4_U_FULL | TLB_V4_U_PAGE)
#if defined(CONFIG_CPU_ARM720T)
# define v4_possible_flags v4_tlb_flags
# define v4_always_flags v4_tlb_flags
# ifdef _TLB
# define MULTI_TLB 1
# else
# define _TLB v4
# endif
#else
# define v4_possible_flags 0
# define v4_always_flags (-1UL)
#endif
#define v4wbi_tlb_flags (TLB_WB | TLB_DCLEAN | \
TLB_V4_I_FULL | TLB_V4_D_FULL | \
TLB_V4_I_PAGE | TLB_V4_D_PAGE)
#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
defined(CONFIG_CPU_ARM926T) || defined(CONFIG_CPU_ARM1020) || \
defined(CONFIG_CPU_XSCALE)
# define v4wbi_possible_flags v4wbi_tlb_flags
# define v4wbi_always_flags v4wbi_tlb_flags
# ifdef _TLB
# define MULTI_TLB 1
# else
# define _TLB v4wbi
# endif
#else
# define v4wbi_possible_flags 0
# define v4wbi_always_flags (-1UL)
#endif
#define v4wb_tlb_flags (TLB_WB | TLB_DCLEAN | \
TLB_V4_I_FULL | TLB_V4_D_FULL | \
TLB_V4_D_PAGE)
#if defined(CONFIG_CPU_SA110) || defined(CONFIG_CPU_SA1100)
# define v4wb_possible_flags v4wb_tlb_flags
# define v4wb_always_flags v4wb_tlb_flags
# ifdef _TLB
# define MULTI_TLB 1
# else
# define _TLB v4wb
# endif
#else
# define v4wb_possible_flags 0
# define v4wb_always_flags (-1UL)
#endif
#define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | \
TLB_V6_I_FULL | TLB_V6_D_FULL | \
TLB_V6_I_PAGE | TLB_V6_D_PAGE | \
TLB_V6_I_ASID | TLB_V6_D_ASID)
#if defined(CONFIG_CPU_V6)
# define v6wbi_possible_flags v6wbi_tlb_flags
# define v6wbi_always_flags v6wbi_tlb_flags
# ifdef _TLB
# define MULTI_TLB 1
# else
# define _TLB v6wbi
# endif
#else
# define v6wbi_possible_flags 0
# define v6wbi_always_flags (-1UL)
#endif
#ifndef _TLB
#error Unknown TLB model
#endif
#ifndef __ASSEMBLY__
struct cpu_tlb_fns {
	void (*flush_user_range)(unsigned long, unsigned long, struct vm_area_struct *);
	void (*flush_kern_range)(unsigned long, unsigned long);
	unsigned long tlb_flags;
};
/*
* Select the calling method
*/
#ifdef MULTI_TLB
#define __cpu_flush_user_tlb_range cpu_tlb.flush_user_range
#define __cpu_flush_kern_tlb_range cpu_tlb.flush_kern_range
#else
#define __cpu_flush_user_tlb_range __glue(_TLB,_flush_user_tlb_range)
#define __cpu_flush_kern_tlb_range __glue(_TLB,_flush_kern_tlb_range)
extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long);
extern
struct
cpu_tlb_fns
cpu_tlb
;
#define __cpu_tlb_flags cpu_tlb.tlb_flags
/*
* TLB Management
* ==============
*
* The arch/arm/mm/tlb-*.S files implement these methods.
*
* The TLB specific code is expected to perform whatever tests it
* needs to determine if it should invalidate the TLB for each
* call. Start addresses are inclusive and end addresses are
* exclusive; it is safe to round these addresses down.
*
* flush_tlb_all()
*
* Invalidate the entire TLB.
*
* flush_tlb_mm(mm)
*
* Invalidate all TLB entries in a particular address
* space.
* - mm - mm_struct describing address space
*
* flush_tlb_range(mm,start,end)
*
* Invalidate a range of TLB entries in the specified
* address space.
* - mm - mm_struct describing address space
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
*
* flush_tlb_page(vaddr,vma)
*
* Invalidate the specified page in the specified address range.
* - vaddr - virtual address (may not be aligned)
* - vma - vma_struct describing address range
*
 * flush_tlb_kernel_page(kaddr)
 *
 * Invalidate the TLB entry for the specified page. The address
 * will be in the kernel's virtual memory space. Current uses
 * only require the D-TLB to be invalidated.
 * - kaddr - Kernel virtual memory address
*/
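As a usage sketch (hypothetical caller, not from this commit), generic memory-management code drives the operations documented above roughly as follows, using the flush_tlb_range()/flush_tlb_page() forms defined later in this header:
/* Hypothetical illustration of how the TLB management interfaces are typically used. */
static void example_tlb_usage(struct vm_area_struct *vma,
			      unsigned long start, unsigned long end,
			      unsigned long kaddr)
{
	flush_tlb_range(vma, start, end);	/* after unmapping [start, end) in vma  */
	flush_tlb_page(vma, start);		/* after changing a single user PTE     */
	flush_tlb_mm(vma->vm_mm);		/* after dropping a whole address space */
	flush_tlb_kernel_page(kaddr);		/* after changing a kernel mapping      */
	flush_tlb_all();			/* heavy hammer: invalidate everything  */
}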
/*
* We optimise the code below by:
* - building a set of TLB flags that might be set in __cpu_tlb_flags
* - building a set of TLB flags that will always be set in __cpu_tlb_flags
* - if we're going to need __cpu_tlb_flags, access it once and only once
*
* This allows us to build optimal assembly for the single-CPU type case,
 * and as close to optimal as the compiler constraints allow for the
 * multi-CPU case. We could do better for the multi-CPU case if the
 * compiler implemented the "%?" method, but this has been discontinued
 * due to too many people getting it wrong.
*/
#define possible_tlb_flags (v3_possible_flags | \
v4_possible_flags | \
v4wbi_possible_flags | \
v4wb_possible_flags | \
v6wbi_possible_flags)
#define always_tlb_flags (v3_always_flags & \
v4_always_flags & \
v4wbi_always_flags & \
v4wb_always_flags & \
v6wbi_always_flags)
#define tlb_flag(f) ((always_tlb_flags & (f)) || (__tlb_flag & possible_tlb_flags & (f)))
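A hedged worked example of the constant folding described above (not part of the original header): in a hypothetical single-CPU v4wb build, always_tlb_flags and possible_tlb_flags are both the compile-time constant v4wb_tlb_flags, so each tlb_flag() test reduces at compile time and the unused __tlb_flag load can be discarded:
/*
 * tlb_flag(TLB_V4_D_FULL)
 *   == ((always_tlb_flags & TLB_V4_D_FULL) ||
 *       (__tlb_flag & possible_tlb_flags & TLB_V4_D_FULL))
 *   == 1		(TLB_V4_D_FULL is part of v4wb_tlb_flags)
 *
 * tlb_flag(TLB_V4_I_PAGE)
 *   == 0		(not in v4wb_tlb_flags, so both terms vanish)
 *
 * The corresponding if () blocks below are therefore compiled in or
 * discarded outright; only a MULTI_TLB build needs __cpu_tlb_flags at
 * run time.
 */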
static inline void flush_tlb_all(void)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_WB))
		asm("mcr%? p15, 0, %0, c7, c10, 4" : : "r" (zero));

	if (tlb_flag(TLB_V3_FULL))
		asm("mcr%? p15, 0, %0, c6, c0, 0" : : "r" (zero));
	if (tlb_flag(TLB_V4_U_FULL | TLB_V6_U_FULL))
		asm("mcr%? p15, 0, %0, c8, c7, 0" : : "r" (zero));
	if (tlb_flag(TLB_V4_D_FULL | TLB_V6_D_FULL))
		asm("mcr%? p15, 0, %0, c8, c6, 0" : : "r" (zero));
	if (tlb_flag(TLB_V4_I_FULL | TLB_V6_I_FULL))
		asm("mcr%? p15, 0, %0, c8, c5, 0" : : "r" (zero));
}
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	const int zero = 0;
	const int asid = ASID(mm);
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_WB))
		asm("mcr%? p15, 0, %0, c7, c10, 4" : : "r" (zero));

	if (mm == current->active_mm) {
		if (tlb_flag(TLB_V3_FULL))
			asm("mcr%? p15, 0, %0, c6, c0, 0" : : "r" (zero));
		if (tlb_flag(TLB_V4_U_FULL))
			asm("mcr%? p15, 0, %0, c8, c7, 0" : : "r" (zero));
		if (tlb_flag(TLB_V4_D_FULL))
			asm("mcr%? p15, 0, %0, c8, c6, 0" : : "r" (zero));
		if (tlb_flag(TLB_V4_I_FULL))
			asm("mcr%? p15, 0, %0, c8, c5, 0" : : "r" (zero));
	}

	if (tlb_flag(TLB_V6_U_ASID))
		asm("mcr%? p15, 0, %0, c8, c7, 2" : : "r" (asid));
	if (tlb_flag(TLB_V6_D_ASID))
		asm("mcr%? p15, 0, %0, c8, c6, 2" : : "r" (asid));
	if (tlb_flag(TLB_V6_I_ASID))
		asm("mcr%? p15, 0, %0, c8, c5, 2" : : "r" (asid));
}
static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);

	if (tlb_flag(TLB_WB))
		asm("mcr%? p15, 0, %0, c7, c10, 4" : : "r" (zero));

	if (vma->vm_mm == current->active_mm) {
		if (tlb_flag(TLB_V3_PAGE))
			asm("mcr%? p15, 0, %0, c6, c0, 0" : : "r" (uaddr));
		if (tlb_flag(TLB_V4_U_PAGE))
			asm("mcr%? p15, 0, %0, c8, c7, 1" : : "r" (uaddr));
		if (tlb_flag(TLB_V4_D_PAGE))
			asm("mcr%? p15, 0, %0, c8, c6, 1" : : "r" (uaddr));
		if (tlb_flag(TLB_V4_I_PAGE))
			asm("mcr%? p15, 0, %0, c8, c5, 1" : : "r" (uaddr));
		if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
			asm("mcr%? p15, 0, %0, c8, c5, 0" : : "r" (zero));
	}

	if (tlb_flag(TLB_V6_U_PAGE))
		asm("mcr%? p15, 0, %0, c8, c7, 1" : : "r" (uaddr));
	if (tlb_flag(TLB_V6_D_PAGE))
		asm("mcr%? p15, 0, %0, c8, c6, 1" : : "r" (uaddr));
	if (tlb_flag(TLB_V6_I_PAGE))
		asm("mcr%? p15, 0, %0, c8, c5, 1" : : "r" (uaddr));
}
static inline void flush_tlb_kernel_page(unsigned long kaddr)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	kaddr &= PAGE_MASK;

	if (tlb_flag(TLB_WB))
		asm("mcr%? p15, 0, %0, c7, c10, 4" : : "r" (zero));

	if (tlb_flag(TLB_V3_PAGE))
		asm("mcr%? p15, 0, %0, c6, c0, 0" : : "r" (kaddr));
	if (tlb_flag(TLB_V4_U_PAGE))
		asm("mcr%? p15, 0, %0, c8, c7, 1" : : "r" (kaddr));
	if (tlb_flag(TLB_V4_D_PAGE))
		asm("mcr%? p15, 0, %0, c8, c6, 1" : : "r" (kaddr));
	if (tlb_flag(TLB_V4_I_PAGE))
		asm("mcr%? p15, 0, %0, c8, c5, 1" : : "r" (kaddr));
	if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
		asm("mcr%? p15, 0, %0, c8, c5, 0" : : "r" (zero));

	if (tlb_flag(TLB_V6_U_PAGE))
		asm("mcr%? p15, 0, %0, c8, c7, 1" : : "r" (kaddr));
	if (tlb_flag(TLB_V6_D_PAGE))
		asm("mcr%? p15, 0, %0, c8, c6, 1" : : "r" (kaddr));
	if (tlb_flag(TLB_V6_I_PAGE))
		asm("mcr%? p15, 0, %0, c8, c5, 1" : : "r" (kaddr));
}
/*
* flush_pmd_entry
*
* Flush a PMD entry (word aligned, or double-word aligned) to
* RAM if the TLB for the CPU we are running on requires this.
* This is typically used when we are creating PMD entries.
*
* clean_pmd_entry
*
* Clean (but don't drain the write buffer) if the CPU requires
* these operations. This is typically used when we are removing
* PMD entries.
*/
static inline void flush_pmd_entry(pmd_t *pmd)
{
	const unsigned int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_DCLEAN))
		asm("mcr%? p15, 0, %0, c7, c10, 1 @ flush_pmd" : : "r" (pmd));
	if (tlb_flag(TLB_WB))
		asm("mcr%? p15, 0, %0, c7, c10, 4 @ flush_pmd" : : "r" (zero));
}
static inline void clean_pmd_entry(pmd_t *pmd)
{
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_DCLEAN))
		asm("mcr%? p15, 0, %0, c7, c10, 1 @ flush_pmd" : : "r" (pmd));
}
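A hedged usage sketch of the pattern the flush_pmd_entry()/clean_pmd_entry() comment describes (hypothetical helper names, not from this commit): page-table setup code writes the descriptor and then cleans it so the hardware table walker sees the new entry in RAM; teardown paths clean without draining the write buffer.
/* Hypothetical illustration only; real ARM set_pmd()/pmd_clear() do additional work. */
static void example_set_pmd(pmd_t *pmdp, pmd_t entry)
{
	*pmdp = entry;			/* install the new first-level descriptor       */
	flush_pmd_entry(pmdp);		/* clean it to RAM and drain the write buffer   */
}

static void example_clear_pmd(pmd_t *pmdp)
{
	*pmdp = __pmd(0);		/* remove the descriptor                        */
	clean_pmd_entry(pmdp);		/* clean only; no write buffer drain required   */
}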
#undef tlb_flag
#undef always_tlb_flags
#undef possible_tlb_flags
/*
* Convert calls to our calling convention.
*/
#define flush_tlb_range(vma,start,end) __cpu_flush_user_tlb_range(start,end,vma)
#define flush_tlb_kernel_range(s,e) __cpu_flush_kern_tlb_range(s,e)
/*
 * If PG_dcache_dirty is set for the page, we need to ensure that any
 * cache entries for the kernel's virtual memory range are written
 * back to the page.
 */
extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte);
/*
* ARM processors do not cache TLB tables in RAM.
*/
#define flush_tlb_pgtables(mm,start,end) do { } while (0)
#endif
#endif
#endif
include/asm-arm/uaccess.h
/*
* linux/include/asm-arm/uaccess.h
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _ASMARM_UACCESS_H
#define _ASMARM_UACCESS_H
...
@@ -6,6 +13,8 @@
 */
#include <linux/sched.h>
#include <asm/errno.h>
#include <asm/arch/memory.h>
#include <asm/domain.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
...
@@ -30,11 +39,39 @@ struct exception_table_entry
extern int fixup_exception(struct pt_regs *regs);
/*
* Note that this is actually 0x1,0000,0000
*/
#define KERNEL_DS 0x00000000
#define USER_DS TASK_SIZE
#define get_ds() (KERNEL_DS)
#define get_fs() (current_thread_info()->addr_limit)
static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;
	modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
}
#define segment_eq(a,b) ((a) == (b))
#include <asm/proc/uaccess.h>
#define __addr_ok(addr) ({ \
unsigned long flag; \
__asm__("cmp %2, %0; movlo %0, #0" \
: "=&r" (flag) \
: "0" (current_thread_info()->addr_limit), "r" (addr) \
: "cc"); \
(flag == 0); })
/* We use 33-bit arithmetic here... */
#define __range_ok(addr,size) ({ \
unsigned long flag, sum; \
__asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \
: "=&r" (flag), "=&r" (sum) \
: "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \
: "cc"); \
flag; })
#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
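The "33-bit arithmetic" remark above can be restated in plain C. The sketch below is an illustration only (not kernel code): __range_ok() returns zero, i.e. access_ok() succeeds, exactly when addr + size neither wraps 32 bits nor exceeds addr_limit, with the KERNEL_DS value 0 behaving as 2^32 so that everything is allowed. The TASK_SIZE value used in main() is an assumed example value.
#include <stdio.h>

/* Illustration of __range_ok()'s 33-bit check; returns 0 when the access is allowed. */
static unsigned long range_ok_equivalent(unsigned long addr, unsigned long size,
					 unsigned long addr_limit)
{
	/* KERNEL_DS is stored as 0 but effectively means 2^32 (hence "33-bit"). */
	unsigned long long limit = addr_limit ? addr_limit : 0x100000000ULL;
	unsigned long long end = (unsigned long long)addr + size;

	return (end <= limit) ? 0 : addr_limit;
}

int main(void)
{
	unsigned long user_ds = 0xbf000000UL;	/* assumed TASK_SIZE, for the example only */

	printf("%lu\n", range_ok_equivalent(0xbe000000UL, 0x1000, user_ds));	/* 0: allowed      */
	printf("%lu\n", range_ok_equivalent(0xfffff000UL, 0x2000, user_ds));	/* nonzero: wraps  */
	printf("%lu\n", range_ok_equivalent(0xfffff000UL, 0x2000, 0));		/* 0: KERNEL_DS    */
	return 0;
}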
...
@@ -125,17 +162,71 @@ do { \
	(x) = (__typeof__(*(ptr)))__gu_val; \
} while (0)
#define __get_user_asm_byte(x,addr,err) \
__asm__ __volatile__( \
"1: ldrbt %1,[%2],#0\n" \
"2:\n" \
" .section .fixup,\"ax\"\n" \
" .align 2\n" \
"3: mov %0, %3\n" \
" mov %1, #0\n" \
" b 2b\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 1b, 3b\n" \
" .previous" \
: "+r" (err), "=&r" (x) \
: "r" (addr), "i" (-EFAULT) \
: "cc")
#ifndef __ARMEB__
#define __get_user_asm_half(x,__gu_addr,err) \
({ \
unsigned long __b1, __b2; \
__get_user_asm_byte(__b1, __gu_addr, err); \
__get_user_asm_byte(__b2, __gu_addr + 1, err); \
(x) = __b1 | (__b2 << 8); \
})
#else
#define __get_user_asm_half(x,__gu_addr,err) \
({ \
unsigned long __b1, __b2; \
__get_user_asm_byte(__b1, __gu_addr, err); \
__get_user_asm_byte(__b2, __gu_addr + 1, err); \
(x) = (__b1 << 8) | __b2; \
})
#endif
#define __get_user_asm_word(x,addr,err) \
__asm__ __volatile__( \
"1: ldrt %1,[%2],#0\n" \
"2:\n" \
" .section .fixup,\"ax\"\n" \
" .align 2\n" \
"3: mov %0, %3\n" \
" mov %1, #0\n" \
" b 2b\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 1b, 3b\n" \
" .previous" \
: "+r" (err), "=&r" (x) \
: "r" (addr), "i" (-EFAULT) \
: "cc")
extern int __put_user_1(void *, unsigned int);
extern int __put_user_2(void *, unsigned int);
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);
extern int __put_user_bad(void);
#define __put_user_x(__r1,__p,__e,__s,__i...) \
#define __put_user_x(__r1,__p,__e,__s) \
	__asm__ __volatile__ ("bl	__put_user_" #__s \
		: "=&r" (__e) \
		: "0" (__p), "r" (__r1) \
		: __i, "cc")
		: "ip", "lr", "cc")
#define put_user(x,p) \
({ \
...
@@ -144,16 +235,16 @@ extern int __put_user_bad(void);
	register int __e asm("r0"); \
	switch (sizeof(*(__p))) { \
	case 1: \
		__put_user_x(__r1, __p, __e, 1, "ip", "lr"); \
		__put_user_x(__r1, __p, __e, 1); \
		break; \
	case 2: \
		__put_user_x(__r1, __p, __e, 2, "ip", "lr"); \
		__put_user_x(__r1, __p, __e, 2); \
		break; \
	case 4: \
		__put_user_x(__r1, __p, __e, 4, "ip", "lr"); \
		__put_user_x(__r1, __p, __e, 4); \
		break; \
	case 8: \
		__put_user_x(__r1, __p, __e, 8, "ip", "lr"); \
		__put_user_x(__r1, __p, __e, 8); \
		break; \
	default: __e = __put_user_bad(); break; \
	} \
...
@@ -186,10 +277,93 @@ do { \
	} \
} while (0)
#define __put_user_asm_byte(x,__pu_addr,err) \
__asm__ __volatile__( \
"1: strbt %1,[%2],#0\n" \
"2:\n" \
" .section .fixup,\"ax\"\n" \
" .align 2\n" \
"3: mov %0, %3\n" \
" b 2b\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 1b, 3b\n" \
" .previous" \
: "+r" (err) \
: "r" (x), "r" (__pu_addr), "i" (-EFAULT) \
: "cc")
#ifndef __ARMEB__
#define __put_user_asm_half(x,__pu_addr,err) \
({ \
unsigned long __temp = (unsigned long)(x); \
__put_user_asm_byte(__temp, __pu_addr, err); \
__put_user_asm_byte(__temp >> 8, __pu_addr + 1, err); \
})
#else
#define __put_user_asm_half(x,__pu_addr,err) \
({ \
unsigned long __temp = (unsigned long)(x); \
__put_user_asm_byte(__temp >> 8, __pu_addr, err); \
__put_user_asm_byte(__temp, __pu_addr + 1, err); \
})
#endif
#define __put_user_asm_word(x,__pu_addr,err) \
__asm__ __volatile__( \
"1: strt %1,[%2],#0\n" \
"2:\n" \
" .section .fixup,\"ax\"\n" \
" .align 2\n" \
"3: mov %0, %3\n" \
" b 2b\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 1b, 3b\n" \
" .previous" \
: "+r" (err) \
: "r" (x), "r" (__pu_addr), "i" (-EFAULT) \
: "cc")
#ifndef __ARMEB__
#define __reg_oper0 "%R2"
#define __reg_oper1 "%Q2"
#else
#define __reg_oper0 "%Q2"
#define __reg_oper1 "%R2"
#endif
#define __put_user_asm_dword(x,__pu_addr,err) \
__asm__ __volatile__( \
"1: strt " __reg_oper1 ", [%1], #4\n" \
"2: strt " __reg_oper0 ", [%1], #0\n" \
"3:\n" \
" .section .fixup,\"ax\"\n" \
" .align 2\n" \
"4: mov %0, %3\n" \
" b 3b\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 1b, 4b\n" \
" .long 2b, 4b\n" \
" .previous" \
: "+r" (err), "+r" (__pu_addr) \
: "r" (x), "i" (-EFAULT) \
: "cc")
extern unsigned long __arch_copy_from_user(void *to, const void *from, unsigned long n);
extern unsigned long __arch_copy_to_user(void *to, const void *from, unsigned long n);
extern unsigned long __arch_clear_user(void *addr, unsigned long n);
extern unsigned long __arch_strncpy_from_user(char *to, const char *from, unsigned long count);
extern unsigned long __arch_strnlen_user(const char *s, long n);
static __inline__ unsigned long copy_from_user(void *to, const void *from, unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n))
		__do_copy_from_user(to, from, n);
		n = __arch_copy_from_user(to, from, n);
	else	/* security hole - plug it */
		memzero(to, n);
	return n;
...
@@ -197,49 +371,44 @@ static __inline__ unsigned long copy_from_user(void *to, const void *from, unsig
static __inline__ unsigned long __copy_from_user(void *to, const void *from, unsigned long n)
{
	__do_copy_from_user(to, from, n);
	return __arch_copy_from_user(to, from, n);
	return n;
}
static __inline__ unsigned long copy_to_user(void *to, const void *from, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		__do_copy_to_user(to, from, n);
		n = __arch_copy_to_user(to, from, n);
	return n;
}
static __inline__ unsigned long __copy_to_user(void *to, const void *from, unsigned long n)
{
	__do_copy_to_user(to, from, n);
	return __arch_copy_to_user(to, from, n);
	return n;
}
static __inline__ unsigned long clear_user(void *to, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		__do_clear_user(to, n);
		n = __arch_clear_user(to, n);
	return n;
}
static __inline__ unsigned long __clear_user(void *to, unsigned long n)
{
	__do_clear_user(to, n);
	return __arch_clear_user(to, n);
	return n;
}
static __inline__ long strncpy_from_user(char *dst, const char *src, long count)
{
	long res = -EFAULT;
	if (access_ok(VERIFY_READ, src, 1))
		__do_strncpy_from_user(dst, src, count, res);
		res = __arch_strncpy_from_user(dst, src, count);
	return res;
}
static __inline__ long __strncpy_from_user(char *dst, const char *src, long count)
{
	long res;
	return __arch_strncpy_from_user(dst, src, count);
	__do_strncpy_from_user(dst, src, count, res);
	return res;
}
#define strlen_user(s) strnlen_user(s, ~0UL >> 1)
...
@@ -249,7 +418,7 @@ static inline long strnlen_user(const char *s, long n)
	unsigned long res = 0;
	if (__addr_ok(s))
		__do_strnlen_user(s, n, res);
		res = __arch_strnlen_user(s, n);
	return res;
}
...