Commit e18fad25, authored Feb 10, 2002 by Linus Torvalds
Merge (parents 77ae7b5e, 8a3fb763)
Showing 31 changed files with 627 additions and 470 deletions.
arch/alpha/Makefile                  +3    -1
arch/alpha/kernel/Makefile           +12   -8
arch/alpha/kernel/check_asm.c        +20   -22
arch/alpha/kernel/entry.S            +115  -150
arch/alpha/kernel/head.S             +2    -2
arch/alpha/kernel/init_task.c        +17   -0
arch/alpha/kernel/osf_sys.c          +14   -8
arch/alpha/kernel/pci_iommu.c        +2    -7
arch/alpha/kernel/process.c          +43   -36
arch/alpha/kernel/ptrace.c           +31   -24
arch/alpha/kernel/signal.c           +12   -3
arch/alpha/kernel/traps.c            +17   -16
arch/alpha/lib/ev6-memset.S          +1    -1
arch/alpha/math-emu/math.c           +3    -2
arch/alpha/mm/fault.c                +6    -4
arch/alpha/mm/init.c                 +7    -8
arch/alpha/vmlinux.lds.in            +48   -35
include/asm-alpha/asm_offsets.h      +0    -13
include/asm-alpha/bitops.h           +95   -19
include/asm-alpha/current.h          +5    -2
include/asm-alpha/fpu.h              +1    -1
include/asm-alpha/io.h               +5    -1
include/asm-alpha/mmu_context.h      +32   -4
include/asm-alpha/page.h             +5    -5
include/asm-alpha/processor.h        +32   -91
include/asm-alpha/smp.h              +2    -1
include/asm-alpha/sysinfo.h          +2    -2
include/asm-alpha/system.h           +1    -2
include/asm-alpha/thread_info.h      +89   -0
include/asm-alpha/uaccess.h          +2    -2
include/asm-alpha/unistd.h           +3    -0
arch/alpha/Makefile
@@ -117,11 +117,13 @@ srmboot:
 archclean:
 	@$(MAKE) -C arch/alpha/kernel clean
 	@$(MAKEBOOT) clean
 	rm -f arch/alpha/vmlinux.lds

 archmrproper:
 	rm -f arch/alpha/vmlinux.lds
+	rm -f include/asm-alpha/asm_offsets.h

 archdep:
+	$(MAKE) -C arch/alpha/kernel asm_offsets
 	@$(MAKEBOOT) dep

 vmlinux: arch/alpha/vmlinux.lds

arch/alpha/kernel/Makefile
@@ -8,15 +8,15 @@
 # Note 2! The CFLAGS definitions are now in the main makefile...

 .S.s:
-	$(CPP) $(AFLAGS) -o $*.s $<
+	$(CPP) $(CFLAGS) $(AFLAGS) -o $*.s $<
 .S.o:
-	$(CC) $(AFLAGS) -c -o $*.o $<
+	$(CC) $(CFLAGS) $(AFLAGS) -c -o $*.o $<

 O_TARGET := kernel.o

 export-objs := alpha_ksyms.o

-obj-y    := entry.o traps.o process.o osf_sys.o irq.o irq_alpha.o \
+obj-y    := entry.o traps.o process.o init_task.o osf_sys.o irq.o irq_alpha.o \
 	    signal.o setup.o ptrace.o time.o semaphore.o alpha_ksyms.o

 #
@@ -102,11 +102,15 @@ endif # GENERIC
 all: kernel.o head.o

-asm_offsets: check_asm
-	./check_asm > $(TOPDIR)/include/asm-alpha/asm_offsets.h
-
-check_asm: check_asm.c
-	$(HOSTCC) -o $@ $< $(CPPFLAGS) -ffixed-8
+ASM_OFFSETS_H = $(TOPDIR)/include/asm-alpha/asm_offsets.h
+asm_offsets:
+	$(CC) $(CFLAGS) -S -o - check_asm.c | \
+	  sed -e '/xyzzy/ { s/xyzzy //; p; }; d;' > asm_offsets.tmp
+	@if cmp -s asm_offsets.tmp $(ASM_OFFSETS_H); then \
+	  set -x; rm asm_offsets.tmp; \
+	else \
+	  set -x; mv asm_offsets.tmp $(ASM_OFFSETS_H); \
+	fi

 clean::
 	rm -f check_asm

arch/alpha/kernel/check_asm.c
@@ -3,30 +3,28 @@
 #include <linux/sched.h>
 #include <asm/io.h>

-int main()
+#define OUT(x) \
+  asm ("\nxyzzy " x)
+#define DEF(name, val) \
+  asm volatile ("\nxyzzy #define " name " %0" : : "i"(val))
+
+void foo(void)
 {
-	printf("#ifndef __ASM_OFFSETS_H__\n#define __ASM_OFFSETS_H__\n");
+	OUT("#ifndef __ASM_OFFSETS_H__");
+	OUT("#define __ASM_OFFSETS_H__");
+	OUT("");
+
+	DEF("TI_TASK", offsetof(struct thread_info, task));
+	DEF("TI_FLAGS", offsetof(struct thread_info, flags));
+	DEF("TI_CPU", offsetof(struct thread_info, cpu));

-	printf("#define TASK_STATE %ld\n", (long)offsetof(struct task_struct, state));
-	printf("#define TASK_FLAGS %ld\n", (long)offsetof(struct task_struct, flags));
-	printf("#define TASK_SIGPENDING %ld\n", (long)offsetof(struct task_struct, sigpending));
-	printf("#define TASK_ADDR_LIMIT %ld\n", (long)offsetof(struct task_struct, addr_limit));
-	printf("#define TASK_EXEC_DOMAIN %ld\n", (long)offsetof(struct task_struct, exec_domain));
-	printf("#define TASK_NEED_RESCHED %ld\n", (long)offsetof(struct task_struct, work.need_resched));
-	printf("#define TASK_SIZE %ld\n", sizeof(struct task_struct));
-	printf("#define STACK_SIZE %ld\n", sizeof(union task_union));
+	DEF("PT_PTRACED", PT_PTRACED);
+	DEF("CLONE_VM", CLONE_VM);
+	DEF("SIGCHLD", SIGCHLD);

-	printf("#define HAE_CACHE %ld\n", (long)offsetof(struct alpha_machine_vector, hae_cache));
-	printf("#define HAE_REG %ld\n", (long)offsetof(struct alpha_machine_vector, hae_register));
+	DEF("HAE_CACHE", offsetof(struct alpha_machine_vector, hae_cache));
+	DEF("HAE_REG", offsetof(struct alpha_machine_vector, hae_register));

-	printf("#endif /* __ASM_OFFSETS_H__ */\n");
-	return 0;
+	OUT("");
+	OUT("#endif /* __ASM_OFFSETS_H__ */");
 }

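For context, the new asm_offsets scheme never runs a host binary: check_asm.c is compiled with -S, each DEF() plants a line tagged "xyzzy" in the assembler output, and the sed rule added to arch/alpha/kernel/Makefile strips the tag to produce the header. Below is a minimal stand-alone sketch of the same trick, not taken from the patch; the file name "sketch.c" and the cut-down struct are hypothetical, and it assumes a GCC target (like Alpha) that prints "i"-constrained operands as bare numbers.

/* sketch.c -- illustrative only; mirrors the OUT()/DEF() trick above. */
#include <stddef.h>

struct thread_info { void *task; unsigned int flags; int cpu; };

#define DEF(name, val) \
	asm volatile ("\nxyzzy #define " name " %0" : : "i" (val))

void foo(void)
{
	DEF("TI_TASK",  offsetof(struct thread_info, task));
	DEF("TI_FLAGS", offsetof(struct thread_info, flags));
}

/* Running:
 *   cc -S -o - sketch.c | sed -e '/xyzzy/ { s/xyzzy //; p; }; d;'
 * would emit something like:
 *   #define TI_TASK 0
 *   #define TI_FLAGS 8
 * which is exactly what entry.S consumes via <asm/asm_offsets.h>.
 */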
arch/alpha/kernel/entry.S
@@ -7,43 +7,17 @@
 #include <linux/config.h>
 #include <asm/system.h>
 #include <asm/cache.h>
+#include <asm/asm_offsets.h>
+#include <asm/thread_info.h>

-#define SIGCHLD 20
-#define NR_SYSCALLS 378
-
-/*
- * These offsets must match with alpha_mv in <asm/machvec.h>.
- */
-#define HAE_CACHE	0
-#define HAE_REG		8
+#define NR_SYSCALLS 381

 /*
  * stack offsets
  */
 #define SP_OFF		184
 #define SWITCH_STACK_SIZE 320

-/*
- * task structure offsets
- */
-#define TASK_STATE		0
-#define TASK_FLAGS		8
-#define TASK_SIGPENDING	16
-#define TASK_ADDR_LIMIT	24
-#define TASK_EXEC_DOMAIN	32
-#define TASK_NEED_RESCHED	40
-#define TASK_PTRACE		48
-#define TASK_PROCESSOR		100
-
-/*
- * task flags (must match include/linux/sched.h):
- */
-#define PT_PTRACED	0x00000001
-#define CLONE_VM	0x00000100
-
 /*
  * This defines the normal kernel pt-regs layout.
  *
@@ -55,7 +29,7 @@
 */

 #define SAVE_ALL			\
-	subq	$30,184,$30;		\
+	subq	$30,SP_OFF,$30;		\
	stq	$0,0($30);		\
	stq	$1,8($30);		\
	stq	$2,16($30);		\
@@ -98,12 +72,8 @@
	ldq	$8,64($30);		\
	beq	$20,99f;		\
	ldq	$20,HAE_REG($19);	\
-	addq	$31,7,$16;		\
-	call_pal PAL_swpipl;		\
	stq	$21,HAE_CACHE($19);	\
	stq	$21,0($20);		\
-	mov	$0,$16;			\
-	call_pal PAL_swpipl;		\
	ldq	$0,0($30);		\
	ldq	$1,8($30);		\
99:;					\
@@ -117,13 +87,10 @@
	ldq	$26,128($30);		\
	ldq	$27,136($30);		\
	ldq	$28,144($30);		\
-	addq	$30,184,$30
+	addq	$30,SP_OFF,$30

.text
.set noat
-#if defined(__linux__) && !defined(__ELF__)
-.set singlegp
-#endif

	.align 3
	.globl	entInt
@@ -202,69 +169,58 @@ entDbg:
	.end entDbg

-/*
- * Fork() is one of the special system calls: it needs to
- * save the callee-saved regs so that the regs can be found
- * for the new process.. We save them in the "context switch"
- * stack format (see arch/alpha/kernel/process.c).
- *
- * Also, for the kernel fork, we need to fake the system call
- * stack buildup, as we can't do system calls from kernel space.
- */
-	.align 3
-	.ent	kernel_clone
-kernel_clone:
-	.frame $30, 0, $26
-	.prologue 0
-	subq	$30,6*8,$30
-	stq	$31,0($30)
-	stq	$26,8($30)
-	stq	$29,16($30)
-	stq	$16,24($30)
-	stq	$17,32($30)
-	stq	$18,40($30)
-	bis	$31,2,$0	/* Register v0: syscall nr for fork() */
-	SAVE_ALL
-	bsr	$26,sys_clone
-	stq	$0,0($30)
-	br	ret_from_sys_call
-	.end	kernel_clone
-
/*
 * kernel_thread(fn, arg, clone_flags)
 */
-	.align 3
+	.align 4
	.globl	kernel_thread
	.ent	kernel_thread
kernel_thread:
	ldgp	$29,0($27)	/* we can be called from a module */
	.frame	$30, 4*8, $26
-	subq	$30,4*8,$30
-	stq	$10,16($30)
-	stq	$9,8($30)
-	lda	$0,CLONE_VM
-	stq	$26,0($30)
	.prologue 1
-	mov	$16,$9		/* save fn */
-	mov	$17,$10		/* save arg */
-	or	$18,$0,$16	/* shuffle flags to front; add CLONE_VM. */
-	bsr	$26,kernel_clone
-	bne	$20,1f		/* $20 is non-zero in child */
-	ldq	$26,0($30)
-	ldq	$9,8($30)
-	ldq	$10,16($30)
-	addq	$30,4*8,$30
-	ret	$31,($26),1
-/* this is in child: look out as we don't have any stack here.. */
-1:	mov	$9,$27		/* get fn */
-	lda	$8,0x3fff
-	mov	$10,$16		/* get arg */
-	bic	$30,$8,$8	/* get current */
-	jsr	$26,($27)
-	ldgp	$29,0($26)
-	mov	$0,$16
-	mov	$31,$26
-	jsr	$31,sys_exit
+	subq	$30,SP_OFF+6*8,$30
+	br	$1,2f		/* load start address */
+
+	/* We've now "returned" from a fake system call.  */
+	unop
+	blt	$0,1f		/* error?  */
+	ldi	$1,0x3fff
+	beq	$20,1f		/* parent or child?  */
+
+	bic	$30,$1,$8	/* in child.  */
+	jsr	$26,($27)
+	ldgp	$29,0($26)
+	mov	$0,$16
+	mov	$31,$26
+	jmp	$31,sys_exit
+
+1:	ret			/* in parent.  */
+
+	.align 4
+2:	/* Fake a system call stack frame, as we can't do system calls
+	   from kernel space.  Note that we store FN and ARG as they
+	   need to be set up in the child for the call.  Also store $8
+	   and $26 for use in the parent.  */
+	stq	$31,SP_OFF($30)		/* ps */
+	stq	$1,SP_OFF+8($30)	/* pc */
+	stq	$29,SP_OFF+16($30)	/* gp */
+	stq	$16,136($30)		/* $27; FN for child */
+	stq	$17,SP_OFF+24($30)	/* $16; ARG for child */
+	stq	$8,64($30)		/* $8 */
+	stq	$26,128($30)		/* $26 */
+	/* Avoid the HAE being gratuitously wrong, to avoid restoring it.  */
+	ldq	$2,alpha_mv+HAE_CACHE
+	stq	$2,152($30)		/* HAE */
+
+	/* Shuffle FLAGS to the front; add CLONE_VM.  */
+	ldi	$1,CLONE_VM
+	or	$18,$1,$16
+	bsr	$26,sys_clone
+
+	/* We don't actually care for a3 success widgetry in the kernel.
+	   Not for positive errno values.  */
+	stq	$0,0($30)		/* $0 */
+	br	restore_all
	.end	kernel_thread

/*
@@ -535,10 +491,21 @@ alpha_switch_to:
	call_pal PAL_swpctx
	unop
	bsr	$1,undo_switch_stack
+	lda	$8,0x3fff
	mov	$17,$0
+	bic	$30,$8,$8
	ret	$31,($26),1
	.end alpha_switch_to

+	.globl	ret_from_fork
+	.align	3
+	.ent	ret_from_fork
+ret_from_fork:
+	lda	$26,ret_from_sys_call
+	mov	$0,$16
+	jmp	$31,schedule_tail
+	.end	ret_from_fork
+
/*
 * Oh, well.. Disassembling OSF/1 binaries to find out how the
 * system calls work isn't much fun.
@@ -559,12 +526,11 @@ entSys:
	lda	$5,sys_call_table
	lda	$27,sys_ni_syscall
	cmpult	$0,$4,$4
-	ldq	$3,TASK_PTRACE($8)
+	ldl	$3,TI_FLAGS($8)
	stq	$17,SP_OFF+32($30)
	s8addq	$0,$5,$5
-	and	$3,PT_PTRACED,$3
	stq	$18,SP_OFF+40($30)
-	bne	$3,strace
+	blbs	$3,strace
	beq	$4,1f
	ldq	$27,0($5)
1:	jsr	$26,($27),alpha_ni_syscall
@@ -580,17 +546,48 @@ ret_from_sys_call:
	and	$0,8,$0
	beq	$0,restore_all
-ret_from_reschedule:
-	ldq	$2,TASK_NEED_RESCHED($8)
-	lda	$4,init_task_union
-	bne	$2,reschedule
-	xor	$4,$8,$4
-	ldl	$5,TASK_SIGPENDING($8)
-	beq	$4,restore_all
-	bne	$5,signal_return
+	/* Make sure need_resched and sigpending don't change between
+	   sampling and the rti.  */
+	lda	$16,7
+	call_pal PAL_swpipl
+	ldl	$5,TI_FLAGS($8)
+	and	$5,_TIF_WORK_MASK,$2
+	bne	$5,work_pending
restore_all:
	RESTORE_ALL
	call_pal PAL_rti

+work_pending:
+	and	$5,_TIF_NEED_RESCHED,$2
+	beq	$2,work_notifysig
+
+work_resched:
+	subq	$30,16,$30
+	stq	$19,0($30)	/* save syscall nr */
+	stq	$20,8($30)	/* and error indication (a3) */
+	jsr	$26,schedule
+	ldq	$19,0($30)
+	ldq	$20,8($30)
+	addq	$30,16,$30
+	/* Make sure need_resched and sigpending don't change between
+	   sampling and the rti.  */
+	lda	$16,7
+	call_pal PAL_swpipl
+	ldl	$5,TI_FLAGS($8)
+	and	$5,_TIF_WORK_MASK,$2
+	beq	$2,restore_all
+	and	$5,_TIF_NEED_RESCHED,$2
+	bne	$2,work_resched
+
+work_notifysig:
+	mov	$30,$17
+	br	$1,do_switch_stack
+	mov	$5,$21
+	mov	$30,$18
+	mov	$31,$16
+	jsr	$26,do_notify_resume
+	bsr	$1,undo_switch_stack
+	br	restore_all
+
/*
 * PTRACE syscall handler
 */
	.align 3
@@ -677,40 +674,8 @@ ret_success:
	stq	$0,0($30)
	stq	$31,72($30)	/* a3=0 => no error */
	br	ret_from_sys_call
-
-	.align 3
-signal_return:
-	mov	$30,$17
-	br	$1,do_switch_stack
-	mov	$30,$18
-	mov	$31,$16
-	jsr	$26,do_signal
-	bsr	$1,undo_switch_stack
-	br	restore_all
	.end entSys

-	.globl	ret_from_fork
-	.align	3
-	.ent	ret_from_fork
-ret_from_fork:
-	lda	$26,ret_from_sys_call
-	mov	$17,$16
-	jsr	$31,schedule_tail
-	.end	ret_from_fork
-
-	.align 3
-	.ent	reschedule
-reschedule:
-	subq	$30,16,$30
-	stq	$19,0($30)	/* save syscall nr */
-	stq	$20,8($30)	/* and error indication (a3) */
-	jsr	$26,schedule
-	ldq	$19,0($30)
-	ldq	$20,8($30)
-	addq	$30,16,$30
-	br	ret_from_reschedule
-	.end	reschedule
-
	.align 3
	.ent	sys_sigreturn
sys_sigreturn:

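The reworked kernel_thread above keeps the C-level interface declared in include/asm-alpha/processor.h, kernel_thread(fn, arg, clone_flags), and ORs CLONE_VM in internally before faking the sys_clone frame. A hedged usage sketch follows; the worker function, its argument, and the caller are hypothetical and not part of this patch.

#include <linux/kernel.h>
#include <linux/sched.h>

/* Hypothetical worker: runs in kernel context, no user-space stack. */
static int my_worker(void *arg)
{
	long id = (long) arg;
	/* ... do the work ... */
	return (int) id;	/* return value ends up in sys_exit() */
}

static void spawn_worker(void)
{
	long pid = kernel_thread(my_worker, (void *) 1L,
				 CLONE_FS | CLONE_FILES);
	if (pid < 0)
		printk("kernel_thread failed: %ld\n", pid);
}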
arch/alpha/kernel/head.S
@@ -22,8 +22,8 @@ __start:
	.prologue 0
	br	$27,1f
1:	ldgp	$29,0($27)
-	/* We need to get current loaded up with our first task... */
-	lda	$8,init_task_union
+	/* We need to get current_task_info loaded up... */
+	lda	$8,init_thread_union
	/* ... and find our stack ... */
	lda	$30,0x4000($8)
	/* ... and then we can start the kernel.  */

arch/alpha/kernel/init_task.c  (new file, mode 100644)

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/init_task.h>
#include <linux/fs.h>

#include <asm/uaccess.h>

static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS;
struct mm_struct init_mm = INIT_MM(init_mm);
struct task_struct init_task = INIT_TASK(init_task);

union thread_union init_thread_union
	__attribute__((section(".data.init_thread")))
	= { INIT_THREAD_INFO(init_task) };
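The .data.init_thread section used here pairs with the 2*8192-byte alignment added to vmlinux.lds.in later in this diff: the boot thread_info sits at the bottom of a 16 KB block whose top is the initial kernel stack, which is why head.S finds the stack at init_thread_union + 0x4000 and entry.S can recover $8 by masking $sp with ~0x3fff. A rough sketch of that layout, under the assumption that thread_union follows the generic 2.5 definition in <linux/sched.h>; the helper name ti_from_sp is hypothetical.

/* Illustrative only -- the real union lives in <linux/sched.h>. */
union thread_union {
	struct thread_info thread_info;	/* at the base of the block */
	unsigned long stack[16384 / sizeof(unsigned long)];
};

/* C equivalent of "bic $30,0x3fff,$8" used by the assembly above. */
static inline struct thread_info *ti_from_sp(unsigned long sp)
{
	return (struct thread_info *) (sp & ~(16384UL - 1));
}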
arch/alpha/kernel/osf_sys.c
@@ -776,7 +776,7 @@ asmlinkage unsigned long osf_getsysinfo(unsigned long op, void *buffer,
		/* Return current software fp control & status bits.  */
		/* Note that DU doesn't verify available space here.  */
-		w = current->thread.flags & IEEE_SW_MASK;
+		w = current_thread_info()->ieee_state & IEEE_SW_MASK;
		w = swcr_update_status(w, rdfpcr());
		if (put_user(w, (unsigned long *) buffer))
			return -EFAULT;
@@ -793,7 +793,7 @@ asmlinkage unsigned long osf_getsysinfo(unsigned long op, void *buffer,
	case GSI_UACPROC:
		if (nbytes < sizeof(unsigned int))
			return -EINVAL;
-		w = (current->thread.flags >> UAC_SHIFT) & UAC_BITMASK;
+		w = (current_thread_info()->flags >> UAC_SHIFT) & UAC_BITMASK;
		if (put_user(w, (unsigned int *)buffer))
			return -EFAULT;
		return 1;
@@ -840,8 +840,9 @@ asmlinkage unsigned long osf_setsysinfo(unsigned long op, void *buffer,
		/* Update softare trap enable bits.  */
		if (get_user(swcr, (unsigned long *)buffer))
			return -EFAULT;
-		current->thread.flags &= ~IEEE_SW_MASK;
-		current->thread.flags |= swcr & IEEE_SW_MASK;
+		current_thread_info()->ieee_state
+		  = ((current_thread_info()->ieee_state & ~IEEE_SW_MASK)
+		     | (swcr & IEEE_SW_MASK));

		/* Update the real fpcr.  */
		fpcr = rdfpcr();
@@ -869,18 +870,23 @@ asmlinkage unsigned long osf_setsysinfo(unsigned long op, void *buffer,
	case SSI_NVPAIRS: {
		unsigned long v, w, i;
+		unsigned int old, new;

		for (i = 0; i < nbytes; ++i) {
			if (get_user(v, 2*i + (unsigned int *)buffer))
				return -EFAULT;
			if (get_user(w, 2*i + 1 + (unsigned int *)buffer))
				return -EFAULT;
			switch (v) {
			case SSIN_UACPROC:
-				current->thread.flags &= ~(UAC_BITMASK << UAC_SHIFT);
-				current->thread.flags |= (w & UAC_BITMASK) << UAC_SHIFT;
+			again:
+				old = current_thread_info()->flags;
+				new = old & ~(UAC_BITMASK << UAC_SHIFT);
+				new = new | (w & UAC_BITMASK) << UAC_SHIFT;
+				if (cmpxchg(&current_thread_info()->flags,
+					    old, new) != old)
+					goto again;
				break;

			default:

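The SSIN_UACPROC update above is the standard lock-free read-modify-write retry: snapshot the flags word, compute the new value from the snapshot, and let cmpxchg() detect a concurrent writer. The same pattern in isolation, as a generic sketch; the function and parameter names are hypothetical, not from the patch.

static void set_bits_atomically(unsigned int *word, unsigned int set_mask)
{
	unsigned int old, new;

	do {
		old = *word;			/* snapshot */
		new = old | set_mask;		/* update computed from snapshot */
	} while (cmpxchg(word, old, new) != old);	/* retry if it changed */
}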
arch/alpha/kernel/pci_iommu.c
@@ -411,13 +411,8 @@ pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu_addr,
   Write dma_length of each leader with the combined lengths of
   the mergable followers.  */

-#define SG_ENT_VIRT_ADDRESS(SG) \
-	((SG)->address \
-	 ? (SG)->address \
-	 : page_address((SG)->page) + (SG)->offset)
-
-#define SG_ENT_PHYS_ADDRESS(SG) \
-	__pa(SG_ENT_VIRT_ADDRESS(SG))
+#define SG_ENT_VIRT_ADDRESS(SG) (page_address((SG)->page) + (SG)->offset)
+#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))

static void
sg_classify(struct scatterlist *sg, struct scatterlist *end, int virt_ok)

arch/alpha/kernel/process.c
@@ -42,22 +42,6 @@
#include "proto.h"
#include "pci_impl.h"

-/*
- * Initial task structure. Make this a per-architecture thing,
- * because different architectures tend to have different
- * alignment requirements and potentially different initial
- * setup.
- */
-unsigned long init_user_stack[1024] = { STACK_MAGIC, };
-static struct fs_struct init_fs = INIT_FS;
-static struct files_struct init_files = INIT_FILES;
-static struct signal_struct init_signals = INIT_SIGNALS;
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-union task_union init_task_union __attribute__((section("init_task"))) =
-	{ task: INIT_TASK(init_task_union.task) };
-
/*
 * No need to acquire the kernel lock, we're entirely local..
 */
@@ -73,18 +57,12 @@ sys_sethae(unsigned long hae, unsigned long a1, unsigned long a2,
void
cpu_idle(void)
{
	/* An endless idle loop with no priority at all.  */
	current->nice = 20;

	while (1) {
		/* FIXME -- EV6 and LCA45 know how to power down
		   the CPU.  */

-		/* Although we are an idle CPU, we do not want to
-		   get into the scheduler unnecessarily.  */
-		long oldval = xchg(&current->work.need_resched, -1UL);
-		if (!oldval)
-			while (current->work.need_resched < 0);
+		while (!need_resched())
+			barrier();
		schedule();
		check_pgt_cache();
	}
@@ -255,7 +233,7 @@ flush_thread(void)
{
	/* Arrange for each exec'ed process to start off with a clean slate
	   with respect to the FPU.  This is all exceptions disabled.  */
-	current->thread.flags &= ~IEEE_SW_MASK;
+	current_thread_info()->ieee_state = 0;
	wrfpcr(FPCR_DYN_NORMAL | ieee_swcr_to_fpcr(0));
}
@@ -294,8 +272,8 @@ alpha_vfork(struct switch_stack * swstack)
 *
 * Note the "stack_offset" stuff: when returning to kernel mode, we need
 * to have some extra stack-space for the kernel stack that still exists
- * after the "ret_from_sys_call". When returning to user mode, we only
- * want the space needed by the syscall stack frame (ie "struct pt_regs").
+ * after the "ret_from_fork". When returning to user mode, we only want
+ * the space needed by the syscall stack frame (ie "struct pt_regs").
 * Use the passed "regs" pointer to determine how much space we need
 * for a kernel fork().
 */
@@ -305,9 +283,9 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
	    unsigned long unused,
	    struct task_struct * p, struct pt_regs * regs)
{
-	extern void ret_from_sys_call(void);
	extern void ret_from_fork(void);

+	struct thread_info *childti = p->thread_info;
	struct pt_regs * childregs;
	struct switch_stack * childstack, *stack;
	unsigned long stack_offset;
@@ -315,7 +293,8 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
	stack_offset = PAGE_SIZE - sizeof(struct pt_regs);
	if (!(regs->ps & 8))
		stack_offset = (PAGE_SIZE-1) & (unsigned long) regs;
-	childregs = (struct pt_regs *) (stack_offset + PAGE_SIZE + (long)p);
+	childregs = (struct pt_regs *)
+	  (stack_offset + PAGE_SIZE + (long) childti);
	*childregs = *regs;
	childregs->r0 = 0;
@@ -326,10 +305,9 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
	childstack = ((struct switch_stack *) childregs) - 1;
	*childstack = *stack;
	childstack->r26 = (unsigned long) ret_from_fork;
-	p->thread.usp = usp;
-	p->thread.ksp = (unsigned long) childstack;
-	p->thread.pal_flags = 1;	/* set FEN, clear everything else */
-	p->thread.flags = current->thread.flags;
+	childti->pcb.usp = usp;
+	childti->pcb.ksp = (unsigned long) childstack;
+	childti->pcb.flags = 1;	/* set FEN, clear everything else */
	return 0;
}
@@ -432,6 +410,35 @@ sys_execve(char *ufilename, char **argv, char **envp,
	return error;
}

+/*
+ * Return saved PC of a blocked thread.  This assumes the frame
+ * pointer is the 6th saved long on the kernel stack and that the
+ * saved return address is the first long in the frame.  This all
+ * holds provided the thread blocked through a call to schedule() ($15
+ * is the frame pointer in schedule() and $15 is saved at offset 48 by
+ * entry.S:do_switch_stack).
+ *
+ * Under heavy swap load I've seen this lose in an ugly way.  So do
+ * some extra sanity checking on the ranges we expect these pointers
+ * to be in so that we can fail gracefully.  This is just for ps after
+ * all.  -- r~
+ */
+unsigned long
+thread_saved_pc(task_t *t)
+{
+	unsigned long base = (unsigned long)t->thread_info;
+	unsigned long fp, sp = t->thread_info->pcb.ksp;
+
+	if (sp > base && sp+6*8 < base + 16*1024) {
+		fp = ((unsigned long*)sp)[6];
+		if (fp > sp && fp < base + 16*1024)
+			return *(unsigned long *)fp;
+	}
+
+	return 0;
+}
+
/*
 * These bracket the sleeping functions..
 */
@@ -457,9 +464,9 @@ get_wchan(struct task_struct *p)
	 * after all...
	 */
-	pc = thread_saved_pc(&p->thread);
+	pc = thread_saved_pc(p);
	if (pc >= first_sched && pc < last_sched) {
-		schedule_frame = ((unsigned long *)p->thread.ksp)[6];
+		schedule_frame = ((unsigned long *)p->thread_info->pcb.ksp)[6];
		return ((unsigned long *)schedule_frame)[12];
	}
	return pc;

arch/alpha/kernel/ptrace.c
@@ -101,7 +101,7 @@ get_reg_addr(struct task_struct * task, unsigned long regno)
	long *addr;

	if (regno == 30) {
-		addr = &task->thread.usp;
+		addr = &task->thread_info->pcb.usp;
	} else if (regno == 31 || regno > 64) {
		zero = 0;
		addr = &zero;
@@ -120,7 +120,8 @@ get_reg(struct task_struct * task, unsigned long regno)
	/* Special hack for fpcr -- combine hardware and software bits.  */
	if (regno == 63) {
		unsigned long fpcr = *get_reg_addr(task, regno);
-		unsigned long swcr = task->thread.flags & IEEE_SW_MASK;
+		unsigned long swcr
+		  = task->thread_info->ieee_state & IEEE_SW_MASK;
		swcr = swcr_update_status(swcr, fpcr);
		return fpcr | swcr;
	}
@@ -134,7 +135,8 @@ static int
put_reg(struct task_struct *task, unsigned long regno, long data)
{
	if (regno == 63) {
-		task->thread.flags = ((task->thread.flags & ~IEEE_SW_MASK)
+		task->thread_info->ieee_state
+		  = ((task->thread_info->ieee_state & ~IEEE_SW_MASK)
		      | (data & IEEE_SW_MASK));
		data = (data & FPCR_DYN_MASK) | ieee_swcr_to_fpcr(data);
	}
@@ -182,31 +184,34 @@ ptrace_set_bpt(struct task_struct * child)
		 * branch (emulation can be tricky for fp branches).
		 */
		displ = ((s32)(insn << 11)) >> 9;
-		child->thread.bpt_addr[nsaved++] = pc + 4;
+		child->thread_info->bpt_addr[nsaved++] = pc + 4;
		if (displ)		/* guard against unoptimized code */
-			child->thread.bpt_addr[nsaved++] = pc + 4 + displ;
+			child->thread_info->bpt_addr[nsaved++]
+			  = pc + 4 + displ;
		DBG(DBG_BPT, ("execing branch\n"));
	} else if (op_code == 0x1a) {
		reg_b = (insn >> 16) & 0x1f;
-		child->thread.bpt_addr[nsaved++] = get_reg(child, reg_b);
+		child->thread_info->bpt_addr[nsaved++] = get_reg(child, reg_b);
		DBG(DBG_BPT, ("execing jump\n"));
	} else {
-		child->thread.bpt_addr[nsaved++] = pc + 4;
+		child->thread_info->bpt_addr[nsaved++] = pc + 4;
		DBG(DBG_BPT, ("execing normal insn\n"));
	}

	/* install breakpoints: */
	for (i = 0; i < nsaved; ++i) {
-		res = read_int(child, child->thread.bpt_addr[i], &insn);
+		res = read_int(child, child->thread_info->bpt_addr[i], &insn);
		if (res < 0)
			return res;
-		child->thread.bpt_insn[i] = insn;
-		DBG(DBG_BPT, ("  -> next_pc=%lx\n", child->thread.bpt_addr[i]));
-		res = write_int(child, child->thread.bpt_addr[i], BREAKINST);
+		child->thread_info->bpt_insn[i] = insn;
+		DBG(DBG_BPT, ("  -> next_pc=%lx\n",
+			      child->thread_info->bpt_addr[i]));
+		res = write_int(child, child->thread_info->bpt_addr[i],
+				BREAKINST);
		if (res < 0)
			return res;
	}
-	child->thread.bpt_nsaved = nsaved;
+	child->thread_info->bpt_nsaved = nsaved;
	return 0;
}
@@ -217,9 +222,9 @@ ptrace_set_bpt(struct task_struct * child)
int
ptrace_cancel_bpt(struct task_struct * child)
{
-	int i, nsaved = child->thread.bpt_nsaved;
+	int i, nsaved = child->thread_info->bpt_nsaved;

-	child->thread.bpt_nsaved = 0;
+	child->thread_info->bpt_nsaved = 0;

	if (nsaved > 2) {
		printk("ptrace_cancel_bpt: bogus nsaved: %d!\n", nsaved);
@@ -227,8 +232,8 @@ ptrace_cancel_bpt(struct task_struct * child)
	}

	for (i = 0; i < nsaved; ++i) {
-		write_int(child, child->thread.bpt_addr[i],
-			  child->thread.bpt_insn[i]);
+		write_int(child, child->thread_info->bpt_addr[i],
+			  child->thread_info->bpt_insn[i]);
	}
	return (nsaved != 0);
}
@@ -335,9 +340,9 @@ sys_ptrace(long request, long pid, long addr, long data,
		if ((unsigned long) data > _NSIG)
			goto out;
		if (request == PTRACE_SYSCALL)
-			child->ptrace |= PT_TRACESYS;
+			set_thread_flag(TIF_SYSCALL_TRACE);
		else
-			child->ptrace &= ~PT_TRACESYS;
+			clear_thread_flag(TIF_SYSCALL_TRACE);
		child->exit_code = data;
		wake_up_process(child);
		/* make sure single-step breakpoint is gone. */
@@ -364,8 +369,9 @@ sys_ptrace(long request, long pid, long addr, long data,
		ret = -EIO;
		if ((unsigned long) data > _NSIG)
			goto out;
-		child->thread.bpt_nsaved = -1;	/* mark single-stepping */
-		child->ptrace &= ~PT_TRACESYS;
+		/* Mark single stepping.  */
+		child->thread_info->bpt_nsaved = -1;
+		clear_thread_flag(TIF_SYSCALL_TRACE);
		wake_up_process(child);
		child->exit_code = data;
		/* give it a chance to run. */
@@ -381,7 +387,7 @@ sys_ptrace(long request, long pid, long addr, long data,
		goto out;
	}
out:
-	free_task_struct(child);
+	put_task_struct(child);
out_notsk:
	unlock_kernel();
	return ret;
@@ -390,8 +396,9 @@ sys_ptrace(long request, long pid, long addr, long data,
asmlinkage void
syscall_trace(void)
{
-	if ((current->ptrace & (PT_PTRACED|PT_TRACESYS))
-	    != (PT_PTRACED|PT_TRACESYS))
+	if (!test_thread_flag(TIF_SYSCALL_TRACE))
+		return;
+	if (!(current->ptrace & PT_PTRACED))
		return;
	current->exit_code = SIGTRAP;
	current->state = TASK_STOPPED;

arch/alpha/kernel/signal.c
@@ -32,8 +32,8 @@
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))

asmlinkage void ret_from_sys_call(void);
-asmlinkage int do_signal(sigset_t *, struct pt_regs *,
-			 struct switch_stack *, unsigned long, unsigned long);
+static int do_signal(sigset_t *, struct pt_regs *,
+		     struct switch_stack *, unsigned long, unsigned long);

int
copy_siginfo_to_user(siginfo_t *to, siginfo_t *from)
@@ -618,7 +618,7 @@ syscall_restart(unsigned long r0, unsigned long r19,
 * restart. "r0" is also used as an indicator whether we can restart at
 * all (if we get here from anything but a syscall return, it will be 0)
 */
-asmlinkage int
+static int
do_signal(sigset_t *oldset, struct pt_regs * regs, struct switch_stack * sw,
	  unsigned long r0, unsigned long r19)
{
@@ -744,3 +744,12 @@ do_signal(sigset_t *oldset, struct pt_regs * regs, struct switch_stack * sw,
	return 0;
}
+
+void
+do_notify_resume(sigset_t *oldset, struct pt_regs *regs,
+		 struct switch_stack *sw, unsigned long r0,
+		 unsigned long r19, unsigned long thread_info_flags)
+{
+	if (thread_info_flags & _TIF_SIGPENDING)
+		do_signal(oldset, regs, sw, r0, r19);
+}

arch/alpha/kernel/traps.c
@@ -131,8 +131,8 @@ dik_show_trace(unsigned long *sp)
void show_trace_task(struct task_struct * tsk)
{
-	struct thread_struct * thread = &tsk->thread;
-	unsigned long fp, sp = thread->ksp, base = (unsigned long) thread;
+	struct thread_info * ti = tsk->thread_info;
+	unsigned long fp, sp = ti->pcb.ksp, base = (unsigned long) ti;

	if (sp > base && sp+6*8 < base + 16*1024) {
		fp = ((unsigned long*)sp)[6];
@@ -180,12 +180,11 @@ die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15)
	dik_show_trace((unsigned long *)(regs+1));
	dik_show_code((unsigned int *)regs->pc);

-	if (current->thread.flags & (1UL << 63)) {
+	if (test_and_set_thread_flag(TIF_DIE_IF_KERNEL)) {
		printk("die_if_kernel recursion detected.\n");
		sti();
		while (1);
	}
-	current->thread.flags |= (1UL << 63);
	do_exit(SIGSEGV);
}
@@ -232,6 +231,13 @@ do_entIF(unsigned long type, unsigned long a1,
	 unsigned long a5, struct pt_regs regs)
{
	if (!opDEC_testing || type != 4) {
+		if (type == 1) {
+			const unsigned int *data
+			  = (const unsigned int *) regs.pc;
+			printk("Kernel bug at %s:%d\n",
+			       (const char *)(data[1] | (long)data[2] << 32),
+			       data[0]);
+		}
		die_if_kernel((type == 1 ? "Kernel Bug" : "Instruction fault"),
			      &regs, type, 0);
	}
@@ -324,8 +330,8 @@ do_entIF(unsigned long type, unsigned long a1,
		     FP registers, PAL_clrfen is not useful except for DoS
		     attacks.  So turn the bleeding FPU back on and be done
		     with it.  */
-		  current->thread.pal_flags |= 1;
-		  __reload_thread(&current->thread);
+		  current_thread_info()->pcb.flags |= 1;
+		  __reload_thread(&current_thread_info()->pcb);
		  return;

	      case 5: /* illoc */
@@ -605,12 +611,11 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
	dik_show_code((unsigned int *)pc);
	dik_show_trace((unsigned long *)(&regs+1));

-	if (current->thread.flags & (1UL << 63)) {
+	if (test_and_set_thread_flag(TIF_DIE_IF_KERNEL)) {
		printk("die_if_kernel recursion detected.\n");
		sti();
		while (1);
	}
-	current->thread.flags |= (1UL << 63);
	do_exit(SIGSEGV);
}
@@ -706,14 +711,12 @@ do_entUnaUser(void * va, unsigned long opcode,
	unsigned long tmp1, tmp2, tmp3, tmp4;
	unsigned long fake_reg, *reg_addr = &fake_reg;
-	unsigned long uac_bits;
	long error;

	/* Check the UAC bits to decide what the user wants us to do
	   with the unaliged access.  */
-	uac_bits = (current->thread.flags >> UAC_SHIFT) & UAC_BITMASK;
-	if (!(uac_bits & UAC_NOPRINT)) {
+	if (!test_thread_flag(TIF_UAC_NOPRINT)) {
		if (cnt >= 5 && jiffies - last_time > 5*HZ) {
			cnt = 0;
		}
@@ -724,13 +727,11 @@ do_entUnaUser(void * va, unsigned long opcode,
		}
		last_time = jiffies;
	}
-	if (uac_bits & UAC_SIGBUS) {
+	if (test_thread_flag(TIF_UAC_SIGBUS)) {
		goto give_sigbus;
	}
-	if (uac_bits & UAC_NOFIX) {
-		/* Not sure why you'd want to use this, but... */
+	if (test_thread_flag(TIF_UAC_NOFIX)) {
+		/* Not sure why you'd want to use this, but... */
		return;
	}

	/* Don't bother reading ds in the access check since we already
	   know that this came from the user.  Also rely on the fact that

arch/alpha/lib/ev6-memset.S
@@ -236,7 +236,7 @@ end_b:
 * entry point.
 */
	.align 4
-	.ent __memset
+	.ent __constant_c_memset
__constant_c_memset:
	.frame $30,0,$26,0
	.prologue 0

arch/alpha/math-emu/math.c
@@ -114,7 +114,7 @@ alpha_fp_emul (unsigned long pc)
	mode = (insn >> 11) & 0x3;

	fpcr = rdfpcr();
-	swcr = swcr_update_status(current->thread.flags, fpcr);
+	swcr = swcr_update_status(current_thread_info()->ieee_state, fpcr);

	if (mode == 3) {
		/* Dynamic -- get rounding mode from fpcr.  */
@@ -297,7 +297,8 @@ alpha_fp_emul (unsigned long pc)
	if (_fex) {
		/* Record exceptions in software control word.  */
		swcr |= (_fex << IEEE_STATUS_TO_EXCSUM_SHIFT);
-		current->thread.flags |= (_fex << IEEE_STATUS_TO_EXCSUM_SHIFT);
+		current_thread_info()->ieee_state
+		  |= (_fex << IEEE_STATUS_TO_EXCSUM_SHIFT);

		/* Update hardware control register.  */
		fpcr &= (~FPCR_MASK | FPCR_DYN_MASK);

arch/alpha/mm/fault.c
@@ -43,14 +43,16 @@ extern void
__load_new_mm_context(struct mm_struct *next_mm)
{
	unsigned long mmc;
+	struct pcb_struct *pcb;

	mmc = __get_new_mm_context(next_mm, smp_processor_id());
	next_mm->context[smp_processor_id()] = mmc;
-	current->thread.asn = mmc & HARDWARE_ASN_MASK;
-	current->thread.ptbr
-	  = ((unsigned long) next_mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;

-	__reload_thread(&current->thread);
+	pcb = &current_thread_info()->pcb;
+	pcb->asn = mmc & HARDWARE_ASN_MASK;
+	pcb->ptbr = ((unsigned long) next_mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
+
+	__reload_thread(pcb);
}

arch/alpha/mm/init.c
@@ -40,7 +40,7 @@ unsigned long totalram_pages;
extern void die_if_kernel(char *, struct pt_regs *, long);

-struct thread_struct original_pcb;
+static struct pcb_struct original_pcb;

#ifndef CONFIG_SMP
struct pgtable_cache_struct quicklists;
@@ -151,7 +151,7 @@ show_mem(void)
#endif

static inline unsigned long
-load_PCB(struct thread_struct *pcb)
+load_PCB(struct pcb_struct *pcb)
{
	register unsigned long sp __asm__("$30");
	pcb->ksp = sp;
@@ -182,10 +182,9 @@ switch_to_system_map(void)
	}

	/* Also set up the real kernel PCB while we're at it.  */
-	init_task.thread.ptbr = newptbr;
-	init_task.thread.pal_flags = 1;	/* set FEN, clear everything else */
-	init_task.thread.flags = 0;
-	original_pcb_ptr = load_PCB(&init_task.thread);
+	init_thread_info.pcb.ptbr = newptbr;
+	init_thread_info.pcb.flags = 1;	/* set FEN, clear everything else */
+	original_pcb_ptr = load_PCB(&init_thread_info.pcb);
	tbia();

	/* Save off the contents of the original PCB so that we can
@@ -199,7 +198,7 @@ switch_to_system_map(void)
		original_pcb_ptr = (unsigned long)
		  phys_to_virt(original_pcb_ptr);
	}
-	original_pcb = *(struct thread_struct *) original_pcb_ptr;
+	original_pcb = *(struct pcb_struct *) original_pcb_ptr;
}

int callback_init_done;
@@ -270,7 +269,7 @@ callback_init(void * kernel_end)
	/* Let vmalloc know that we've allocated some space.  */
	console_remap_vm.flags = VM_ALLOC;
-	console_remap_vm.addr = VMALLOC_START;
+	console_remap_vm.addr = (void *) VMALLOC_START;
	console_remap_vm.size = vaddr - VMALLOC_START;
	vmlist = &console_remap_vm;
}

arch/alpha/vmlinux.lds.in
@@ -11,37 +11,42 @@ SECTIONS
  . = 0xfffffc0000810000;
#endif

-  _text = .;
-  .text : { *(.text) } :kernel
-  _etext = .;
+  .text : {
+    _text = .;
+    *(.text)
+    _etext = .;
+  } :kernel

  /* Exception table */
-  . = ALIGN(16);
-  __start___ex_table = .;
-  __ex_table : { *(__ex_table) }
-  __stop___ex_table = .;
+  __ex_table ALIGN(16) : {
+    __start___ex_table = .;
+    *(__ex_table)
+    __stop___ex_table = .;
+  }

  /* Kernel symbol table */
-  . = ALIGN(8);
-  __start___ksymtab = .;
-  __ksymtab : { *(__ksymtab) }
-  __stop___ksymtab = .;
+  __ksymtab ALIGN(8) : {
+    __start___ksymtab = .;
+    *(__ksymtab)
+    __stop___ksymtab = .;
+  }

  .kstrtab : { *(.kstrtab) }

  /* Startup code */
-  . = ALIGN(8192);
-  __init_begin = .;
-  .text.init : { *(.text.init) }
+  .text.init ALIGN(8192) : {
+    __init_begin = .;
+    *(.text.init)
+  }
  .data.init : { *(.data.init) }

-  . = ALIGN(16);
-  __setup_start = .;
-  .setup.init : { *(.setup.init) }
-  __setup_end = .;
+  .setup.init ALIGN(16): {
+    __setup_start = .;
+    *(.setup.init)
+    __setup_end = .;
+  }

-  . = ALIGN(8);
-  __initcall_start = .;
-  .initcall.init : {
+  .initcall.init ALIGN(8): {
+    __initcall_start = .;
    *(.initcall1.init)
    *(.initcall2.init)
    *(.initcall3.init)
@@ -49,29 +54,37 @@ SECTIONS
    *(.initcall5.init)
    *(.initcall6.init)
    *(.initcall7.init)
+    __initcall_end = .;
  }
-  __initcall_end = .;
-
-  . = ALIGN(2*8192);	/* Align double page for init_task_union */
-  __init_end = .;

  /* The initial task and kernel stack */
-  init_task : { *(init_task) }
+  .data.init_thread ALIGN(2*8192) : {
+    __init_end = .;
+    *(.data.init_thread)
+  }

  /* Global data */
-  _data = .;
-  .data.cacheline_aligned : { *(.data.cacheline_aligned) }
+  .data.cacheline_aligned : {
+    _data = .;
+    *(.data.cacheline_aligned)
+  }
  .rodata : { *(.rodata) *(.rodata.*) }
  .data : { *(.data) CONSTRUCTORS }
  .got : { *(.got) }
-  .sdata : { *(.sdata) }
-  _edata = .;
+  .sdata : {
+    *(.sdata)
+    _edata = .;
+  }

-  __bss_start = .;
-  .sbss : { *(.sbss) *(.scommon) }
-  .bss : { *(.bss) *(COMMON) }
-  __bss_stop = .;
-  _end = .;
+  .sbss : {
+    __bss_start = .;
+    *(.sbss) *(.scommon)
+  }
+  .bss : {
+    *(.bss) *(COMMON)
+    __bss_stop = .;
+    _end = .;
+  }

  .mdebug 0 : { *(.mdebug) }
  .note 0 : { *(.note) }

include/asm-alpha/asm_offsets.h  (deleted, mode 100644 → 0)

-#ifndef __ASM_OFFSETS_H__
-#define __ASM_OFFSETS_H__
-
-#define TASK_STATE 0
-#define TASK_FLAGS 8
-#define TASK_SIGPENDING 16
-#define TASK_ADDR_LIMIT 24
-#define TASK_EXEC_DOMAIN 32
-#define TASK_NEED_RESCHED 40
-#define TASK_SIZE 1096
-#define STACK_SIZE 16384
-#define HAE_CACHE 0
-#define HAE_REG 8
-
-#endif /* __ASM_OFFSETS_H__ */
include/asm-alpha/bitops.h
@@ -74,11 +74,11 @@ clear_bit(unsigned long nr, volatile void * addr)
 * WARNING: non atomic version.
 */
static __inline__ void
-__change_bit(unsigned long nr, volatile void * addr)
+__clear_bit(unsigned long nr, volatile void * addr)
{
	int *m = ((int *) addr) + (nr >> 5);

-	*m ^= 1 << (nr & 31);
+	*m &= ~(1 << (nr & 31));
}

static inline void
@@ -99,6 +99,17 @@ change_bit(unsigned long nr, volatile void * addr)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}

+/*
+ * WARNING: non atomic version.
+ */
+static __inline__ void
+__change_bit(unsigned long nr, volatile void * addr)
+{
+	int *m = ((int *) addr) + (nr >> 5);
+
+	*m ^= 1 << (nr & 31);
+}
+
static inline int
test_and_set_bit(unsigned long nr, volatile void *addr)
{
@@ -181,20 +192,6 @@ __test_and_clear_bit(unsigned long nr, volatile void * addr)
	return (old & mask) != 0;
}

-/*
- * WARNING: non atomic version.
- */
-static __inline__ int
-__test_and_change_bit(unsigned long nr, volatile void * addr)
-{
-	unsigned long mask = 1 << (nr & 0x1f);
-	int *m = ((int *) addr) + (nr >> 5);
-	int old = *m;
-
-	*m = old ^ mask;
-	return (old & mask) != 0;
-}
-
static inline int
test_and_change_bit(unsigned long nr, volatile void * addr)
{
@@ -220,6 +217,20 @@ test_and_change_bit(unsigned long nr, volatile void * addr)
	return oldbit != 0;
}

+/*
+ * WARNING: non atomic version.
+ */
+static __inline__ int
+__test_and_change_bit(unsigned long nr, volatile void * addr)
+{
+	unsigned long mask = 1 << (nr & 0x1f);
+	int *m = ((int *) addr) + (nr >> 5);
+	int old = *m;
+
+	*m = old ^ mask;
+	return (old & mask) != 0;
+}
+
static inline int
test_bit(int nr, volatile void * addr)
{
@@ -264,17 +275,39 @@ static inline unsigned long ffz(unsigned long word)
#endif
}

+/*
+ * __ffs = Find First set bit in word.  Undefined if no set bit exists.
+ */
+static inline unsigned long __ffs(unsigned long word)
+{
+#if defined(__alpha_cix__) && defined(__alpha_fix__)
+	/* Whee.  EV67 can calculate it directly.  */
+	unsigned long result;
+	__asm__("cttz %1,%0" : "=r"(result) : "r"(word));
+	return result;
+#else
+	unsigned long bits, qofs, bofs;
+
+	__asm__("cmpbge $31,%1,%0" : "=r"(bits) : "r"(word));
+	qofs = ffz_b(bits);
+	__asm__("extbl %1,%2,%0" : "=r"(bits) : "r"(word), "r"(qofs));
+	bofs = ffz_b(~bits);
+
+	return qofs*8 + bofs;
+#endif
+}
+
#ifdef __KERNEL__

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
+ * differs in spirit from the above __ffs.
 */

static inline int ffs(int word)
{
-	int result = ffz(~word);
+	int result = __ffs(word);
	return word ? result+1 : 0;
}
@@ -365,10 +398,53 @@ find_next_zero_bit(void * addr, unsigned long size, unsigned long offset)
}

/*
- * The optimizer actually does good code for this case..
+ * Find next one bit in a bitmap reasonably efficiently.
 */
+static inline unsigned long
+find_next_bit(void * addr, unsigned long size, unsigned long offset)
+{
+	unsigned long * p = ((unsigned long *) addr) + (offset >> 6);
+	unsigned long result = offset & ~63UL;
+	unsigned long tmp;
+
+	if (offset >= size)
+		return size;
+	size -= result;
+	offset &= 63UL;
+	if (offset) {
+		tmp = *(p++);
+		tmp &= ~0UL << offset;
+		if (size < 64)
+			goto found_first;
+		if (tmp)
+			goto found_middle;
+		size -= 64;
+		result += 64;
+	}
+	while (size & ~63UL) {
+		if ((tmp = *(p++)))
+			goto found_middle;
+		result += 64;
+		size -= 64;
+	}
+	if (!size)
+		return result;
+	tmp = *p;
+found_first:
+	tmp &= ~0UL >> (64 - size);
+	if (!tmp)
+		return result + size;
+found_middle:
+	return result + __ffs(tmp);
+}
+
+/*
+ * The optimizer actually does good code for this case.
+ */
#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)
+#define find_first_bit(addr, size) \
+	find_next_bit((addr), (size), 0)

#ifdef __KERNEL__

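The new find_first_bit()/find_next_bit() pair follows the same calling convention as the existing zero-bit variants: (bitmap, size in bits[, starting offset]). A small usage sketch; the helper name and the loop body are hypothetical, not from the patch.

/* Iterate over every set bit in a bitmap of nbits bits. */
static void iterate_set_bits(unsigned long *bitmap, unsigned long nbits)
{
	unsigned long bit = find_first_bit(bitmap, nbits);

	while (bit < nbits) {
		/* ... act on 'bit' ... */
		bit = find_next_bit(bitmap, nbits, bit + 1);
	}
}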
include/asm-alpha/current.h
#ifndef _ALPHA_CURRENT_H
#define _ALPHA_CURRENT_H

-register struct task_struct *current __asm__("$8");
+#include <asm/thread_info.h>

-#endif /* !(_ALPHA_CURRENT_H) */
+#define get_current()	(current_thread_info()->task + 0)
+#define current		get_current()
+
+#endif /* _ALPHA_CURRENT_H */
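With this change $8 no longer carries the task pointer itself: it carries the thread_info pointer, and `current` becomes a macro that chases thread_info->task. A short sketch of what that means for callers; the two helper functions are hypothetical and only restate the macros above.

static inline int my_pid(void)
{
	return current->pid;		/* task-level state, one extra hop */
}

static inline unsigned int my_ti_flags(void)
{
	return current_thread_info()->flags;	/* low-level per-thread state */
}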
include/asm-alpha/fpu.h
@@ -31,7 +31,7 @@
/*
 * IEEE trap enables are implemented in software.  These per-thread
- * bits are stored in the "flags" field of "struct thread_struct".
+ * bits are stored in the "ieee_state" field of "struct thread_info".
 * Thus, the bits are defined so as not to conflict with the
 * floating-point enable bit (which is architected).  On top of that,
 * we want to make these bits compatible with OSF/1 so

include/asm-alpha/io.h
@@ -18,6 +18,7 @@
#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/system.h>
+#include <asm/pgtable.h>
#include <asm/machvec.h>

/*
@@ -60,7 +61,10 @@ static inline void * phys_to_virt(unsigned long address)
	return (void *) (address + IDENT_ADDR);
}

-#define page_to_phys(page)	(((page) - (page)->zone->zone_mem_map) << PAGE_SHIFT)
+#define page_to_phys(page)	PAGE_TO_PA(page)
+
+/* This depends on working iommu.  */
+#define BIO_VMERGE_BOUNDARY	(alpha_mv.mv_pci_tbi ? PAGE_SIZE : 0)

/*
 * Change addresses as seen by the kernel (virtual) to addresses as

include/asm-alpha/mmu_context.h
@@ -21,8 +21,34 @@
#include <asm/io.h>
#endif

+/* ??? This does not belong here.  */
+/*
+ * Every architecture must define this function. It's the fastest
+ * way of searching a 168-bit bitmap where the first 128 bits are
+ * unlikely to be set. It's guaranteed that at least one of the 168
+ * bits is set.
+ */
+#if MAX_RT_PRIO != 128 || MAX_PRIO > 192
+# error update this function.
+#endif
+
+static inline int sched_find_first_bit(unsigned long *b)
+{
+	unsigned long b0 = b[0], b1 = b[1], b2 = b[2];
+	unsigned long offset = 128;
+
+	if (unlikely(b0 | b1)) {
+		b2 = (b0 ? b0 : b1);
+		offset = (b0 ? 0 : 64);
+	}
+
+	return __ffs(b2) + offset;
+}
+
extern inline unsigned long
-__reload_thread(struct thread_struct *pcb)
+__reload_thread(struct pcb_struct *pcb)
{
	register unsigned long a0 __asm__("$16");
	register unsigned long v0 __asm__("$0");
@@ -153,7 +179,7 @@ ev5_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
	/* Always update the PCB ASN.  Another thread may have allocated
	   a new mm->context (via flush_tlb_mm) without the ASN serial
	   number wrapping.  We have no way to detect when this is needed.  */
-	next->thread.asn = mmc & HARDWARE_ASN_MASK;
+	next->thread_info->pcb.asn = mmc & HARDWARE_ASN_MASK;
}

__EXTERN_INLINE void
@@ -228,7 +254,8 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
	for (i = 0; i < smp_num_cpus; i++)
		mm->context[cpu_logical_map(i)] = 0;
-	tsk->thread.ptbr = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
+	tsk->thread_info->pcb.ptbr
+	  = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
	return 0;
}
@@ -241,7 +268,8 @@ destroy_context(struct mm_struct *mm)
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
{
-	tsk->thread.ptbr = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
+	tsk->thread_info->pcb.ptbr
+	  = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
}

#ifdef __MMU_EXTERN_INLINE

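To illustrate what the three-word fast path in sched_find_first_bit() is optimizing: priorities 0..127 (the real-time range) live in b[0] and b[1] and are rarely populated, so b[2] is usually the word that matters. Below is a hedged, purely illustrative generic equivalent; the function name is hypothetical and it assumes the same precondition that at least one bit is set.

static inline int sched_find_first_bit_generic(unsigned long *b)
{
	int w;

	for (w = 0; w < 3; w++)		/* 3 x 64 = 192 >= MAX_PRIO bits */
		if (b[w])
			return w * 64 + __ffs(b[w]);
	return -1;			/* unreachable if the precondition holds */
}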
include/asm-alpha/page.h
@@ -59,11 +59,11 @@ typedef unsigned long pgprot_t;
#endif /* STRICT_MM_TYPECHECKS */

+/* ??? Would be nice to use .gprel32 here, but we can't be sure that the
+   function loaded the GP, so this could fail in modules.  */
#define BUG() \
-do { \
-	printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
-	__asm__ __volatile__("call_pal %0 # bugchk" : : "i" (PAL_bugchk)); \
-} while (0)
+  __asm__ __volatile__("call_pal %0 # bugchk\n\t"".long %1\n\t.8byte %2" \
+		       : : "i" (PAL_bugchk), "i"(__LINE__), "i"(__FILE__))

#define PAGE_BUG(page) BUG()

include/asm-alpha/processor.h
@@ -38,83 +38,17 @@ typedef struct {
	unsigned long seg;
} mm_segment_t;

-struct thread_struct {
-	/* the fields below are used by PALcode and must match struct pcb: */
-	unsigned long ksp;
-	unsigned long usp;
-	unsigned long ptbr;
-	unsigned int pcc;
-	unsigned int asn;
-	unsigned long unique;
-	/*
-	 * bit  0: floating point enable
-	 * bit 62: performance monitor enable
-	 */
-	unsigned long pal_flags;
-	unsigned long res1, res2;
-
-	/*
-	 * The fields below are Linux-specific:
-	 *
-	 * bit 1..5: IEEE_TRAP_ENABLE bits (see fpu.h)
-	 * bit 6..8: UAC bits (see sysinfo.h)
-	 * bit 17..21: IEEE_STATUS_MASK bits (see fpu.h)
-	 * bit 63: die_if_kernel recursion lock
-	 */
-	unsigned long flags;
-
-	/* Perform syscall argument validation (get/set_fs). */
-	mm_segment_t fs;
-
-	/* Breakpoint handling for ptrace. */
-	unsigned long bpt_addr[2];
-	unsigned int bpt_insn[2];
-	int bpt_nsaved;
-};
-
-#define INIT_THREAD  { \
-	0, 0, 0, \
-	0, 0, 0, \
-	0, 0, 0, \
-	0, \
-	KERNEL_DS \
-}
-
-#define THREAD_SIZE (2*PAGE_SIZE)
+/* This is dead.  Everything has been moved to thread_info.  */
+struct thread_struct { };
+#define INIT_THREAD  { }

#include <asm/ptrace.h>

-/*
- * Return saved PC of a blocked thread.  This assumes the frame
- * pointer is the 6th saved long on the kernel stack and that the
- * saved return address is the first long in the frame.  This all
- * holds provided the thread blocked through a call to schedule() ($15
- * is the frame pointer in schedule() and $15 is saved at offset 48 by
- * entry.S:do_switch_stack).
- *
- * Under heavy swap load I've seen this lose in an ugly way.  So do
- * some extra sanity checking on the ranges we expect these pointers
- * to be in so that we can fail gracefully.  This is just for ps after
- * all.  -- r~
- */
-extern inline unsigned long thread_saved_pc(struct thread_struct *t)
-{
-	unsigned long fp, sp = t->ksp, base = (unsigned long) t;
-
-	if (sp > base && sp+6*8 < base + 16*1024) {
-		fp = ((unsigned long*)sp)[6];
-		if (fp > sp && fp < base + 16*1024)
-			return *(unsigned long *)fp;
-	}
-	return 0;
-}
+/* Return saved PC of a blocked thread.  */
+struct task_struct;
+extern unsigned long thread_saved_pc(struct task_struct *);

/* Do necessary setup to start up a newly executed thread. */
extern void start_thread(struct pt_regs *, unsigned long, unsigned long);

-struct task_struct;
-
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);
@@ -127,26 +61,18 @@ extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
unsigned long get_wchan(struct task_struct *p);

/* See arch/alpha/kernel/ptrace.c for details.  */
-#define PT_REG(reg)	(PAGE_SIZE*2 - sizeof(struct pt_regs)	\
-			 + (long)&((struct pt_regs *)0)->reg)
+#define PT_REG(reg) \
+  (PAGE_SIZE*2 - sizeof(struct pt_regs) + offsetof(struct pt_regs, reg))

-#define SW_REG(reg)	(PAGE_SIZE*2 - sizeof(struct pt_regs)	\
-			 - sizeof(struct switch_stack)		\
-			 + (long)&((struct switch_stack *)0)->reg)
+#define SW_REG(reg) \
+ (PAGE_SIZE*2 - sizeof(struct pt_regs) - sizeof(struct switch_stack) \
+  + offsetof(struct switch_stack, reg))

#define KSTK_EIP(tsk) \
-    (*(unsigned long *)(PT_REG(pc) + (unsigned long)(tsk)))
+  (*(unsigned long *)(PT_REG(pc) + (unsigned long) ((tsk)->thread_info)))

-#define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp)
-
-/* NOTE: The task struct and the stack go together!  */
-#define alloc_task_struct() \
-        ((struct task_struct *) __get_free_pages(GFP_KERNEL,1))
-#define free_task_struct(p)	free_pages((unsigned long)(p),1)
-#define get_task_struct(tsk)	atomic_inc(&virt_to_page(tsk)->count)
-
-#define init_task	(init_task_union.task)
-#define init_stack	(init_task_union.stack)
+#define KSTK_ESP(tsk) \
+  ((tsk) == current ? rdusp() : (tsk)->thread_info->pcb.usp)

#define cpu_relax()	do { } while (0)
@@ -154,21 +80,36 @@ unsigned long get_wchan(struct task_struct *p);
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

+#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1)
+
extern inline void prefetch(const void *ptr)
{
-	__asm__ ("ldl $31,%0" : : "m"(*(char *)ptr));
+	__builtin_prefetch(ptr, 0, 3);
}

extern inline void prefetchw(const void *ptr)
{
-	__asm__ ("ldl $31,%0" : : "m"(*(char *)ptr));
+	__builtin_prefetch(ptr, 1, 3);
}

extern inline void spin_lock_prefetch(const void *ptr)
{
+	__builtin_prefetch(ptr, 1, 3);
+}
+
+#else
+
+extern inline void prefetch(const void *ptr)
+{
+	__asm__ ("ldl $31,%0" : : "m"(*(char *)ptr));
+}
+
+extern inline void prefetchw(const void *ptr)
+{
+	__asm__ ("ldq $31,%0" : : "m"(*(char *)ptr));
+}
+
+extern inline void spin_lock_prefetch(const void *ptr)
+{
	__asm__ ("ldq $31,%0" : : "m"(*(char *)ptr));
}

+#endif /* GCC 3.1 */
+
#endif /* __ASM_ALPHA_PROCESSOR_H */
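For context, both the old inline-asm form and the new __builtin_prefetch form are non-faulting hints, so it is safe to prefetch a pointer that may be NULL. A hedged usage sketch of the usual pattern; the node type, the function, and the field names are hypothetical, not from the patch.

struct node { struct node *next; long payload; };

static long sum_list(struct node *n)
{
	long sum = 0;

	while (n) {
		prefetch(n->next);	/* hint: start pulling in the next node;
					   a NULL hint is a harmless no-op */
		sum += n->payload;
		n = n->next;
	}
	return sum;
}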
include/asm-alpha/smp.h
@@ -2,6 +2,7 @@
#define __ASM_SMP_H

#include <linux/config.h>
#include <linux/threads.h>
+#include <asm/pal.h>

/* HACK: Cabrio WHAMI return value is bogus if more than 8 bits used.. :-( */
@@ -55,7 +56,7 @@ extern int __cpu_logical_map[NR_CPUS];
#define cpu_logical_map(cpu)  __cpu_logical_map[cpu]

#define hard_smp_processor_id()	__hard_smp_processor_id()
-#define smp_processor_id()	(current->processor)
+#define smp_processor_id()	(current_thread_info()->cpu)

extern unsigned long cpu_present_mask;
#define cpu_online_map cpu_present_mask

include/asm-alpha/sysinfo.h
@@ -30,7 +30,7 @@
#ifdef __KERNEL__

/* This is the shift that is applied to the UAC bits as stored in the
-   per-thread flags. */
+   per-thread flags.  See thread_info.h. */
#define UAC_SHIFT	6

#endif

include/asm-alpha/system.h
@@ -134,8 +134,7 @@ extern void halt(void) __attribute__((noreturn));
#define switch_to(prev,next,last)				\
do {								\
	unsigned long pcbb;					\
-	current = (next);					\
-	pcbb = virt_to_phys(&current->thread);			\
+	pcbb = virt_to_phys(&(next)->thread_info->pcb);		\
	(last) = alpha_switch_to(pcbb, (prev));			\
	check_mmu_context();					\
} while (0)

include/asm-alpha/thread_info.h  (new file, mode 100644)

#ifndef _ALPHA_THREAD_INFO_H
#define _ALPHA_THREAD_INFO_H

#ifdef __KERNEL__

#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/types.h>
#include <asm/hwrpb.h>
#endif

#ifndef __ASSEMBLY__
struct thread_info {
	struct pcb_struct	pcb;		/* palcode state */

	struct task_struct	*task;		/* main task structure */
	unsigned int		flags;		/* low level flags */
	unsigned int		ieee_state;	/* see fpu.h */

	struct exec_domain	*exec_domain;	/* execution domain */
	mm_segment_t		addr_limit;	/* thread address space */
	int			cpu;		/* current CPU */

	int bpt_nsaved;
	unsigned long bpt_addr[2];		/* breakpoint handling  */
	unsigned int bpt_insn[2];
};

/*
 * Macros/functions for gaining access to the thread information structure.
 */
#define INIT_THREAD_INFO(tsk)			\
{						\
	task:		&tsk,			\
	exec_domain:	&default_exec_domain,	\
	addr_limit:	KERNEL_DS,		\
}

#define init_thread_info	(init_thread_union.thread_info)
#define init_stack		(init_thread_union.stack)

/* How to get the thread information struct from C.  */
register struct thread_info *__current_thread_info __asm__("$8");
#define current_thread_info()  __current_thread_info

/* Thread information allocation.  */
#define THREAD_SIZE (2*PAGE_SIZE)
#define alloc_thread_info() \
  ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
#define get_thread_info(ti) get_task_struct((ti)->task)
#define put_thread_info(ti) put_task_struct((ti)->task)

#endif /* __ASSEMBLY__ */

/*
 * Thread information flags:
 * - these are process state flags and used from assembly
 * - pending work-to-be-done flags come first to fit in and immediate operand.
 *
 * TIF_SYSCALL_TRACE is known to be 0 via blbs.
 */
#define TIF_SYSCALL_TRACE	0	/* syscall trace active */
#define TIF_NOTIFY_RESUME	1	/* resumption notification requested */
#define TIF_SIGPENDING		2	/* signal pending */
#define TIF_NEED_RESCHED	3	/* rescheduling necessary */
#define TIF_POLLING_NRFLAG	4	/* poll_idle is polling NEED_RESCHED */
#define TIF_DIE_IF_KERNEL	5	/* dik recursion lock */
#define TIF_UAC_NOPRINT		6	/* see sysinfo.h */
#define TIF_UAC_NOFIX		7
#define TIF_UAC_SIGBUS		8

#define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
#define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)

/* Work to do on interrupt/exception return.  */
#define _TIF_WORK_MASK		(_TIF_NOTIFY_RESUME	\
				 | _TIF_SIGPENDING	\
				 | _TIF_NEED_RESCHED)

/* Work to do on any return to userspace.  */
#define _TIF_ALLWORK_MASK	(_TIF_WORK_MASK		\
				 | _TIF_SYSCALL_TRACE)

#endif /* __KERNEL__ */
#endif /* _ALPHA_THREAD_INFO_H */
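The TIF_* bits above are what entry.S tests with _TIF_WORK_MASK and what the generic test_thread_flag()/set_thread_flag() helpers manipulate on current_thread_info()->flags. A tiny hedged sketch of the same check from C; the helper name is hypothetical and only restates definitions from this header.

static inline int work_pending_for_current(void)
{
	/* _TIF_WORK_MASK groups the flags checked on return to user mode. */
	return current_thread_info()->flags & _TIF_WORK_MASK;
}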
include/asm-alpha/uaccess.h
@@ -23,9 +23,9 @@
#define VERIFY_READ	0
#define VERIFY_WRITE	1

-#define get_fs()  (current->thread.fs)
+#define get_fs()  (current_thread_info()->addr_limit)
#define get_ds()  (KERNEL_DS)
-#define set_fs(x) (current->thread.fs = (x))
+#define set_fs(x) (current_thread_info()->addr_limit = (x))

#define segment_eq(a,b)	((a).seg == (b).seg)

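The per-thread address limit simply moved from thread.fs to thread_info->addr_limit; callers keep using the same macros. A hedged sketch of the classic kernel-internal pattern these serve; the function name and the elided body are hypothetical, not from the patch.

static void do_kernel_io(void)
{
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);	/* let the user-copy helpers accept kernel pointers */
	/* ... perform the operation that ends in a copy_*_user() ... */
	set_fs(old_fs);		/* always restore the previous limit */
}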
include/asm-alpha/unistd.h
@@ -506,6 +506,7 @@ type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, type6 arg6)\
#include <linux/string.h>
#include <linux/signal.h>
+#include <asm/ptrace.h>

extern void sys_idle(void);
static inline void idle(void)
@@ -576,6 +577,8 @@ static inline long sync(void)
	return sys_sync();
}

+struct rusage;
+extern asmlinkage long sys_wait4(pid_t, unsigned int *, int, struct rusage *);
static inline pid_t waitpid(int pid, int *wait_stat, int flags)
{
	return sys_wait4(pid, wait_stat, flags, NULL);