Commit c1ee055c, authored Jun 19, 2004 by Nathan Scott

    Merge sgi.com:/source2/linux-2.6 into sgi.com:/source2/xfs-linux-2.6

Parents: 6860067c, 3900b963

Showing 30 changed files with 1261 additions and 1088 deletions.
arch/alpha/mm/numa.c                 +2    -2
arch/ppc64/Kconfig                   +0    -1
arch/ppc64/kernel/align.c            +9    -4
arch/ppc64/kernel/asm-offsets.c      +11   -2
arch/ppc64/kernel/entry.S            +306  -181
arch/ppc64/kernel/head.S             +706  -759
arch/ppc64/kernel/misc.S             +13   -12
arch/ppc64/kernel/pacaData.c         +0    -2
arch/ppc64/kernel/process.c          +69   -24
arch/ppc64/kernel/ptrace.c           +4    -8
arch/ppc64/kernel/ptrace32.c         +6    -12
arch/ppc64/kernel/rtas.c             +4    -2
arch/ppc64/kernel/signal.c           +5    -7
arch/ppc64/kernel/signal32.c         +6    -8
arch/ppc64/kernel/sys_ppc32.c        +2    -6
arch/ppc64/kernel/syscalls.c         +14   -0
arch/ppc64/kernel/traps.c            +17   -5
arch/ppc64/mm/fault.c                +33   -24
arch/ppc64/mm/hash_utils.c           +7    -3
arch/ppc64/mm/hugetlbpage.c          +4    -1
arch/ppc64/mm/init.c                 +4    -1
arch/ppc64/mm/tlb.c                  +5    -1
arch/ppc64/xmon/xmon.c               +0    -3
include/asm-ppc64/hardirq.h          +2    -0
include/asm-ppc64/paca.h             +9    -11
include/asm-ppc64/ppc_asm.h          +3    -5
include/asm-ppc64/processor.h        +1    -2
include/asm-ppc64/ptrace.h           +12   -0
include/asm-ppc64/system.h           +2    -0
include/asm-ppc64/thread_info.h      +5    -2
arch/alpha/mm/numa.c

@@ -279,8 +279,8 @@ setup_memory(void *kernel_end)
 			       initrd_end,
 			       phys_to_virt(PFN_PHYS(max_low_pfn)));
 		} else {
-			nid = NODE_DATA(kvaddr_to_nid(initrd_start));
-			reserve_bootmem_node(nid,
+			nid = kvaddr_to_nid(initrd_start);
+			reserve_bootmem_node(NODE_DATA(nid),
 					     virt_to_phys((void *)initrd_start),
 					     INITRD_SIZE);
 		}
arch/ppc64/Kconfig

@@ -198,7 +198,6 @@ config SCHED_SMT
 config PREEMPT
 	bool "Preemptible Kernel"
-	depends on BROKEN
 	help
 	  This option reduces the latency of the kernel when reacting to
 	  real-time or interactive events by allowing a low priority process to
arch/ppc64/kernel/align.c

@@ -22,8 +22,6 @@
 #include <asm/cache.h>
 #include <asm/cputable.h>
 
-void disable_kernel_fp(void);	/* asm function from head.S */
-
 struct aligninfo {
 	unsigned char len;
 	unsigned char flags;

@@ -280,8 +278,11 @@ fix_alignment(struct pt_regs *regs)
 	}
 
 	/* Force the fprs into the save area so we can reference them */
-	if ((flags & F) && (regs->msr & MSR_FP))
-		giveup_fpu(current);
+	if (flags & F) {
+		if (!user_mode(regs))
+			return 0;
+		flush_fp_to_thread(current);
+	}
 
 	/* If we are loading, get the data from user space */
 	if (flags & LD) {

@@ -310,9 +311,11 @@ fix_alignment(struct pt_regs *regs)
 	if (flags & F) {
 		if (nb == 4) {
 			/* Doing stfs, have to convert to single */
+			preempt_disable();
 			enable_kernel_fp();
 			cvt_df(&current->thread.fpr[reg],
 			       (float *)&data.v[4], &current->thread.fpscr);
 			disable_kernel_fp();
+			preempt_enable();
 		} else
 			data.dd = current->thread.fpr[reg];

@@ -344,9 +347,11 @@ fix_alignment(struct pt_regs *regs)
 	if (flags & F) {
 		if (nb == 4) {
 			/* Doing lfs, have to convert to double */
+			preempt_disable();
 			enable_kernel_fp();
 			cvt_fd((float *)&data.v[4],
 			       &current->thread.fpr[reg], &current->thread.fpscr);
 			disable_kernel_fp();
+			preempt_enable();
 		} else
 			current->thread.fpr[reg] = data.dd;
arch/ppc64/kernel/asm-offsets.c

@@ -48,6 +48,8 @@ int main(void)
 	DEFINE(THREAD_SHIFT, THREAD_SHIFT);
 	DEFINE(THREAD_SIZE, THREAD_SIZE);
 	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+	DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
+	DEFINE(TI_SC_NOERR, offsetof(struct thread_info, syscall_noerror));
 
 	/* task_struct->thread */
 	DEFINE(THREAD, offsetof(struct task_struct, thread));

@@ -99,7 +101,10 @@ int main(void)
 	DEFINE(PACALPPACA, offsetof(struct paca_struct, xLpPaca));
 	DEFINE(LPPACA, offsetof(struct paca_struct, xLpPaca));
 	DEFINE(PACAREGSAV, offsetof(struct paca_struct, xRegSav));
-	DEFINE(PACAEXC, offsetof(struct paca_struct, exception_stack));
+	DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen));
+	DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc));
+	DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb));
+	DEFINE(PACA_EXDSI, offsetof(struct paca_struct, exdsi));
 	DEFINE(PACAGUARD, offsetof(struct paca_struct, guard));
 	DEFINE(LPPACASRR0, offsetof(struct ItLpPaca, xSavedSrr0));
 	DEFINE(LPPACASRR1, offsetof(struct ItLpPaca, xSavedSrr1));

@@ -136,6 +141,10 @@ int main(void)
 	DEFINE(GPR7, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[7]));
 	DEFINE(GPR8, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[8]));
 	DEFINE(GPR9, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[9]));
+	DEFINE(GPR10, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[10]));
+	DEFINE(GPR11, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[11]));
+	DEFINE(GPR12, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[12]));
+	DEFINE(GPR13, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[13]));
 	DEFINE(GPR20, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[20]));
 	DEFINE(GPR21, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[21]));
 	DEFINE(GPR22, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[22]));

@@ -154,7 +163,7 @@ int main(void)
 	DEFINE(_DSISR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
 	DEFINE(ORIG_GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, orig_gpr3));
 	DEFINE(RESULT, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, result));
-	DEFINE(TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap));
+	DEFINE(_TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap));
 	DEFINE(SOFTE, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, softe));
 
 	/* These _only_ to be used with {PROM,RTAS}_FRAME_SIZE!!! */
arch/ppc64/kernel/entry.S

(This diff is collapsed: +306 additions, -181 deletions.)
arch/ppc64/kernel/head.S

(This diff is collapsed: +706 additions, -759 deletions.)
arch/ppc64/kernel/misc.S

@@ -85,13 +85,14 @@ _GLOBAL(local_irq_restore)
 	cmpw	0,r3,r5
 	beqlr			/* are we enabling interrupts? */
-	cmpi	0,r3,0
+	cmpdi	0,r3,0
 	stb	r3,PACAPROCENABLED(r13)
 	beqlr
 	/* Check pending interrupts */
 	/*   A decrementer, IPI or PMC interrupt may have occurred
 	 *   while we were in the hypervisor (which enables) */
-	CHECKANYINT(r4,r5)
+	ld	r4,PACALPPACA+LPPACAANYINT(r13)
+	cmpdi	r4,0
 	beqlr

@@ -608,7 +609,7 @@ _GLOBAL(kernel_thread)
 _GLOBAL(sys_call_table32)
 	.llong .sys_restart_syscall	/* 0 */
 	.llong .sys_exit
-	.llong .sys_fork
+	.llong .ppc_fork
 	.llong .sys_read
 	.llong .sys_write
 	.llong .sys32_open		/* 5 */

@@ -678,7 +679,7 @@ _GLOBAL(sys_call_table32)
 	.llong .sys32_ssetmask
 	.llong .sys_setreuid		/* 70 */
 	.llong .sys_setregid
-	.llong .sys32_sigsuspend
+	.llong .ppc32_sigsuspend
 	.llong .compat_sys_sigpending
 	.llong .sys32_sethostname
 	.llong .compat_sys_setrlimit	/* 75 */

@@ -726,7 +727,7 @@ _GLOBAL(sys_call_table32)
 	.llong .sys32_ipc
 	.llong .sys_fsync
 	.llong .ppc32_sigreturn
-	.llong .sys_clone		/* 120 */
+	.llong .ppc_clone		/* 120 */
 	.llong .sys32_setdomainname
 	.llong .ppc64_newuname
 	.llong .sys_ni_syscall		/* old modify_ldt syscall */

@@ -784,7 +785,7 @@ _GLOBAL(sys_call_table32)
 	.llong .sys32_rt_sigpending	/* 175 */
 	.llong .sys32_rt_sigtimedwait
 	.llong .sys32_rt_sigqueueinfo
-	.llong .sys32_rt_sigsuspend
+	.llong .ppc32_rt_sigsuspend
 	.llong .sys32_pread64
 	.llong .sys32_pwrite64		/* 180 */
 	.llong .sys_chown

@@ -795,7 +796,7 @@ _GLOBAL(sys_call_table32)
 	.llong .sys32_sendfile
 	.llong .sys_ni_syscall		/* reserved for streams1 */
 	.llong .sys_ni_syscall		/* reserved for streams2 */
-	.llong .sys_vfork
+	.llong .ppc_vfork
 	.llong .compat_sys_getrlimit	/* 190 */
 	.llong .sys32_readahead
 	.llong .sys32_mmap2

@@ -880,7 +881,7 @@ _GLOBAL(sys_call_table32)
 _GLOBAL(sys_call_table)
 	.llong .sys_restart_syscall	/* 0 */
 	.llong .sys_exit
-	.llong .sys_fork
+	.llong .ppc_fork
 	.llong .sys_read
 	.llong .sys_write
 	.llong .sys_open		/* 5 */

@@ -998,7 +999,7 @@ _GLOBAL(sys_call_table)
 	.llong .sys_ipc
 	.llong .sys_fsync
 	.llong .sys_ni_syscall
-	.llong .sys_clone		/* 120 */
+	.llong .ppc_clone		/* 120 */
 	.llong .sys_setdomainname
 	.llong .ppc64_newuname
 	.llong .sys_ni_syscall		/* old modify_ldt syscall */

@@ -1056,7 +1057,7 @@ _GLOBAL(sys_call_table)
 	.llong .sys_rt_sigpending	/* 175 */
 	.llong .sys_rt_sigtimedwait
 	.llong .sys_rt_sigqueueinfo
-	.llong .sys_rt_sigsuspend
+	.llong .ppc64_rt_sigsuspend
 	.llong .sys_pread64
 	.llong .sys_pwrite64		/* 180 */
 	.llong .sys_chown

@@ -1067,7 +1068,7 @@ _GLOBAL(sys_call_table)
 	.llong .sys_sendfile64
 	.llong .sys_ni_syscall		/* reserved for streams1 */
 	.llong .sys_ni_syscall		/* reserved for streams2 */
-	.llong .sys_vfork
+	.llong .ppc_vfork
 	.llong .sys_getrlimit		/* 190 */
 	.llong .sys_readahead
 	.llong .sys_ni_syscall		/* 32bit only mmap2 */
arch/ppc64/kernel/pacaData.c

@@ -62,8 +62,6 @@ struct systemcfg *systemcfg;
 	.xDesc = 0xd397d9e2,	/* "LpRS" */				    \
 	.xSize = sizeof(struct ItLpRegSave)				    \
 	},								    \
-	.exception_sp =							    \
-		(&paca[number].exception_stack[0]) - EXC_FRAME_SIZE,	    \
 }
 
 struct paca_struct paca[] __page_aligned = {
arch/ppc64/kernel/process.c

@@ -65,8 +65,43 @@ struct mm_struct ioremap_mm = {
 	.page_table_lock = SPIN_LOCK_UNLOCKED,
 };
 
+/*
+ * Make sure the floating-point register state in the
+ * the thread_struct is up to date for task tsk.
+ */
+void flush_fp_to_thread(struct task_struct *tsk)
+{
+	if (tsk->thread.regs) {
+		/*
+		 * We need to disable preemption here because if we didn't,
+		 * another process could get scheduled after the regs->msr
+		 * test but before we have finished saving the FP registers
+		 * to the thread_struct.  That process could take over the
+		 * FPU, and then when we get scheduled again we would store
+		 * bogus values for the remaining FP registers.
+		 */
+		preempt_disable();
+		if (tsk->thread.regs->msr & MSR_FP) {
+#ifdef CONFIG_SMP
+			/*
+			 * This should only ever be called for current or
+			 * for a stopped child process.  Since we save away
+			 * the FP register state on context switch on SMP,
+			 * there is something wrong if a stopped child appears
+			 * to still have its FP state in the CPU registers.
+			 */
+			BUG_ON(tsk != current);
+#endif
+			giveup_fpu(current);
+		}
+		preempt_enable();
+	}
+}
+
 void enable_kernel_fp(void)
 {
+	WARN_ON(preemptible());
+
 #ifdef CONFIG_SMP
 	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
 		giveup_fpu(current);

@@ -80,12 +115,9 @@ EXPORT_SYMBOL(enable_kernel_fp);
 int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
 {
-	struct pt_regs *regs = tsk->thread.regs;
-
-	if (!regs)
+	if (!tsk->thread.regs)
 		return 0;
 
-	if (tsk == current && (regs->msr & MSR_FP))
-		giveup_fpu(current);
+	flush_fp_to_thread(current);
 
 	memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));

@@ -96,6 +128,8 @@ int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
 void enable_kernel_altivec(void)
 {
+	WARN_ON(preemptible());
+
 #ifdef CONFIG_SMP
 	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
 		giveup_altivec(current);

@@ -107,10 +141,29 @@ void enable_kernel_altivec(void)
 }
 EXPORT_SYMBOL(enable_kernel_altivec);
 
-int dump_task_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
+/*
+ * Make sure the VMX/Altivec register state in the
+ * the thread_struct is up to date for task tsk.
+ */
+void flush_altivec_to_thread(struct task_struct *tsk)
 {
-	if (regs->msr & MSR_VEC)
+#ifdef CONFIG_ALTIVEC
+	if (tsk->thread.regs) {
+		preempt_disable();
+		if (tsk->thread.regs->msr & MSR_VEC) {
+#ifdef CONFIG_SMP
+			BUG_ON(tsk != current);
+#endif
+			giveup_altivec(current);
+		}
+		preempt_enable();
+	}
+#endif
+}
+
+int dump_task_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
+{
+	flush_altivec_to_thread(current);
 
 	memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
 	return 1;
 }

@@ -166,6 +219,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
 void show_regs(struct pt_regs * regs)
 {
 	int i;
+	unsigned long trap;
 
 	printk("NIP: %016lX XER: %016lX LR: %016lX\n",
 	       regs->nip, regs->xer, regs->link);

@@ -176,7 +230,8 @@ void show_regs(struct pt_regs * regs)
 	       regs->msr & MSR_FP ? 1 : 0, regs->msr & MSR_ME ? 1 : 0,
 	       regs->msr & MSR_IR ? 1 : 0, regs->msr & MSR_DR ? 1 : 0);
-	if (regs->trap == 0x300 || regs->trap == 0x380 || regs->trap == 0x600)
+	trap = TRAP(regs);
+	if (trap == 0x300 || trap == 0x380 || trap == 0x600)
 		printk("DAR: %016lx, DSISR: %016lx\n", regs->dar, regs->dsisr);
 	printk("TASK: %p[%d] '%s' THREAD: %p",
 	       current, current->pid, current->comm, current->thread_info);

@@ -191,6 +246,8 @@ void show_regs(struct pt_regs * regs)
 		}
 
 		printk("%016lX ", regs->gpr[i]);
+		if (i == 13 && !FULL_REGS(regs))
+			break;
 	}
 	printk("\n");
 	/*

@@ -245,16 +302,8 @@ release_thread(struct task_struct *t)
  */
 void prepare_to_copy(struct task_struct *tsk)
 {
-	struct pt_regs *regs = tsk->thread.regs;
-
-	if (regs == NULL)
-		return;
-	if (regs->msr & MSR_FP)
-		giveup_fpu(current);
-#ifdef CONFIG_ALTIVEC
-	if (regs->msr & MSR_VEC)
-		giveup_altivec(current);
-#endif /* CONFIG_ALTIVEC */
+	flush_fp_to_thread(current);
+	flush_altivec_to_thread(current);
 }
 
 /*

@@ -439,12 +488,8 @@ int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
 	error = PTR_ERR(filename);
 	if (IS_ERR(filename))
 		goto out;
-	if (regs->msr & MSR_FP)
-		giveup_fpu(current);
-#ifdef CONFIG_ALTIVEC
-	if (regs->msr & MSR_VEC)
-		giveup_altivec(current);
-#endif /* CONFIG_ALTIVEC */
+	flush_fp_to_thread(current);
+	flush_altivec_to_thread(current);
 	error = do_execve(filename, (char __user * __user *) a1,
 			  (char __user * __user *) a2, regs);
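Note on the pattern: flush_fp_to_thread() and flush_altivec_to_thread(), introduced above, are what the rest of this merge converges on. Every open-coded "test MSR_FP, then giveup_fpu()" sequence in the ptrace, signal, exec and traps code below is replaced by one call to the helper. A minimal before/after sketch of the calling pattern (tsk is a stand-in for current or a traced child; abbreviated, not literal code from any one hunk):

    /* Before: each call site tested the MSR bit itself.  Under CONFIG_PREEMPT
     * the task could be scheduled away between the test and the register save,
     * so another task could take the FPU and the saved state would be stale. */
    if (tsk->thread.regs->msr & MSR_FP)
            giveup_fpu(tsk);

    /* After: the helper does the same test and save with preemption disabled,
     * closing that window. */
    flush_fp_to_thread(tsk);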
arch/ppc64/kernel/ptrace.c

@@ -119,8 +119,7 @@ int sys_ptrace(long request, long pid, long addr, long data)
 		if (index < PT_FPR0) {
 			tmp = get_reg(child, (int)index);
 		} else {
-			if (child->thread.regs->msr & MSR_FP)
-				giveup_fpu(child);
+			flush_fp_to_thread(child);
 			tmp = ((unsigned long *)child->thread.fpr)[index - PT_FPR0];
 		}
 		ret = put_user(tmp,(unsigned long __user *) data);

@@ -152,8 +151,7 @@ int sys_ptrace(long request, long pid, long addr, long data)
 		if (index < PT_FPR0) {
 			ret = put_reg(child, index, data);
 		} else {
-			if (child->thread.regs->msr & MSR_FP)
-				giveup_fpu(child);
+			flush_fp_to_thread(child);
 			((unsigned long *)child->thread.fpr)[index - PT_FPR0] = data;
 			ret = 0;
 		}

@@ -245,8 +243,7 @@ int sys_ptrace(long request, long pid, long addr, long data)
 		unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
 		unsigned long __user *tmp = (unsigned long __user *)addr;
 
-		if (child->thread.regs->msr & MSR_FP)
-			giveup_fpu(child);
+		flush_fp_to_thread(child);
 
 		for (i = 0; i < 32; i++) {
 			ret = put_user(*reg, tmp);

@@ -263,8 +260,7 @@ int sys_ptrace(long request, long pid, long addr, long data)
 		unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
 		unsigned long __user *tmp = (unsigned long __user *)addr;
 
-		if (child->thread.regs->msr & MSR_FP)
-			giveup_fpu(child);
+		flush_fp_to_thread(child);
 
 		for (i = 0; i < 32; i++) {
 			ret = get_user(*reg, tmp);
arch/ppc64/kernel/ptrace32.c

@@ -136,8 +136,7 @@ int sys32_ptrace(long request, long pid, unsigned long addr, unsigned long data)
 		if (index < PT_FPR0) {
 			tmp = get_reg(child, index);
 		} else {
-			if (child->thread.regs->msr & MSR_FP)
-				giveup_fpu(child);
+			flush_fp_to_thread(child);
 			/*
 			 * the user space code considers the floating point
 			 * to be an array of unsigned int (32 bits) - the

@@ -179,8 +178,7 @@ int sys32_ptrace(long request, long pid, unsigned long addr, unsigned long data)
 			break;
 		if (numReg >= PT_FPR0) {
-			if (child->thread.regs->msr & MSR_FP)
-				giveup_fpu(child);
+			flush_fp_to_thread(child);
 			tmp = ((unsigned long int *)child->thread.fpr)[numReg - PT_FPR0];
 		} else { /* register within PT_REGS struct */
 			tmp = get_reg(child, numReg);

@@ -244,8 +242,7 @@ int sys32_ptrace(long request, long pid, unsigned long addr, unsigned long data)
 		if (index < PT_FPR0) {
 			ret = put_reg(child, index, data);
 		} else {
-			if (child->thread.regs->msr & MSR_FP)
-				giveup_fpu(child);
+			flush_fp_to_thread(child);
 			/*
 			 * the user space code considers the floating point
 			 * to be an array of unsigned int (32 bits) - the

@@ -283,8 +280,7 @@ int sys32_ptrace(long request, long pid, unsigned long addr, unsigned long data)
 		    || ((numReg > PT_CCR) && (numReg < PT_FPR0)))
 			break;
 		if (numReg >= PT_FPR0) {
-			if (child->thread.regs->msr & MSR_FP)
-				giveup_fpu(child);
+			flush_fp_to_thread(child);
 		}
 		if (numReg == PT_MSR)
 			data = (data & MSR_DEBUGCHANGE)

@@ -379,8 +375,7 @@ int sys32_ptrace(long request, long pid, unsigned long addr, unsigned long data)
 		unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
 		unsigned int __user *tmp = (unsigned int __user *)addr;
 
-		if (child->thread.regs->msr & MSR_FP)
-			giveup_fpu(child);
+		flush_fp_to_thread(child);
 
 		for (i = 0; i < 32; i++) {
 			ret = put_user(*reg, tmp);

@@ -397,8 +392,7 @@ int sys32_ptrace(long request, long pid, unsigned long addr, unsigned long data)
 		unsigned long *reg = &((unsigned long *)child->thread.fpr)[0];
 		unsigned int __user *tmp = (unsigned int __user *)addr;
 
-		if (child->thread.regs->msr & MSR_FP)
-			giveup_fpu(child);
+		flush_fp_to_thread(child);
 
 		for (i = 0; i < 32; i++) {
 			ret = get_user(*reg, tmp);
arch/ppc64/kernel/rtas.c

@@ -68,10 +68,11 @@ char rtas_data_buf[RTAS_DATA_BUF_SIZE]__page_aligned;
 void
 call_rtas_display_status(char c)
 {
-	struct rtas_args *args = &(get_paca()->xRtas);
+	struct rtas_args *args;
 	unsigned long s;
 
 	spin_lock_irqsave(&rtas.lock, s);
+	args = &(get_paca()->xRtas);
 
 	args->token = 10;
 	args->nargs = 1;

@@ -145,7 +146,7 @@ rtas_call(int token, int nargs, int nret,
 	va_list list;
 	int i, logit = 0;
 	unsigned long s;
-	struct rtas_args *rtas_args = &(get_paca()->xRtas);
+	struct rtas_args *rtas_args;
 	long ret;
 
 	PPCDBG(PPCDBG_RTAS, "Entering rtas_call\n");

@@ -158,6 +159,7 @@ rtas_call(int token, int nargs, int nret,
 	/* Gotta do something different here, use global lock for now... */
 	spin_lock_irqsave(&rtas.lock, s);
+	rtas_args = &(get_paca()->xRtas);
 
 	rtas_args->token = token;
 	rtas_args->nargs = nargs;
arch/ppc64/kernel/signal.c

@@ -131,8 +131,7 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
 #endif
 	long err = 0;
 
-	if (regs->msr & MSR_FP)
-		giveup_fpu(current);
+	flush_fp_to_thread(current);
 
 	/* Make sure signal doesn't get spurrious FP exceptions */
 	current->thread.fpscr = 0;

@@ -142,8 +141,7 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
 	/* save altivec registers */
 	if (current->thread.used_vr) {
-		if (regs->msr & MSR_VEC)
-			giveup_altivec(current);
+		flush_altivec_to_thread(current);
 		/* Copy 33 vec registers (vr0..31 and vscr) to the stack */
 		err |= __copy_to_user(v_regs, current->thread.vr, 33 * sizeof(vector128));
 		/* set MSR_VEC in the MSR value in the frame to indicate that sc->v_reg)

@@ -530,13 +528,13 @@ int do_signal(sigset_t *oldset, struct pt_regs *regs)
 		struct k_sigaction *ka = &current->sighand->action[signr-1];
 
 		/* Whee!  Actually deliver the signal.  */
-		if (regs->trap == 0x0C00)
+		if (TRAP(regs) == 0x0C00)
 			syscall_restart(regs, ka);
 		handle_signal(signr, ka, &info, oldset, regs);
 		return 1;
 	}
 
-	if (regs->trap == 0x0C00) {	/* System Call! */
+	if (TRAP(regs) == 0x0C00) {	/* System Call! */
 		if ((int)regs->result == -ERESTARTNOHAND ||
 		    (int)regs->result == -ERESTARTSYS ||
 		    (int)regs->result == -ERESTARTNOINTR) {
arch/ppc64/kernel/signal32.c

@@ -132,8 +132,7 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext32 __user *frame,
 	int i, err = 0;
 
 	/* Make sure floating point registers are stored in regs */
-	if (regs->msr & MSR_FP)
-		giveup_fpu(current);
+	flush_fp_to_thread(current);
 
 	/* save general and floating-point registers */
 	for (i = 0; i <= PT_RESULT; i ++)

@@ -148,8 +147,7 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext32 __user *frame,
 #ifdef CONFIG_ALTIVEC
 	/* save altivec registers */
 	if (current->thread.used_vr) {
-		if (regs->msr & MSR_VEC)
-			giveup_altivec(current);
+		flush_altivec_to_thread(current);
 		if (__copy_to_user(&frame->mc_vregs, current->thread.vr,
 				   ELF_NVRREG32 * sizeof(vector128)))
 			return 1;

@@ -934,7 +932,7 @@ int do_signal32(sigset_t *oldset, struct pt_regs *regs)
 	ka = (signr == 0)? NULL: &current->sighand->action[signr-1];
 
-	if (regs->trap == 0x0C00		/* System Call! */
+	if (TRAP(regs) == 0x0C00		/* System Call! */
 	    && regs->ccr & 0x10000000		/* error signalled */
 	    && ((ret = regs->gpr[3]) == ERESTARTSYS
 		|| ret == ERESTARTNOHAND || ret == ERESTARTNOINTR
arch/ppc64/kernel/sys_ppc32.c

@@ -617,12 +617,8 @@ long sys32_execve(unsigned long a0, unsigned long a1, unsigned long a2,
 	error = PTR_ERR(filename);
 	if (IS_ERR(filename))
 		goto out;
-	if (regs->msr & MSR_FP)
-		giveup_fpu(current);
-#ifdef CONFIG_ALTIVEC
-	if (regs->msr & MSR_VEC)
-		giveup_altivec(current);
-#endif /* CONFIG_ALTIVEC */
+	flush_fp_to_thread(current);
+	flush_altivec_to_thread(current);
 	error = compat_do_execve(filename, compat_ptr(a1), compat_ptr(a2), regs);
arch/ppc64/kernel/syscalls.c

@@ -237,5 +237,19 @@ asmlinkage time_t sys64_time(time_t __user * tloc)
 	return secs;
 }
 
+void do_show_syscall(unsigned long r3, unsigned long r4, unsigned long r5,
+		     unsigned long r6, unsigned long r7, unsigned long r8,
+		     struct pt_regs *regs)
+{
+	printk("syscall %ld(%lx, %lx, %lx, %lx, %lx, %lx) regs=%p current=%p"
+	       " cpu=%d\n", regs->gpr[0], r3, r4, r5, r6, r7, r8, regs,
+	       current, smp_processor_id());
+}
+
+void do_show_syscall_exit(unsigned long r3)
+{
+	printk(" -> %lx, current=%p cpu=%d\n", r3, current, smp_processor_id());
+}
+
 /* Only exists on P-series. */
 cond_syscall(ppc_rtas);
arch/ppc64/kernel/traps.c

@@ -308,8 +308,7 @@ static void parse_fpe(struct pt_regs *regs)
 	siginfo_t info;
 	unsigned long fpscr;
 
-	if (regs->msr & MSR_FP)
-		giveup_fpu(current);
+	flush_fp_to_thread(current);
 
 	fpscr = current->thread.fpscr;

@@ -442,8 +441,22 @@ void KernelFPUnavailableException(struct pt_regs *regs)
 	die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
 }
 
-void KernelAltivecUnavailableException(struct pt_regs *regs)
+void AltivecUnavailableException(struct pt_regs *regs)
 {
+#ifndef CONFIG_ALTIVEC
+	if (user_mode(regs)) {
+		/* A user program has executed an altivec instruction,
+		   but this kernel doesn't support altivec. */
+		siginfo_t info;
+
+		memset(&info, 0, sizeof(info));
+		info.si_signo = SIGILL;
+		info.si_code = ILL_ILLOPC;
+		info.si_addr = (void *) regs->nip;
+		_exception(SIGILL, &info, regs);
+		return;
+	}
+#endif
 	printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
 			  "%lx at %lx\n", regs->trap, regs->nip);
 	die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);

@@ -531,8 +544,7 @@ AlignmentException(struct pt_regs *regs)
 void
 AltivecAssistException(struct pt_regs *regs)
 {
-	if (regs->msr & MSR_VEC)
-		giveup_altivec(current);
+	flush_altivec_to_thread(current);
 	/* XXX quick hack for now: set the non-Java bit in the VSCR */
 	current->thread.vscr.u[3] |= 0x10000;
 }
arch/ppc64/mm/fault.c

@@ -80,8 +80,10 @@ static int store_updates_sp(struct pt_regs *regs)
  * - DSISR for a non-SLB data access fault,
  * - SRR1 & 0x08000000 for a non-SLB instruction access fault
  * - 0 any SLB fault.
+ * The return value is 0 if the fault was handled, or the signal
+ * number if this is a kernel fault that can't be handled here.
  */
-void do_page_fault(struct pt_regs *regs, unsigned long address,
-		   unsigned long error_code)
+int do_page_fault(struct pt_regs *regs, unsigned long address,
+		  unsigned long error_code)
 {
 	struct vm_area_struct * vma;

@@ -89,27 +91,34 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 	siginfo_t info;
 	unsigned long code = SEGV_MAPERR;
 	unsigned long is_write = error_code & 0x02000000;
+	unsigned long trap = TRAP(regs);
 
-	if (regs->trap == 0x300 || regs->trap == 0x380) {
+	if (trap == 0x300 || trap == 0x380) {
 		if (debugger_fault_handler(regs))
-			return;
+			return 0;
 	}
 
 	/* On a kernel SLB miss we can only check for a valid exception entry */
-	if (!user_mode(regs) && (regs->trap == 0x380)) {
-		bad_page_fault(regs, address, SIGSEGV);
-		return;
-	}
+	if (!user_mode(regs) && (trap == 0x380 || address >= TASK_SIZE))
+		return SIGSEGV;
 
 	if (error_code & 0x00400000) {
 		if (debugger_dabr_match(regs))
-			return;
+			return 0;
 	}
 
 	if (in_atomic() || mm == NULL) {
-		bad_page_fault(regs, address, SIGSEGV);
-		return;
+		if (!user_mode(regs))
+			return SIGSEGV;
+
+		/* in_atomic() in user mode is really bad,
+		   as is current->mm == NULL. */
+		printk(KERN_EMERG "Page fault in user mode with"
+		       "in_atomic() = %d mm = %p\n", in_atomic(), mm);
+		printk(KERN_EMERG "NIP = %lx  MSR = %lx\n",
+		       regs->nip, regs->msr);
+		die("Weird page fault", regs, SIGSEGV);
 	}
 
 	down_read(&mm->mmap_sem);
 	vma = find_vma(mm, address);
 	if (!vma)

@@ -195,7 +204,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 	}
 
 	up_read(&mm->mmap_sem);
-	return;
+	return 0;
 
 bad_area:
 	up_read(&mm->mmap_sem);

@@ -207,11 +216,10 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 		info.si_code = code;
 		info.si_addr = (void *) address;
 		force_sig_info(SIGSEGV, &info, current);
-		return;
+		return 0;
 	}
 
-	bad_page_fault(regs, address, SIGSEGV);
-	return;
+	return SIGSEGV;
 
 	/*
 	 * We ran out of memory, or some other thing happened to us that made

@@ -227,18 +235,19 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 	printk("VM: killing process %s\n", current->comm);
 
 	if (user_mode(regs))
 		do_exit(SIGKILL);
-	bad_page_fault(regs, address, SIGKILL);
-	return;
+	return SIGKILL;
 
 do_sigbus:
 	up_read(&mm->mmap_sem);
+	if (user_mode(regs)) {
 		info.si_signo = SIGBUS;
 		info.si_errno = 0;
 		info.si_code = BUS_ADRERR;
 		info.si_addr = (void *)address;
-	force_sig_info(SIGBUS, &info, current);
-	if (!user_mode(regs))
-		bad_page_fault(regs, address, SIGBUS);
+		force_sig_info(SIGBUS, &info, current);
+		return 0;
+	}
+	return SIGBUS;
 }
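Note on the new return convention: per the comment added in the first hunk, do_page_fault() now reports failures through its return value (0 means the fault was handled, otherwise it returns the signal number for a kernel fault it could not handle) instead of calling bad_page_fault() itself. The code that consumes that value lives in the collapsed entry.S/head.S diffs above, so the following is only a hypothetical C rendering of how such a caller might use it; the wrapper name and shape are assumptions, not code from this commit:

    /* Hypothetical caller sketch (assumed wrapper, not from this commit). */
    void handle_page_fault(struct pt_regs *regs, unsigned long addr,
                           unsigned long error_code)
    {
            int sig = do_page_fault(regs, addr, error_code);

            if (sig)
                    /* Kernel-mode fault that could not be resolved:
                     * apply exception fixups or die with this signal. */
                    bad_page_fault(regs, addr, sig);
    }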
arch/ppc64/mm/hash_utils.c

@@ -251,6 +251,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	struct mm_struct *mm;
 	pte_t *ptep;
 	int ret;
+	int cpu;
 	int user_region = 0;
 	int local = 0;
 	cpumask_t tmp;

@@ -302,7 +303,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	if (pgdir == NULL)
 		return 1;
 
-	tmp = cpumask_of_cpu(smp_processor_id());
+	cpu = get_cpu();
+	tmp = cpumask_of_cpu(cpu);
 	if (user_region && cpus_equal(mm->cpu_vm_mask, tmp))
 		local = 1;

@@ -311,11 +313,13 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 		ret = hash_huge_page(mm, access, ea, vsid, local);
 	else {
 		ptep = find_linux_pte(pgdir, ea);
-		if (ptep == NULL)
+		if (ptep == NULL) {
+			put_cpu();
 			return 1;
+		}
 		ret = __hash_page(ea, access, vsid, ptep, trap, local);
 	}
+	put_cpu();
 
 	return ret;
 }
arch/ppc64/mm/hugetlbpage.c

@@ -375,6 +375,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma,
 	unsigned long addr;
 	hugepte_t *ptep;
 	struct page *page;
+	int cpu;
 	int local = 0;
 	cpumask_t tmp;

@@ -383,7 +384,8 @@ void unmap_hugepage_range(struct vm_area_struct *vma,
 	BUG_ON((end % HPAGE_SIZE) != 0);
 
 	/* XXX are there races with checking cpu_vm_mask? - Anton */
-	tmp = cpumask_of_cpu(smp_processor_id());
+	cpu = get_cpu();
+	tmp = cpumask_of_cpu(cpu);
 	if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
 		local = 1;

@@ -406,6 +408,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma,
 		put_page(page);
 	}
+	put_cpu();
 
 	mm->rss -= (end - start) >> PAGE_SHIFT;
 }
arch/ppc64/mm/init.c

@@ -764,6 +764,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long ea,
 	void *pgdir;
 	pte_t *ptep;
 	int local = 0;
+	int cpu;
 	cpumask_t tmp;
 
 	/* handle i-cache coherency */

@@ -794,12 +795,14 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long ea,
 	vsid = get_vsid(vma->vm_mm->context.id, ea);
 
-	tmp = cpumask_of_cpu(smp_processor_id());
+	cpu = get_cpu();
+	tmp = cpumask_of_cpu(cpu);
 	if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
 		local = 1;
 
 	__hash_page(ea, pte_val(pte) & (_PAGE_USER|_PAGE_RW), vsid, ptep,
 		    0x300, local);
+	put_cpu();
 }
 
 void * reserve_phb_iospace(unsigned long size)
arch/ppc64/mm/tlb.c

@@ -91,12 +91,15 @@ void hpte_update(pte_t *ptep, unsigned long pte, int wrprot)
 void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
 {
 	int i;
-	cpumask_t tmp = cpumask_of_cpu(smp_processor_id());
+	int cpu;
+	cpumask_t tmp;
 	int local = 0;
 
 	BUG_ON(in_interrupt());
 
+	cpu = get_cpu();
 	i = batch->index;
+	tmp = cpumask_of_cpu(cpu);
 	if (cpus_equal(batch->mm->cpu_vm_mask, tmp))
 		local = 1;

@@ -106,6 +109,7 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
 	else
 		flush_hash_range(batch->context, i, local);
 	batch->index = 0;
+	put_cpu();
 }
 
 #ifdef CONFIG_SMP
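Note on the get_cpu()/put_cpu() changes: the four mm/ files above (hash_utils.c, hugetlbpage.c, init.c, tlb.c) all replace a bare smp_processor_id() with the get_cpu()/put_cpu() pair. With the preemptible-kernel work in this merge, the task could otherwise migrate to another CPU between reading the processor id and acting on it, making the "only this CPU has used the mm, so flush locally" optimisation unsafe. A minimal sketch of the pattern these hunks adopt (illustrative fragment, not code from any single hunk):

    int cpu;
    cpumask_t tmp;
    int local = 0;

    cpu = get_cpu();                   /* disables preemption, returns current CPU */
    tmp = cpumask_of_cpu(cpu);
    if (cpus_equal(mm->cpu_vm_mask, tmp))
            local = 1;                 /* mm only ever ran here: local flush is enough */
    /* ... do the hash insert / TLB flush while still pinned to this CPU ... */
    put_cpu();                         /* re-enables preemption */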
arch/ppc64/xmon/xmon.c

@@ -44,9 +44,6 @@ static int xmon_owner;
 static int xmon_gate;
 #endif /* CONFIG_SMP */
 
-#define TRAP(regs)	((regs)->trap)
-#define FULL_REGS(regs)	1
-
 static unsigned long in_xmon = 0;
 
 static unsigned long adrs;
include/asm-ppc64/hardirq.h

@@ -82,9 +82,11 @@ typedef struct {
 #ifdef CONFIG_PREEMPT
 # define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked())
+# define preemptible()	(preempt_count() == 0 && !irqs_disabled())
 # define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
 #else
 # define in_atomic()	(preempt_count() != 0)
+# define preemptible()	0
 # define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
 #endif
 #define irq_exit()						\
include/asm-ppc64/paca.h

@@ -136,23 +136,21 @@ struct paca_struct {
 	u8 rsvd6[0x500 - 0x8];
 
 	/*=====================================================================================
-	 * CACHE_LINE_31 0x0F00 - 0x0F7F Exception stack
+	 * CACHE_LINE_31-32 0x0F00 - 0x0FFF Exception register save areas
 	 *=====================================================================================
 	 */
-	u8 exception_stack[N_EXC_STACK * EXC_FRAME_SIZE];
+	u64 exgen[8];		/* used for most interrupts/exceptions */
+	u64 exmc[8];		/* used for machine checks */
+	u64 exslb[8];		/* used for SLB/segment table misses
+				 * on the linear mapping */
+	u64 exdsi[8];		/* used for linear mapping hash table misses */
 
 	/*=====================================================================================
-	 * CACHE_LINE_32 0x0F80 - 0x0FFF Reserved
-	 * Page 2 used as a stack when we detect a bad kernel stack pointer,
-	 * and early in SMP boots before relocation is enabled.
-	 *=====================================================================================
-	 */
-	u8 rsvd7[0x80];		/* Give the stack some rope ... */
-
-	/*=====================================================================================
+	 * Page 2 Reserved for guard page.  Also used as a stack early in SMP boots before
+	 * relocation is enabled.
 	 *=====================================================================================
 	 */
-	u8 guard[0x1000];	/* ... and then hang 'em */
+	u8 guard[0x1000];
 };
 
 #endif /* _PPC64_PACA_H */
include/asm-ppc64/ppc_asm.h

@@ -28,6 +28,9 @@
 #define REST_8GPRS(n, base)	REST_4GPRS(n, base); REST_4GPRS(n+4, base)
 #define REST_10GPRS(n, base)	REST_8GPRS(n, base); REST_2GPRS(n+8, base)
 
+#define SAVE_NVGPRS(base)	SAVE_8GPRS(14, base); SAVE_10GPRS(22, base)
+#define REST_NVGPRS(base)	REST_8GPRS(14, base); REST_10GPRS(22, base)
+
 #define SAVE_FPR(n, base)	stfd	n,THREAD_FPR0+8*(n)(base)
 #define SAVE_2FPRS(n, base)	SAVE_FPR(n, base); SAVE_FPR(n+1, base)
 #define SAVE_4FPRS(n, base)	SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base)

@@ -54,11 +57,6 @@
 #define REST_16VRS(n,b,base)	REST_8VRS(n,b,base); REST_8VRS(n+8,b,base)
 #define REST_32VRS(n,b,base)	REST_16VRS(n,b,base); REST_16VRS(n+16,b,base)
 
-#define CHECKANYINT(ra,rb)						\
-	mfspr	rb,SPRG3;			/* Get Paca address */	\
-	ld	ra,PACALPPACA+LPPACAANYINT(rb);	/* Get pending interrupt flags */ \
-	cmpldi	0,ra,0;
-
 /* Macros to adjust thread priority for Iseries hardware multithreading */
 #define HMT_LOW		or 1,1,1
 #define HMT_MEDIUM	or 2,2,2
include/asm-ppc64/processor.h

@@ -543,8 +543,7 @@ struct thread_struct {
 	double		fpr[32];	/* Complete floating point set */
 	unsigned long	fpscr;		/* Floating point status (plus pad) */
 	unsigned long	fpexc_mode;	/* Floating-point exception mode */
-	unsigned long	saved_msr;	/* Save MSR across signal handlers */
-	unsigned long	saved_softe;	/* Ditto for Soft Enable/Disable */
+	unsigned long	pad[3];		/* was saved_msr, saved_softe */
 #ifdef CONFIG_ALTIVEC
 	/* Complete AltiVec register set */
 	vector128	vr[32] __attribute((aligned(16)));
include/asm-ppc64/ptrace.h

@@ -71,6 +71,18 @@ struct pt_regs32 {
 #define instruction_pointer(regs) ((regs)->nip)
 #define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1)
 
+#define force_successful_syscall_return()	\
+	(current_thread_info()->syscall_noerror = 1)
+
+/*
+ * We use the least-significant bit of the trap field to indicate
+ * whether we have saved the full set of registers, or only a
+ * partial set.  A 1 there means the partial set.
+ */
+#define FULL_REGS(regs)		(((regs)->trap & 1) == 0)
+#define TRAP(regs)		((regs)->trap & ~0xF)
+#define CHECK_FULL_REGS(regs)	BUG_ON(regs->trap & 1)
+
 /*
  * Offsets used by 'ptrace' system call interface.
  */
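Note on the new macros: the low bit of pt_regs->trap now flags a partial register save, and TRAP() masks off the low 4 bits so exception-number comparisons keep working whichever save path was taken. That is why call sites elsewhere in this merge (do_signal, do_signal32, show_regs, do_page_fault) switch from "regs->trap == ..." to "TRAP(regs) == ...". A small worked example (values chosen for illustration only):

    /* Suppose a system call entry saved only the partial register set: */
    regs->trap = 0x0C01;

    /* TRAP(regs)      == 0x0C00  -> "TRAP(regs) == 0x0C00" still matches   */
    /* FULL_REGS(regs) == 0       -> bit 0 set means a partial save         */
    /* CHECK_FULL_REGS(regs)      -> would BUG() here, full set required    */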
include/asm-ppc64/system.h

@@ -111,6 +111,8 @@ extern void flush_instruction_cache(void);
 extern int _get_PVR(void);
 extern void giveup_fpu(struct task_struct *);
 extern void disable_kernel_fp(void);
+extern void flush_fp_to_thread(struct task_struct *);
+extern void flush_altivec_to_thread(struct task_struct *);
 extern void enable_kernel_fp(void);
 extern void giveup_altivec(struct task_struct *);
 extern void disable_kernel_altivec(void);
include/asm-ppc64/thread_info.h

@@ -13,6 +13,7 @@
 #ifndef __ASSEMBLY__
 #include <linux/config.h>
 #include <asm/processor.h>
+#include <asm/page.h>
 #include <linux/stringify.h>
 
 /*

@@ -23,8 +24,10 @@ struct thread_info {
 	struct exec_domain *exec_domain;	/* execution domain */
 	unsigned long	flags;			/* low level flags */
 	int		cpu;			/* cpu we're on */
-	int		preempt_count;		/* not used at present */
+	int		preempt_count;
 	struct restart_block restart_block;
+	/* set by force_successful_syscall_return */
+	unsigned char	syscall_noerror;
 };
 
 /*

@@ -73,7 +76,7 @@ struct thread_info {
 static inline struct thread_info *current_thread_info(void)
 {
 	struct thread_info *ti;
-	__asm__("clrrdi %0,1,14" : "=r"(ti));
+	__asm__("clrrdi %0,1,%1" : "=r"(ti) : "i" (THREAD_SHIFT));
 	return ti;
 }