Commit 22b28032
authored Oct 28, 2005 by Paul Mackerras

Merge git://oak/home/sfr/kernels/iseries/work/

parents 45424376 299f6ce4

Showing 18 changed files with 21 additions and 3064 deletions
arch/powerpc/kernel/Makefile    +4   -4
arch/powerpc/kernel/setup_64.c  +5   -13
arch/powerpc/lib/Makefile       +8   -4
arch/ppc64/Makefile             +2   -0
arch/ppc64/kernel/Makefile      +1   -1
arch/ppc64/kernel/entry.S       +0   -845
arch/ppc64/kernel/misc.S        +0   -563
arch/ppc64/lib/Makefile         +1   -14
arch/ppc64/lib/checksum.S       +0   -229
arch/ppc64/lib/copypage.S       +0   -121
arch/ppc64/lib/copyuser.S       +0   -576
arch/ppc64/lib/e2a.c            +0   -108
arch/ppc64/lib/locks.c          +0   -95
arch/ppc64/lib/memcpy.S         +0   -172
arch/ppc64/lib/sstep.c          +0   -141
arch/ppc64/lib/strcase.c        +0   -31
arch/ppc64/lib/string.S         +0   -106
arch/ppc64/lib/usercopy.c       +0   -41
arch/powerpc/kernel/Makefile

@@ -13,7 +13,7 @@ endif
 obj-y                        := semaphore.o cputable.o ptrace.o syscalls.o \
                                 signal_32.o pmc.o
 obj-$(CONFIG_PPC64)          += setup_64.o binfmt_elf32.o sys_ppc32.o \
-                                ptrace32.o
+                                ptrace32.o systbl.o
 obj-$(CONFIG_ALTIVEC)        += vecemu.o vector.o
 obj-$(CONFIG_POWER4)         += idle_power4.o
 obj-$(CONFIG_PPC_OF)         += of_device.o
@@ -28,12 +28,11 @@ extra-$(CONFIG_40x) := head_4xx.o
 extra-$(CONFIG_44x)          := head_44x.o
 extra-$(CONFIG_FSL_BOOKE)    := head_fsl_booke.o
 extra-$(CONFIG_8xx)          := head_8xx.o
-extra-$(CONFIG_PPC64)        += entry_64.o
 extra-y                      += vmlinux.lds
 obj-y                        += process.o init_task.o time.o \
-                                prom.o systbl.o traps.o setup-common.o
+                                prom.o traps.o setup-common.o
-obj-$(CONFIG_PPC32)          += entry_32.o setup_32.o misc_32.o
+obj-$(CONFIG_PPC32)          += entry_32.o setup_32.o misc_32.o systbl.o
 obj-$(CONFIG_PPC64)          += misc_64.o
 obj-$(CONFIG_PPC_OF)         += prom_init.o
 obj-$(CONFIG_MODULES)        += ppc_ksyms.o
@@ -54,3 +53,4 @@ obj-$(CONFIG_PPC64) += traps.o process.o init_task.o time.o \
 endif

 extra-$(CONFIG_PPC_FPU)      += fpu.o
+extra-$(CONFIG_PPC64)        += entry_64.o
arch/powerpc/kernel/setup_64.c

@@ -701,17 +701,6 @@ static void __init emergency_stack_init(void)
 		limit)) + PAGE_SIZE;
 }

-extern unsigned long *sys_call_table;
-extern unsigned long sys_ni_syscall;
-#ifdef CONFIG_PPC_MERGE
-#define SYS_CALL_ENTRY64(i) sys_call_table[(i) * 2]
-#define SYS_CALL_ENTRY32(i) sys_call_table[(i) * 2 + 1]
-#else
-extern unsigned long *sys_call_table32;
-#define SYS_CALL_ENTRY64(i) sys_call_table[(i)]
-#define SYS_CALL_ENTRY32(i) sys_call_table32[(i)]
-#endif
-
 /*
  * Called from setup_arch to initialize the bitmap of available
  * syscalls in the systemcfg page
@@ -719,14 +708,17 @@ extern unsigned long *sys_call_table32;
 void __init setup_syscall_map(void)
 {
 	unsigned int i, count64 = 0, count32 = 0;
+	extern unsigned long *sys_call_table;
+	extern unsigned long sys_ni_syscall;

 	for (i = 0; i < __NR_syscalls; i++) {
-		if (SYS_CALL_ENTRY64(i) != sys_ni_syscall) {
+		if (sys_call_table[i*2] != sys_ni_syscall) {
 			count64++;
 			systemcfg->syscall_map_64[i >> 5] |=
 				0x80000000UL >> (i & 0x1f);
 		}
-		if (SYS_CALL_ENTRY32(i) != sys_ni_syscall) {
+		if (sys_call_table[i*2+1] != sys_ni_syscall) {
 			count32++;
 			systemcfg->syscall_map_32[i >> 5] |=
 				0x80000000UL >> (i & 0x1f);
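For readers following the change above: after this merge the function indexes one merged table in which the 64-bit and 32-bit entries for syscall i interleave, at sys_call_table[2*i] and sys_call_table[2*i+1]. Below is a minimal user-space sketch of the same bitmap fill, for illustration only; every demo_* name is hypothetical and 0x1234 merely stands in for a real handler address.

/* Sketch of the syscall-map bit fill, assuming the interleaved layout:
 * entry64(i) = tbl[2*i], entry32(i) = tbl[2*i + 1].
 */
#include <stdio.h>

#define DEMO_NR_SYSCALLS 8
static unsigned long demo_ni_syscall;                 /* stands in for sys_ni_syscall */
static unsigned long demo_table[DEMO_NR_SYSCALLS * 2];
static unsigned int demo_map64[(DEMO_NR_SYSCALLS + 31) / 32];

int main(void)
{
	unsigned int i;

	demo_table[2 * 3] = 0x1234;   /* pretend syscall 3 has a 64-bit handler */

	for (i = 0; i < DEMO_NR_SYSCALLS; i++)
		if (demo_table[i * 2] != demo_ni_syscall)
			/* same bit math as the kernel code: bit 0 is the MSB */
			demo_map64[i >> 5] |= 0x80000000UL >> (i & 0x1f);

	printf("map64[0] = 0x%08x\n", demo_map64[0]);  /* prints 0x10000000 */
	return 0;
}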
arch/powerpc/lib/Makefile

@@ -2,12 +2,16 @@
 # Makefile for ppc-specific library files..
 #

-obj-y			:= strcase.o string.o
+ifeq ($(CONFIG_PPC_MERGE),y)
+obj-y			:= string.o
+endif
+
+obj-y			+= strcase.o
 obj-$(CONFIG_PPC32)	+= div64.o copy_32.o checksum_32.o
-obj-$(CONFIG_PPC64)	+= copypage_64.o copyuser_64.o memcpy_64.o \
-			   usercopy_64.o sstep.o checksum_64.o mem_64.o
+obj-$(CONFIG_PPC64)	+= checksum_64.o copypage_64.o copyuser_64.o \
+			   memcpy_64.o usercopy_64.o mem_64.o
 obj-$(CONFIG_PPC_ISERIES) += e2a.o

 ifeq ($(CONFIG_PPC64),y)
 obj-$(CONFIG_SMP)	+= locks.o
+obj-$(CONFIG_DEBUG_KERNEL) += sstep.o
 endif
arch/ppc64/Makefile

@@ -81,12 +81,14 @@ CFLAGS += $(call cc-option,-funit-at-a-time)
 head-y := arch/ppc64/kernel/head.o
 head-y += arch/powerpc/kernel/fpu.o
+head-y += arch/powerpc/kernel/entry_64.o

 libs-y			+= arch/ppc64/lib/
 core-y			+= arch/ppc64/kernel/ arch/powerpc/kernel/
 core-y			+= arch/powerpc/mm/
 core-y			+= arch/powerpc/sysdev/
 core-y			+= arch/powerpc/platforms/
+core-y			+= arch/powerpc/lib/
 core-$(CONFIG_XMON)	+= arch/ppc64/xmon/
 drivers-$(CONFIG_OPROFILE)	+= arch/powerpc/oprofile/
arch/ppc64/kernel/Makefile

@@ -7,7 +7,7 @@ ifneq ($(CONFIG_PPC_MERGE),y)
 EXTRA_CFLAGS	+= -mno-minimal-toc
 extra-y		:= head.o vmlinux.lds

-obj-y		:= entry.o misc.o prom.o
+obj-y		:= misc.o prom.o

 endif
arch/ppc64/kernel/entry.S deleted 100644 → 0

This diff is collapsed.
arch/ppc64/kernel/misc.S

This diff is collapsed.
arch/ppc64/lib/Makefile

@@ -2,17 +2,4 @@
 # Makefile for ppc64-specific library files..
 #

-lib-y	:= checksum.o string.o strcase.o
+lib-y	:= string.o
-lib-y	+= copypage.o memcpy.o copyuser.o usercopy.o
-
-# Lock primitives are defined as no-ops in include/linux/spinlock.h
-# for non-SMP configs. Don't build the real versions.
-lib-$(CONFIG_SMP)	+= locks.o
-
-# e2a provides EBCDIC to ASCII conversions.
-ifdef CONFIG_PPC_ISERIES
-obj-y	+= e2a.o
-endif
-
-lib-$(CONFIG_DEBUG_KERNEL) += sstep.o
arch/ppc64/lib/checksum.S deleted 100644 → 0

/*
 * This file contains assembly-language implementations
 * of IP-style 1's complement checksum routines.
 *
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 * Severely hacked about by Paul Mackerras (paulus@cs.anu.edu.au).
 */

#include <linux/sys.h>
#include <asm/processor.h>
#include <asm/errno.h>
#include <asm/ppc_asm.h>

/*
 * ip_fast_csum(r3=buf, r4=len) -- Optimized for IP header
 * len is in words and is always >= 5.
 *
 * In practice len == 5, but this is not guaranteed.  So this code does not
 * attempt to use doubleword instructions.
 */
_GLOBAL(ip_fast_csum)
	lwz	r0,0(r3)
	lwzu	r5,4(r3)
	addic.	r4,r4,-2
	addc	r0,r0,r5
	mtctr	r4
	blelr-
1:	lwzu	r4,4(r3)
	adde	r0,r0,r4
	bdnz	1b
	addze	r0,r0		/* add in final carry */
	rldicl	r4,r0,32,0	/* fold two 32-bit halves together */
	add	r0,r0,r4
	srdi	r0,r0,32
	rlwinm	r3,r0,16,0,31	/* fold two halves together */
	add	r3,r0,r3
	not	r3,r3
	srwi	r3,r3,16
	blr

/*
 * Compute checksum of TCP or UDP pseudo-header:
 *   csum_tcpudp_magic(r3=saddr, r4=daddr, r5=len, r6=proto, r7=sum)
 * No real gain trying to do this specially for 64 bit, but
 * the 32 bit addition may spill into the upper bits of
 * the doubleword so we still must fold it down from 64.
 */
_GLOBAL(csum_tcpudp_magic)
	rlwimi	r5,r6,16,0,15	/* put proto in upper half of len */
	addc	r0,r3,r4	/* add 4 32-bit words together */
	adde	r0,r0,r5
	adde	r0,r0,r7
	rldicl	r4,r0,32,0	/* fold 64 bit value */
	add	r0,r4,r0
	srdi	r0,r0,32
	rlwinm	r3,r0,16,0,31	/* fold two halves together */
	add	r3,r0,r3
	not	r3,r3
	srwi	r3,r3,16
	blr

/*
 * Computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit).
 *
 * This code assumes at least halfword alignment, though the length
 * can be any number of bytes.  The sum is accumulated in r5.
 *
 * csum_partial(r3=buff, r4=len, r5=sum)
 */
_GLOBAL(csum_partial)
	subi	r3,r3,8		/* we'll offset by 8 for the loads */
	srdi.	r6,r4,3		/* divide by 8 for doubleword count */
	addic	r5,r5,0		/* clear carry */
	beq	3f		/* if we're doing < 8 bytes */
	andi.	r0,r3,2		/* aligned on a word boundary already? */
	beq+	1f
	lhz	r6,8(r3)	/* do 2 bytes to get aligned */
	addi	r3,r3,2
	subi	r4,r4,2
	addc	r5,r5,r6
	srdi.	r6,r4,3		/* recompute number of doublewords */
	beq	3f		/* any left? */
1:	mtctr	r6
2:	ldu	r6,8(r3)	/* main sum loop */
	adde	r5,r5,r6
	bdnz	2b
	andi.	r4,r4,7		/* compute bytes left to sum after doublewords */
3:	cmpwi	0,r4,4		/* is at least a full word left? */
	blt	4f
	lwz	r6,8(r3)	/* sum this word */
	addi	r3,r3,4
	subi	r4,r4,4
	adde	r5,r5,r6
4:	cmpwi	0,r4,2		/* is at least a halfword left? */
	blt+	5f
	lhz	r6,8(r3)	/* sum this halfword */
	addi	r3,r3,2
	subi	r4,r4,2
	adde	r5,r5,r6
5:	cmpwi	0,r4,1		/* is at least a byte left? */
	bne+	6f
	lbz	r6,8(r3)	/* sum this byte */
	slwi	r6,r6,8		/* this byte is assumed to be the upper byte of a halfword */
	adde	r5,r5,r6
6:	addze	r5,r5		/* add in final carry */
	rldicl	r4,r5,32,0	/* fold two 32-bit halves together */
	add	r3,r4,r5
	srdi	r3,r3,32
	blr

/*
 * Computes the checksum of a memory block at src, length len,
 * and adds in "sum" (32-bit), while copying the block to dst.
 * If an access exception occurs on src or dst, it stores -EFAULT
 * to *src_err or *dst_err respectively, and (for an error on
 * src) zeroes the rest of dst.
 *
 * This code needs to be reworked to take advantage of 64 bit sum+copy.
 * However, due to tokenring halfword alignment problems this will be very
 * tricky.  For now we'll leave it until we instrument it somehow.
 *
 * csum_partial_copy_generic(r3=src, r4=dst, r5=len, r6=sum, r7=src_err, r8=dst_err)
 */
_GLOBAL(csum_partial_copy_generic)
	addic	r0,r6,0
	subi	r3,r3,4
	subi	r4,r4,4
	srwi.	r6,r5,2
	beq	3f		/* if we're doing < 4 bytes */
	andi.	r9,r4,2		/* Align dst to longword boundary */
	beq+	1f
81:	lhz	r6,4(r3)	/* do 2 bytes to get aligned */
	addi	r3,r3,2
	subi	r5,r5,2
91:	sth	r6,4(r4)
	addi	r4,r4,2
	addc	r0,r0,r6
	srwi.	r6,r5,2		/* # words to do */
	beq	3f
1:	mtctr	r6
82:	lwzu	r6,4(r3)	/* the bdnz has zero overhead, so it should */
92:	stwu	r6,4(r4)	/* be unnecessary to unroll this loop */
	adde	r0,r0,r6
	bdnz	82b
	andi.	r5,r5,3
3:	cmpwi	0,r5,2
	blt+	4f
83:	lhz	r6,4(r3)
	addi	r3,r3,2
	subi	r5,r5,2
93:	sth	r6,4(r4)
	addi	r4,r4,2
	adde	r0,r0,r6
4:	cmpwi	0,r5,1
	bne+	5f
84:	lbz	r6,4(r3)
94:	stb	r6,4(r4)
	slwi	r6,r6,8		/* Upper byte of word */
	adde	r0,r0,r6
5:	addze	r3,r0		/* add in final carry (unlikely with 64-bit regs) */
	rldicl	r4,r3,32,0	/* fold 64 bit value */
	add	r3,r4,r3
	srdi	r3,r3,32
	blr

/* These shouldn't go in the fixup section, since that would
   cause the ex_table addresses to get out of order. */

	.globl src_error_1
src_error_1:
	li	r6,0
	subi	r5,r5,2
95:	sth	r6,4(r4)
	addi	r4,r4,2
	srwi.	r6,r5,2
	beq	3f
	mtctr	r6
	.globl src_error_2
src_error_2:
	li	r6,0
96:	stwu	r6,4(r4)
	bdnz	96b
3:	andi.	r5,r5,3
	beq	src_error
	.globl src_error_3
src_error_3:
	li	r6,0
	mtctr	r5
	addi	r4,r4,3
97:	stbu	r6,1(r4)
	bdnz	97b
	.globl src_error
src_error:
	cmpdi	0,r7,0
	beq	1f
	li	r6,-EFAULT
	stw	r6,0(r7)
1:	addze	r3,r0
	blr
	.globl dst_error
dst_error:
	cmpdi	0,r8,0
	beq	1f
	li	r6,-EFAULT
	stw	r6,0(r8)
1:	addze	r3,r0
	blr

.section __ex_table,"a"
	.align	3
	.llong	81b,src_error_1
	.llong	91b,dst_error
	.llong	82b,src_error_2
	.llong	92b,dst_error
	.llong	83b,src_error_3
	.llong	93b,dst_error
	.llong	84b,src_error_3
	.llong	94b,dst_error
	.llong	95b,dst_error
	.llong	96b,dst_error
	.llong	97b,dst_error
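As an aside on the folding idiom these routines share (rldicl/add/srdi to fold 64 bits to 32, then rlwinm/add/not/srwi to fold 32 to 16 and complement), here is an equivalent C sketch. It is an illustration of the arithmetic only, not kernel code.

/* Fold a 64-bit one's-complement accumulator down to the final
 * 16-bit checksum, mirroring the assembly's two folding stages.
 */
#include <stdint.h>

static uint16_t fold_csum(uint64_t sum)
{
	sum = (sum & 0xffffffffULL) + (sum >> 32);  /* fold two 32-bit halves */
	sum = (sum & 0xffffffffULL) + (sum >> 32);  /* absorb the carry */
	sum = (sum & 0xffff) + (sum >> 16);         /* fold two 16-bit halves */
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;                      /* one's complement */
}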
arch/ppc64/lib/copypage.S deleted 100644 → 0

/*
 * arch/ppc64/lib/copypage.S
 *
 * Copyright (C) 2002 Paul Mackerras, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <asm/processor.h>
#include <asm/ppc_asm.h>

_GLOBAL(copy_page)
	std	r31,-8(1)
	std	r30,-16(1)
	std	r29,-24(1)
	std	r28,-32(1)
	std	r27,-40(1)
	std	r26,-48(1)
	std	r25,-56(1)
	std	r24,-64(1)
	std	r23,-72(1)
	std	r22,-80(1)
	std	r21,-88(1)
	std	r20,-96(1)
	li	r5,4096/32 - 1
	addi	r3,r3,-8
	li	r12,5
0:	addi	r5,r5,-24
	mtctr	r12
	ld	r22,640(4)
	ld	r21,512(4)
	ld	r20,384(4)
	ld	r11,256(4)
	ld	r9,128(4)
	ld	r7,0(4)
	ld	r25,648(4)
	ld	r24,520(4)
	ld	r23,392(4)
	ld	r10,264(4)
	ld	r8,136(4)
	ldu	r6,8(4)
	cmpwi	r5,24
1:	std	r22,648(3)
	std	r21,520(3)
	std	r20,392(3)
	std	r11,264(3)
	std	r9,136(3)
	std	r7,8(3)
	ld	r28,648(4)
	ld	r27,520(4)
	ld	r26,392(4)
	ld	r31,264(4)
	ld	r30,136(4)
	ld	r29,8(4)
	std	r25,656(3)
	std	r24,528(3)
	std	r23,400(3)
	std	r10,272(3)
	std	r8,144(3)
	std	r6,16(3)
	ld	r22,656(4)
	ld	r21,528(4)
	ld	r20,400(4)
	ld	r11,272(4)
	ld	r9,144(4)
	ld	r7,16(4)
	std	r28,664(3)
	std	r27,536(3)
	std	r26,408(3)
	std	r31,280(3)
	std	r30,152(3)
	stdu	r29,24(3)
	ld	r25,664(4)
	ld	r24,536(4)
	ld	r23,408(4)
	ld	r10,280(4)
	ld	r8,152(4)
	ldu	r6,24(4)
	bdnz	1b
	std	r22,648(3)
	std	r21,520(3)
	std	r20,392(3)
	std	r11,264(3)
	std	r9,136(3)
	std	r7,8(3)
	addi	r4,r4,640
	addi	r3,r3,648
	bge	0b
	mtctr	r5
	ld	r7,0(4)
	ld	r8,8(4)
	ldu	r9,16(4)
3:	ld	r10,8(4)
	std	r7,8(3)
	ld	r7,16(4)
	std	r8,16(3)
	ld	r8,24(4)
	std	r9,24(3)
	ldu	r9,32(4)
	stdu	r10,32(3)
	bdnz	3b
4:	ld	r10,8(4)
	std	r7,8(3)
	std	r8,16(3)
	std	r9,24(3)
	std	r10,32(3)
9:	ld	r20,-96(1)
	ld	r21,-88(1)
	ld	r22,-80(1)
	ld	r23,-72(1)
	ld	r24,-64(1)
	ld	r25,-56(1)
	ld	r26,-48(1)
	ld	r27,-40(1)
	ld	r28,-32(1)
	ld	r29,-24(1)
	ld	r30,-16(1)
	ld	r31,-8(1)
	blr
arch/ppc64/lib/copyuser.S deleted 100644 → 0

/*
 * arch/ppc64/lib/copyuser.S
 *
 * Copyright (C) 2002 Paul Mackerras, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <asm/processor.h>
#include <asm/ppc_asm.h>

	.align	7
_GLOBAL(__copy_tofrom_user)
	/* first check for a whole page copy on a page boundary */
	cmpldi	cr1,r5,16
	cmpdi	cr6,r5,4096
	or	r0,r3,r4
	neg	r6,r3		/* LS 3 bits = # bytes to 8-byte dest bdry */
	andi.	r0,r0,4095
	std	r3,-24(r1)
	crand	cr0*4+2,cr0*4+2,cr6*4+2
	std	r4,-16(r1)
	std	r5,-8(r1)
	dcbt	0,r4
	beq	.Lcopy_page
	andi.	r6,r6,7
	mtcrf	0x01,r5
	blt	cr1,.Lshort_copy
	bne	.Ldst_unaligned
.Ldst_aligned:
	andi.	r0,r4,7
	addi	r3,r3,-16
	bne	.Lsrc_unaligned
	srdi	r7,r5,4
20:	ld	r9,0(r4)
	addi	r4,r4,-8
	mtctr	r7
	andi.	r5,r5,7
	bf	cr7*4+0,22f
	addi	r3,r3,8
	addi	r4,r4,8
	mr	r8,r9
	blt	cr1,72f
21:	ld	r9,8(r4)
70:	std	r8,8(r3)
22:	ldu	r8,16(r4)
71:	stdu	r9,16(r3)
	bdnz	21b
72:	std	r8,8(r3)
	beq+	3f
	addi	r3,r3,16
23:	ld	r9,8(r4)
.Ldo_tail:
	bf	cr7*4+1,1f
	rotldi	r9,r9,32
73:	stw	r9,0(r3)
	addi	r3,r3,4
1:	bf	cr7*4+2,2f
	rotldi	r9,r9,16
74:	sth	r9,0(r3)
	addi	r3,r3,2
2:	bf	cr7*4+3,3f
	rotldi	r9,r9,8
75:	stb	r9,0(r3)
3:	li	r3,0
	blr

.Lsrc_unaligned:
	srdi	r6,r5,3
	addi	r5,r5,-16
	subf	r4,r0,r4
	srdi	r7,r5,4
	sldi	r10,r0,3
	cmpldi	cr6,r6,3
	andi.	r5,r5,7
	mtctr	r7
	subfic	r11,r10,64
	add	r5,r5,r0
	bt	cr7*4+0,28f

24:	ld	r9,0(r4)	/* 3+2n loads, 2+2n stores */
25:	ld	r0,8(r4)
	sld	r6,r9,r10
26:	ldu	r9,16(r4)
	srd	r7,r0,r11
	sld	r8,r0,r10
	or	r7,r7,r6
	blt	cr6,79f
27:	ld	r0,8(r4)
	b	2f

28:	ld	r0,0(r4)	/* 4+2n loads, 3+2n stores */
29:	ldu	r9,8(r4)
	sld	r8,r0,r10
	addi	r3,r3,-8
	blt	cr6,5f
30:	ld	r0,8(r4)
	srd	r12,r9,r11
	sld	r6,r9,r10
31:	ldu	r9,16(r4)
	or	r12,r8,r12
	srd	r7,r0,r11
	sld	r8,r0,r10
	addi	r3,r3,16
	beq	cr6,78f

1:	or	r7,r7,r6
32:	ld	r0,8(r4)
76:	std	r12,8(r3)
2:	srd	r12,r9,r11
	sld	r6,r9,r10
33:	ldu	r9,16(r4)
	or	r12,r8,r12
77:	stdu	r7,16(r3)
	srd	r7,r0,r11
	sld	r8,r0,r10
	bdnz	1b

78:	std	r12,8(r3)
	or	r7,r7,r6
79:	std	r7,16(r3)
5:	srd	r12,r9,r11
	or	r12,r8,r12
80:	std	r12,24(r3)
	bne	6f
	li	r3,0
	blr
6:	cmpwi	cr1,r5,8
	addi	r3,r3,32
	sld	r9,r9,r10
	ble	cr1,.Ldo_tail
34:	ld	r0,8(r4)
	srd	r7,r0,r11
	or	r9,r7,r9
	b	.Ldo_tail

.Ldst_unaligned:
	mtcrf	0x01,r6		/* put #bytes to 8B bdry into cr7 */
	subf	r5,r6,r5
	li	r7,0
	cmpldi	r1,r5,16
	bf	cr7*4+3,1f
35:	lbz	r0,0(r4)
81:	stb	r0,0(r3)
	addi	r7,r7,1
1:	bf	cr7*4+2,2f
36:	lhzx	r0,r7,r4
82:	sthx	r0,r7,r3
	addi	r7,r7,2
2:	bf	cr7*4+1,3f
37:	lwzx	r0,r7,r4
83:	stwx	r0,r7,r3
3:	mtcrf	0x01,r5
	add	r4,r6,r4
	add	r3,r6,r3
	b	.Ldst_aligned

.Lshort_copy:
	bf	cr7*4+0,1f
38:	lwz	r0,0(r4)
39:	lwz	r9,4(r4)
	addi	r4,r4,8
84:	stw	r0,0(r3)
85:	stw	r9,4(r3)
	addi	r3,r3,8
1:	bf	cr7*4+1,2f
40:	lwz	r0,0(r4)
	addi	r4,r4,4
86:	stw	r0,0(r3)
	addi	r3,r3,4
2:	bf	cr7*4+2,3f
41:	lhz	r0,0(r4)
	addi	r4,r4,2
87:	sth	r0,0(r3)
	addi	r3,r3,2
3:	bf	cr7*4+3,4f
42:	lbz	r0,0(r4)
88:	stb	r0,0(r3)
4:	li	r3,0
	blr

/*
 * exception handlers follow
 * we have to return the number of bytes not copied
 * for an exception on a load, we set the rest of the destination to 0
 */

136:
137:
	add	r3,r3,r7
	b	1f
130:
131:
	addi	r3,r3,8
120:
122:
124:
125:
126:
127:
128:
129:
133:
	addi	r3,r3,8
121:
132:
	addi	r3,r3,8
123:
134:
135:
138:
139:
140:
141:
142:
/*
 * here we have had a fault on a load and r3 points to the first
 * unmodified byte of the destination
 */
1:	ld	r6,-24(r1)
	ld	r4,-16(r1)
	ld	r5,-8(r1)
	subf	r6,r6,r3
	add	r4,r4,r6
	subf	r5,r6,r5	/* #bytes left to go */

/*
 * first see if we can copy any more bytes before hitting another exception
 */
	mtctr	r5
43:	lbz	r0,0(r4)
	addi	r4,r4,1
89:	stb	r0,0(r3)
	addi	r3,r3,1
	bdnz	43b
	li	r3,0		/* huh? all copied successfully this time? */
	blr

/*
 * here we have trapped again, need to clear ctr bytes starting at r3
 */
143:	mfctr	r5
	li	r0,0
	mr	r4,r3
	mr	r3,r5		/* return the number of bytes not copied */
1:	andi.	r9,r4,7
	beq	3f
90:	stb	r0,0(r4)
	addic.	r5,r5,-1
	addi	r4,r4,1
	bne	1b
	blr
3:	cmpldi	cr1,r5,8
	srdi	r9,r5,3
	andi.	r5,r5,7
	blt	cr1,93f
	mtctr	r9
91:	std	r0,0(r4)
	addi	r4,r4,8
	bdnz	91b
93:	beqlr
	mtctr	r5
92:	stb	r0,0(r4)
	addi	r4,r4,1
	bdnz	92b
	blr

/*
 * exception handlers for stores: we just need to work
 * out how many bytes weren't copied
 */
182:
183:
	add	r3,r3,r7
	b	1f
180:
	addi	r3,r3,8
171:
177:
	addi	r3,r3,8
170:
172:
176:
178:
	addi	r3,r3,4
185:
	addi	r3,r3,4
173:
174:
175:
179:
181:
184:
186:
187:
188:
189:
1:
	ld	r6,-24(r1)
	ld	r5,-8(r1)
	add	r6,r6,r5
	subf	r3,r3,r6	/* #bytes not copied */
190:
191:
192:
	blr			/* #bytes not copied in r3 */

	.section __ex_table,"a"
	.align	3
	.llong	20b,120b
	.llong	21b,121b
	.llong	70b,170b
	.llong	22b,122b
	.llong	71b,171b
	.llong	72b,172b
	.llong	23b,123b
	.llong	73b,173b
	.llong	74b,174b
	.llong	75b,175b
	.llong	24b,124b
	.llong	25b,125b
	.llong	26b,126b
	.llong	27b,127b
	.llong	28b,128b
	.llong	29b,129b
	.llong	30b,130b
	.llong	31b,131b
	.llong	32b,132b
	.llong	76b,176b
	.llong	33b,133b
	.llong	77b,177b
	.llong	78b,178b
	.llong	79b,179b
	.llong	80b,180b
	.llong	34b,134b
	.llong	35b,135b
	.llong	81b,181b
	.llong	36b,136b
	.llong	82b,182b
	.llong	37b,137b
	.llong	83b,183b
	.llong	38b,138b
	.llong	39b,139b
	.llong	84b,184b
	.llong	85b,185b
	.llong	40b,140b
	.llong	86b,186b
	.llong	41b,141b
	.llong	87b,187b
	.llong	42b,142b
	.llong	88b,188b
	.llong	43b,143b
	.llong	89b,189b
	.llong	90b,190b
	.llong	91b,191b
	.llong	92b,192b
	.text

/*
 * Routine to copy a whole page of data, optimized for POWER4.
 * On POWER4 it is more than 50% faster than the simple loop
 * above (following the .Ldst_aligned label) but it runs slightly
 * slower on POWER3.
 */
.Lcopy_page:
	std	r31,-32(1)
	std	r30,-40(1)
	std	r29,-48(1)
	std	r28,-56(1)
	std	r27,-64(1)
	std	r26,-72(1)
	std	r25,-80(1)
	std	r24,-88(1)
	std	r23,-96(1)
	std	r22,-104(1)
	std	r21,-112(1)
	std	r20,-120(1)
	li	r5,4096/32 - 1
	addi	r3,r3,-8
	li	r0,5
0:	addi	r5,r5,-24
	mtctr	r0
20:	ld	r22,640(4)
21:	ld	r21,512(4)
22:	ld	r20,384(4)
23:	ld	r11,256(4)
24:	ld	r9,128(4)
25:	ld	r7,0(4)
26:	ld	r25,648(4)
27:	ld	r24,520(4)
28:	ld	r23,392(4)
29:	ld	r10,264(4)
30:	ld	r8,136(4)
31:	ldu	r6,8(4)
	cmpwi	r5,24
1:
32:	std	r22,648(3)
33:	std	r21,520(3)
34:	std	r20,392(3)
35:	std	r11,264(3)
36:	std	r9,136(3)
37:	std	r7,8(3)
38:	ld	r28,648(4)
39:	ld	r27,520(4)
40:	ld	r26,392(4)
41:	ld	r31,264(4)
42:	ld	r30,136(4)
43:	ld	r29,8(4)
44:	std	r25,656(3)
45:	std	r24,528(3)
46:	std	r23,400(3)
47:	std	r10,272(3)
48:	std	r8,144(3)
49:	std	r6,16(3)
50:	ld	r22,656(4)
51:	ld	r21,528(4)
52:	ld	r20,400(4)
53:	ld	r11,272(4)
54:	ld	r9,144(4)
55:	ld	r7,16(4)
56:	std	r28,664(3)
57:	std	r27,536(3)
58:	std	r26,408(3)
59:	std	r31,280(3)
60:	std	r30,152(3)
61:	stdu	r29,24(3)
62:	ld	r25,664(4)
63:	ld	r24,536(4)
64:	ld	r23,408(4)
65:	ld	r10,280(4)
66:	ld	r8,152(4)
67:	ldu	r6,24(4)
	bdnz	1b
68:	std	r22,648(3)
69:	std	r21,520(3)
70:	std	r20,392(3)
71:	std	r11,264(3)
72:	std	r9,136(3)
73:	std	r7,8(3)
74:	addi	r4,r4,640
75:	addi	r3,r3,648
	bge	0b
	mtctr	r5
76:	ld	r7,0(4)
77:	ld	r8,8(4)
78:	ldu	r9,16(4)
3:
79:	ld	r10,8(4)
80:	std	r7,8(3)
81:	ld	r7,16(4)
82:	std	r8,16(3)
83:	ld	r8,24(4)
84:	std	r9,24(3)
85:	ldu	r9,32(4)
86:	stdu	r10,32(3)
	bdnz	3b
4:
87:	ld	r10,8(4)
88:	std	r7,8(3)
89:	std	r8,16(3)
90:	std	r9,24(3)
91:	std	r10,32(3)
9:	ld	r20,-120(1)
	ld	r21,-112(1)
	ld	r22,-104(1)
	ld	r23,-96(1)
	ld	r24,-88(1)
	ld	r25,-80(1)
	ld	r26,-72(1)
	ld	r27,-64(1)
	ld	r28,-56(1)
	ld	r29,-48(1)
	ld	r30,-40(1)
	ld	r31,-32(1)
	li	r3,0
	blr

/*
 * on an exception, reset to the beginning and jump back into the
 * standard __copy_tofrom_user
 */
100:	ld	r20,-120(1)
	ld	r21,-112(1)
	ld	r22,-104(1)
	ld	r23,-96(1)
	ld	r24,-88(1)
	ld	r25,-80(1)
	ld	r26,-72(1)
	ld	r27,-64(1)
	ld	r28,-56(1)
	ld	r29,-48(1)
	ld	r30,-40(1)
	ld	r31,-32(1)
	ld	r3,-24(r1)
	ld	r4,-16(r1)
	li	r5,4096
	b	.Ldst_aligned

	.section __ex_table,"a"
	.align	3
	.llong	20b,100b
	.llong	21b,100b
	.llong	22b,100b
	.llong	23b,100b
	.llong	24b,100b
	.llong	25b,100b
	.llong	26b,100b
	.llong	27b,100b
	.llong	28b,100b
	.llong	29b,100b
	.llong	30b,100b
	.llong	31b,100b
	.llong	32b,100b
	.llong	33b,100b
	.llong	34b,100b
	.llong	35b,100b
	.llong	36b,100b
	.llong	37b,100b
	.llong	38b,100b
	.llong	39b,100b
	.llong	40b,100b
	.llong	41b,100b
	.llong	42b,100b
	.llong	43b,100b
	.llong	44b,100b
	.llong	45b,100b
	.llong	46b,100b
	.llong	47b,100b
	.llong	48b,100b
	.llong	49b,100b
	.llong	50b,100b
	.llong	51b,100b
	.llong	52b,100b
	.llong	53b,100b
	.llong	54b,100b
	.llong	55b,100b
	.llong	56b,100b
	.llong	57b,100b
	.llong	58b,100b
	.llong	59b,100b
	.llong	60b,100b
	.llong	61b,100b
	.llong	62b,100b
	.llong	63b,100b
	.llong	64b,100b
	.llong	65b,100b
	.llong	66b,100b
	.llong	67b,100b
	.llong	68b,100b
	.llong	69b,100b
	.llong	70b,100b
	.llong	71b,100b
	.llong	72b,100b
	.llong	73b,100b
	.llong	74b,100b
	.llong	75b,100b
	.llong	76b,100b
	.llong	77b,100b
	.llong	78b,100b
	.llong	79b,100b
	.llong	80b,100b
	.llong	81b,100b
	.llong	82b,100b
	.llong	83b,100b
	.llong	84b,100b
	.llong	85b,100b
	.llong	86b,100b
	.llong	87b,100b
	.llong	88b,100b
	.llong	89b,100b
	.llong	90b,100b
	.llong	91b,100b
arch/ppc64/lib/e2a.c deleted 100644 → 0

/*
 * arch/ppc64/lib/e2a.c
 *
 * EBCDIC to ASCII conversion
 *
 * This function moved here from arch/ppc64/kernel/viopath.c
 *
 * (C) Copyright 2000-2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) anyu later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <linux/module.h>

unsigned char e2a(unsigned char x)
{
	switch (x) {
	case 0xF0: return '0';
	case 0xF1: return '1';
	case 0xF2: return '2';
	case 0xF3: return '3';
	case 0xF4: return '4';
	case 0xF5: return '5';
	case 0xF6: return '6';
	case 0xF7: return '7';
	case 0xF8: return '8';
	case 0xF9: return '9';
	case 0xC1: return 'A';
	case 0xC2: return 'B';
	case 0xC3: return 'C';
	case 0xC4: return 'D';
	case 0xC5: return 'E';
	case 0xC6: return 'F';
	case 0xC7: return 'G';
	case 0xC8: return 'H';
	case 0xC9: return 'I';
	case 0xD1: return 'J';
	case 0xD2: return 'K';
	case 0xD3: return 'L';
	case 0xD4: return 'M';
	case 0xD5: return 'N';
	case 0xD6: return 'O';
	case 0xD7: return 'P';
	case 0xD8: return 'Q';
	case 0xD9: return 'R';
	case 0xE2: return 'S';
	case 0xE3: return 'T';
	case 0xE4: return 'U';
	case 0xE5: return 'V';
	case 0xE6: return 'W';
	case 0xE7: return 'X';
	case 0xE8: return 'Y';
	case 0xE9: return 'Z';
	}
	return ' ';
}
EXPORT_SYMBOL(e2a);
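A hypothetical caller sketch for e2a() above, converting an EBCDIC byte buffer in place the way its original home in arch/ppc64/kernel/viopath.c used it. The byte values (EBCDIC for "HVLP1") are illustrative only.

/* Hypothetical caller: convert an EBCDIC string to ASCII in place. */
static void demo_convert(void)
{
	unsigned char name[] = { 0xC8, 0xE5, 0xD3, 0xD7, 0xF1, 0x00 };
	int i;

	for (i = 0; name[i] != 0; i++)
		name[i] = e2a(name[i]);
	/* name now holds "HVLP1"; unmapped bytes would become ' ' */
}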
arch/ppc64/lib/locks.c deleted 100644 → 0

/*
 * Spin and read/write lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *   Rework to support virtual processors
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/stringify.h>
#include <asm/hvcall.h>
#include <asm/iSeries/HvCall.h>

/* waiting for a spinlock... */
#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)

void __spin_yield(raw_spinlock_t *lock)
{
	unsigned int lock_value, holder_cpu, yield_count;
	struct paca_struct *holder_paca;

	lock_value = lock->slock;
	if (lock_value == 0)
		return;
	holder_cpu = lock_value & 0xffff;
	BUG_ON(holder_cpu >= NR_CPUS);
	holder_paca = &paca[holder_cpu];
	yield_count = holder_paca->lppaca.yield_count;
	if ((yield_count & 1) == 0)
		return;		/* virtual cpu is currently running */
	rmb();
	if (lock->slock != lock_value)
		return;		/* something has changed */
#ifdef CONFIG_PPC_ISERIES
	HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
		((u64)holder_cpu << 32) | yield_count);
#else
	plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(holder_cpu),
			   yield_count);
#endif
}

/*
 * Waiting for a read lock or a write lock on a rwlock...
 * This turns out to be the same for read and write locks, since
 * we only know the holder if it is write-locked.
 */
void __rw_yield(raw_rwlock_t *rw)
{
	int lock_value;
	unsigned int holder_cpu, yield_count;
	struct paca_struct *holder_paca;

	lock_value = rw->lock;
	if (lock_value >= 0)
		return;		/* no write lock at present */
	holder_cpu = lock_value & 0xffff;
	BUG_ON(holder_cpu >= NR_CPUS);
	holder_paca = &paca[holder_cpu];
	yield_count = holder_paca->lppaca.yield_count;
	if ((yield_count & 1) == 0)
		return;		/* virtual cpu is currently running */
	rmb();
	if (rw->lock != lock_value)
		return;		/* something has changed */
#ifdef CONFIG_PPC_ISERIES
	HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
		((u64)holder_cpu << 32) | yield_count);
#else
	plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(holder_cpu),
			   yield_count);
#endif
}
#endif

void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{
	while (lock->slock) {
		HMT_low();
		if (SHARED_PROCESSOR)
			__spin_yield(lock);
	}
	HMT_medium();
}

EXPORT_SYMBOL(__raw_spin_unlock_wait);
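One detail worth spelling out from __spin_yield()/__rw_yield() above: as the inline comments indicate, an even lppaca.yield_count is treated as "the holder's virtual cpu is currently running" (so conferring cycles to it would be pointless), and the yield hypervisor call is made only when the count is odd. A one-line restatement as a sketch:

/* Sketch of the parity test used above: even yield_count means the
 * lock holder's virtual cpu is dispatched right now; only an odd
 * count (holder preempted) makes it worth conferring our time slice.
 */
static int demo_holder_preempted(unsigned int yield_count)
{
	return (yield_count & 1) != 0;
}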
arch/ppc64/lib/memcpy.S deleted 100644 → 0

/*
 * arch/ppc64/lib/memcpy.S
 *
 * Copyright (C) 2002 Paul Mackerras, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <asm/processor.h>
#include <asm/ppc_asm.h>

	.align	7
_GLOBAL(memcpy)
	mtcrf	0x01,r5
	cmpldi	cr1,r5,16
	neg	r6,r3		# LS 3 bits = # bytes to 8-byte dest bdry
	andi.	r6,r6,7
	dcbt	0,r4
	blt	cr1,.Lshort_copy
	bne	.Ldst_unaligned
.Ldst_aligned:
	andi.	r0,r4,7
	addi	r3,r3,-16
	bne	.Lsrc_unaligned
	srdi	r7,r5,4
	ld	r9,0(r4)
	addi	r4,r4,-8
	mtctr	r7
	andi.	r5,r5,7
	bf	cr7*4+0,2f
	addi	r3,r3,8
	addi	r4,r4,8
	mr	r8,r9
	blt	cr1,3f
1:	ld	r9,8(r4)
	std	r8,8(r3)
2:	ldu	r8,16(r4)
	stdu	r9,16(r3)
	bdnz	1b
3:	std	r8,8(r3)
	beqlr
	addi	r3,r3,16
	ld	r9,8(r4)
.Ldo_tail:
	bf	cr7*4+1,1f
	rotldi	r9,r9,32
	stw	r9,0(r3)
	addi	r3,r3,4
1:	bf	cr7*4+2,2f
	rotldi	r9,r9,16
	sth	r9,0(r3)
	addi	r3,r3,2
2:	bf	cr7*4+3,3f
	rotldi	r9,r9,8
	stb	r9,0(r3)
3:	blr

.Lsrc_unaligned:
	srdi	r6,r5,3
	addi	r5,r5,-16
	subf	r4,r0,r4
	srdi	r7,r5,4
	sldi	r10,r0,3
	cmpdi	cr6,r6,3
	andi.	r5,r5,7
	mtctr	r7
	subfic	r11,r10,64
	add	r5,r5,r0

	bt	cr7*4+0,0f

	ld	r9,0(r4)	# 3+2n loads, 2+2n stores
	ld	r0,8(r4)
	sld	r6,r9,r10
	ldu	r9,16(r4)
	srd	r7,r0,r11
	sld	r8,r0,r10
	or	r7,r7,r6
	blt	cr6,4f
	ld	r0,8(r4)
	# s1<< in r8, d0=(s0<<|s1>>) in r7, s3 in r0, s2 in r9, nix in r6 & r12
	b	2f

0:	ld	r0,0(r4)	# 4+2n loads, 3+2n stores
	ldu	r9,8(r4)
	sld	r8,r0,r10
	addi	r3,r3,-8
	blt	cr6,5f
	ld	r0,8(r4)
	srd	r12,r9,r11
	sld	r6,r9,r10
	ldu	r9,16(r4)
	or	r12,r8,r12
	srd	r7,r0,r11
	sld	r8,r0,r10
	addi	r3,r3,16
	beq	cr6,3f

	# d0=(s0<<|s1>>) in r12, s1<< in r6, s2>> in r7, s2<< in r8, s3 in r9
1:	or	r7,r7,r6
	ld	r0,8(r4)
	std	r12,8(r3)
2:	srd	r12,r9,r11
	sld	r6,r9,r10
	ldu	r9,16(r4)
	or	r12,r8,r12
	stdu	r7,16(r3)
	srd	r7,r0,r11
	sld	r8,r0,r10
	bdnz	1b

3:	std	r12,8(r3)
	or	r7,r7,r6
4:	std	r7,16(r3)
5:	srd	r12,r9,r11
	or	r12,r8,r12
	std	r12,24(r3)
	beqlr
	cmpwi	cr1,r5,8
	addi	r3,r3,32
	sld	r9,r9,r10
	ble	cr1,.Ldo_tail
	ld	r0,8(r4)
	srd	r7,r0,r11
	or	r9,r7,r9
	b	.Ldo_tail

.Ldst_unaligned:
	mtcrf	0x01,r6		# put #bytes to 8B bdry into cr7
	subf	r5,r6,r5
	li	r7,0
	cmpldi	r1,r5,16
	bf	cr7*4+3,1f
	lbz	r0,0(r4)
	stb	r0,0(r3)
	addi	r7,r7,1
1:	bf	cr7*4+2,2f
	lhzx	r0,r7,r4
	sthx	r0,r7,r3
	addi	r7,r7,2
2:	bf	cr7*4+1,3f
	lwzx	r0,r7,r4
	stwx	r0,r7,r3
3:	mtcrf	0x01,r5
	add	r4,r6,r4
	add	r3,r6,r3
	b	.Ldst_aligned

.Lshort_copy:
	bf	cr7*4+0,1f
	lwz	r0,0(r4)
	lwz	r9,4(r4)
	addi	r4,r4,8
	stw	r0,0(r3)
	stw	r9,4(r3)
	addi	r3,r3,8
1:	bf	cr7*4+1,2f
	lwz	r0,0(r4)
	addi	r4,r4,4
	stw	r0,0(r3)
	addi	r3,r3,4
2:	bf	cr7*4+2,3f
	lhz	r0,0(r4)
	addi	r4,r4,2
	sth	r0,0(r3)
	addi	r3,r3,2
3:	bf	cr7*4+3,4f
	lbz	r0,0(r4)
	stb	r0,0(r3)
4:	blr
arch/ppc64/lib/sstep.c deleted 100644 → 0

/*
 * Single-step support.
 *
 * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <asm/sstep.h>
#include <asm/processor.h>

extern char system_call_common[];

/* Bits in SRR1 that are copied from MSR */
#define MSR_MASK	0xffffffff87c0ffff

/*
 * Determine whether a conditional branch instruction would branch.
 */
static int branch_taken(unsigned int instr, struct pt_regs *regs)
{
	unsigned int bo = (instr >> 21) & 0x1f;
	unsigned int bi;

	if ((bo & 4) == 0) {
		/* decrement counter */
		--regs->ctr;
		if (((bo >> 1) & 1) ^ (regs->ctr == 0))
			return 0;
	}
	if ((bo & 0x10) == 0) {
		/* check bit from CR */
		bi = (instr >> 16) & 0x1f;
		if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
			return 0;
	}
	return 1;
}

/*
 * Emulate instructions that cause a transfer of control.
 * Returns 1 if the step was emulated, 0 if not,
 * or -1 if the instruction is one that should not be stepped,
 * such as an rfid, or a mtmsrd that would clear MSR_RI.
 */
int emulate_step(struct pt_regs *regs, unsigned int instr)
{
	unsigned int opcode, rd;
	unsigned long int imm;

	opcode = instr >> 26;
	switch (opcode) {
	case 16:	/* bc */
		imm = (signed short)(instr & 0xfffc);
		if ((instr & 2) == 0)
			imm += regs->nip;
		regs->nip += 4;
		if ((regs->msr & MSR_SF) == 0)
			regs->nip &= 0xffffffffUL;
		if (instr & 1)
			regs->link = regs->nip;
		if (branch_taken(instr, regs))
			regs->nip = imm;
		return 1;
	case 17:	/* sc */
		/*
		 * N.B. this uses knowledge about how the syscall
		 * entry code works.  If that is changed, this will
		 * need to be changed also.
		 */
		regs->gpr[9] = regs->gpr[13];
		regs->gpr[11] = regs->nip + 4;
		regs->gpr[12] = regs->msr & MSR_MASK;
		regs->gpr[13] = (unsigned long) get_paca();
		regs->nip = (unsigned long) &system_call_common;
		regs->msr = MSR_KERNEL;
		return 1;
	case 18:	/* b */
		imm = instr & 0x03fffffc;
		if (imm & 0x02000000)
			imm -= 0x04000000;
		if ((instr & 2) == 0)
			imm += regs->nip;
		if (instr & 1) {
			regs->link = regs->nip + 4;
			if ((regs->msr & MSR_SF) == 0)
				regs->link &= 0xffffffffUL;
		}
		if ((regs->msr & MSR_SF) == 0)
			imm &= 0xffffffffUL;
		regs->nip = imm;
		return 1;
	case 19:
		switch (instr & 0x7fe) {
		case 0x20:	/* bclr */
		case 0x420:	/* bcctr */
			imm = (instr & 0x400)? regs->ctr: regs->link;
			regs->nip += 4;
			if ((regs->msr & MSR_SF) == 0) {
				regs->nip &= 0xffffffffUL;
				imm &= 0xffffffffUL;
			}
			if (instr & 1)
				regs->link = regs->nip;
			if (branch_taken(instr, regs))
				regs->nip = imm;
			return 1;
		case 0x24:	/* rfid, scary */
			return -1;
		}
	case 31:
		rd = (instr >> 21) & 0x1f;
		switch (instr & 0x7fe) {
		case 0xa6:	/* mfmsr */
			regs->gpr[rd] = regs->msr & MSR_MASK;
			regs->nip += 4;
			if ((regs->msr & MSR_SF) == 0)
				regs->nip &= 0xffffffffUL;
			return 1;
		case 0x164:	/* mtmsrd */
			/* only MSR_EE and MSR_RI get changed if bit 15 set */
			/* mtmsrd doesn't change MSR_HV and MSR_ME */
			imm = (instr & 0x10000)? 0x8002: 0xefffffffffffefffUL;
			imm = (regs->msr & MSR_MASK & ~imm)
				| (regs->gpr[rd] & imm);
			if ((imm & MSR_RI) == 0)
				/* can't step mtmsrd that would clear MSR_RI */
				return -1;
			regs->msr = imm;
			regs->nip += 4;
			if ((imm & MSR_SF) == 0)
				regs->nip &= 0xffffffffUL;
			return 1;
		}
	}
	return 0;
}
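To make the BO/BI field decoding in branch_taken() concrete, here is a small illustrative program (not kernel code) that extracts those fields from a hypothetical bc-family instruction word: a bdnz-style branch with BO = 0b10000, for which the CTR is decremented and the condition register is ignored. The instruction word is constructed for the example only.

/* Illustrative decode of a "bc" instruction word (primary opcode 16):
 * instr[25:21] = BO, instr[20:16] = BI.  0x4200fffc encodes roughly
 * "bdnz .-4": BO = 16 (decrement CTR, branch while CTR != 0, CR ignored).
 */
#include <stdio.h>

int main(void)
{
	unsigned int instr = 0x4200fffc;          /* hypothetical example word */
	unsigned int opcode = instr >> 26;        /* 16 -> bc family */
	unsigned int bo = (instr >> 21) & 0x1f;   /* 16 -> ctr-decrementing */
	unsigned int bi = (instr >> 16) & 0x1f;   /* 0, unused since BO bit 4 set */

	printf("opcode = %u, BO = %u, BI = %u\n", opcode, bo, bi);
	return 0;
}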
arch/ppc64/lib/strcase.c deleted 100644 → 0

/*
 * c 2001 PPC 64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/ctype.h>

int strcasecmp(const char *s1, const char *s2)
{
	int c1, c2;

	do {
		c1 = tolower(*s1++);
		c2 = tolower(*s2++);
	} while (c1 == c2 && c1 != 0);
	return c1 - c2;
}

int strncasecmp(const char *s1, const char *s2, int n)
{
	int c1, c2;

	do {
		c1 = tolower(*s1++);
		c2 = tolower(*s2++);
	} while ((--n > 0) && c1 == c2 && c1 != 0);
	return c1 - c2;
}
arch/ppc64/lib/string.S

@@ -65,112 +65,6 @@ _GLOBAL(strlen)
 	subf	r3,r3,r4
 	blr
-
-_GLOBAL(memset)
-	neg	r0,r3
-	rlwimi	r4,r4,8,16,23
-	andi.	r0,r0,7		/* # bytes to be 8-byte aligned */
-	rlwimi	r4,r4,16,0,15
-	cmplw	cr1,r5,r0	/* do we get that far? */
-	rldimi	r4,r4,32,0
-	mtcrf	1,r0
-	mr	r6,r3
-	blt	cr1,8f
-	beq+	3f		/* if already 8-byte aligned */
-	subf	r5,r0,r5
-	bf	31,1f
-	stb	r4,0(r6)
-	addi	r6,r6,1
-1:	bf	30,2f
-	sth	r4,0(r6)
-	addi	r6,r6,2
-2:	bf	29,3f
-	stw	r4,0(r6)
-	addi	r6,r6,4
-3:	srdi.	r0,r5,6
-	clrldi	r5,r5,58
-	mtctr	r0
-	beq	5f
-4:	std	r4,0(r6)
-	std	r4,8(r6)
-	std	r4,16(r6)
-	std	r4,24(r6)
-	std	r4,32(r6)
-	std	r4,40(r6)
-	std	r4,48(r6)
-	std	r4,56(r6)
-	addi	r6,r6,64
-	bdnz	4b
-5:	srwi.	r0,r5,3
-	clrlwi	r5,r5,29
-	mtcrf	1,r0
-	beq	8f
-	bf	29,6f
-	std	r4,0(r6)
-	std	r4,8(r6)
-	std	r4,16(r6)
-	std	r4,24(r6)
-	addi	r6,r6,32
-6:	bf	30,7f
-	std	r4,0(r6)
-	std	r4,8(r6)
-	addi	r6,r6,16
-7:	bf	31,8f
-	std	r4,0(r6)
-	addi	r6,r6,8
-8:	cmpwi	r5,0
-	mtcrf	1,r5
-	beqlr+
-	bf	29,9f
-	stw	r4,0(r6)
-	addi	r6,r6,4
-9:	bf	30,10f
-	sth	r4,0(r6)
-	addi	r6,r6,2
-10:	bflr	31
-	stb	r4,0(r6)
-	blr
-
-_GLOBAL(memmove)
-	cmplw	0,r3,r4
-	bgt	.backwards_memcpy
-	b	.memcpy
-
-_GLOBAL(backwards_memcpy)
-	rlwinm.	r7,r5,32-3,3,31	/* r0 = r5 >> 3 */
-	add	r6,r3,r5
-	add	r4,r4,r5
-	beq	2f
-	andi.	r0,r6,3
-	mtctr	r7
-	bne	5f
-1:	lwz	r7,-4(r4)
-	lwzu	r8,-8(r4)
-	stw	r7,-4(r6)
-	stwu	r8,-8(r6)
-	bdnz	1b
-	andi.	r5,r5,7
-2:	cmplwi	0,r5,4
-	blt	3f
-	lwzu	r0,-4(r4)
-	subi	r5,r5,4
-	stwu	r0,-4(r6)
-3:	cmpwi	0,r5,0
-	beqlr
-	mtctr	r5
-4:	lbzu	r0,-1(r4)
-	stbu	r0,-1(r6)
-	bdnz	4b
-	blr
-5:	mtctr	r0
-6:	lbzu	r7,-1(r4)
-	stbu	r7,-1(r6)
-	bdnz	6b
-	subf	r5,r0,r5
-	rlwinm.	r7,r5,32-3,3,31
-	beq	2b
-	mtctr	r7
-	b	1b
-
 _GLOBAL(memcmp)
 	cmpwi	0,r5,0
 	ble-	2f
arch/ppc64/lib/usercopy.c deleted 100644 → 0

/*
 * Functions which are too large to be inlined.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <asm/uaccess.h>

unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (likely(access_ok(VERIFY_READ, from, n)))
		n = __copy_from_user(to, from, n);
	else
		memset(to, 0, n);
	return n;
}

unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (likely(access_ok(VERIFY_WRITE, to, n)))
		n = __copy_to_user(to, from, n);
	return n;
}

unsigned long copy_in_user(void __user *to, const void __user *from,
			   unsigned long n)
{
	might_sleep();
	if (likely(access_ok(VERIFY_READ, from, n) &&
	    access_ok(VERIFY_WRITE, to, n)))
		n = __copy_tofrom_user(to, from, n);
	return n;
}

EXPORT_SYMBOL(copy_from_user);
EXPORT_SYMBOL(copy_to_user);
EXPORT_SYMBOL(copy_in_user);
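A reminder of the contract these wrappers implement: the return value is the number of bytes that could not be copied (0 on success), which callers conventionally turn into -EFAULT. A hypothetical caller sketch, assuming the same kernel context as the file above:

/* Hypothetical caller: a nonzero return from copy_from_user() is the
 * count of bytes left uncopied, normally converted to -EFAULT.
 */
static int demo_fetch(void *kbuf, const void __user *ubuf, unsigned long len)
{
	if (copy_from_user(kbuf, ubuf, len))
		return -EFAULT;
	return 0;
}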