Kirill Smelkov / linux · Commits

Commit cf64c2a9 authored Feb 26, 2021 by David S. Miller
Merge branch 'work.sparc32' of
git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Parents: b9d62433 73686e78
Showing 14 changed files with 193 additions and 526 deletions (+193 -526)
arch/sparc/include/asm/elf_64.h        +0   -1
arch/sparc/include/asm/extable.h       +2   -2
arch/sparc/include/asm/uaccess.h       +3   -0
arch/sparc/include/asm/uaccess_32.h    +0   -38
arch/sparc/include/asm/uaccess_64.h    +0   -1
arch/sparc/kernel/unaligned_32.c       +5   -5
arch/sparc/lib/checksum_32.S           +27  -37
arch/sparc/lib/copy_user.S             +112 -203
arch/sparc/lib/memset.S                +33  -54
arch/sparc/mm/Makefile                 +1   -1
arch/sparc/mm/extable.c                +0   -107
arch/sparc/mm/fault_32.c               +10  -70
arch/sparc/mm/mm_32.h                  +0   -2
lib/extable.c                          +0   -5
arch/sparc/include/asm/elf_64.h

@@ -8,7 +8,6 @@
 #include <asm/ptrace.h>
 #include <asm/processor.h>
-#include <asm/extable_64.h>
 #include <asm/spitfire.h>
 #include <asm/adi.h>
arch/sparc/include/asm/extable_64.h → arch/sparc/include/asm/extable.h

 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_EXTABLE64_H
-#define __ASM_EXTABLE64_H
+#ifndef __ASM_EXTABLE_H
+#define __ASM_EXTABLE_H
 /*
  * The exception table consists of pairs of addresses: the first is the
  * address of an instruction that is allowed to fault, and the second is
arch/sparc/include/asm/uaccess.h

 /* SPDX-License-Identifier: GPL-2.0 */
 #ifndef ___ASM_SPARC_UACCESS_H
 #define ___ASM_SPARC_UACCESS_H
+#include <asm/extable.h>
 #if defined(__sparc__) && defined(__arch64__)
 #include <asm/uaccess_64.h>
 #else
arch/sparc/include/asm/uaccess_32.h

@@ -13,9 +13,6 @@
 #include <asm/processor.h>

-#define ARCH_HAS_SORT_EXTABLE
-#define ARCH_HAS_SEARCH_EXTABLE
-
 /* Sparc is not segmented, however we need to be able to fool access_ok()
  * when doing system calls from kernel mode legitimately.
  *
@@ -40,36 +37,6 @@
 #define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size)))
 #define access_ok(addr, size) __access_ok((unsigned long)(addr), size)

-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- *
- * There is a special way how to put a range of potentially faulting
- * insns (like twenty ldd/std's with now intervening other instructions)
- * You specify address of first in insn and 0 in fixup and in the next
- * exception_table_entry you specify last potentially faulting insn + 1
- * and in fixup the routine which should handle the fault.
- * That fixup code will get
- * (faulting_insn_address - first_insn_in_the_range_address)/4
- * in %g2 (ie. index of the faulting instruction in the range).
- */
-struct exception_table_entry
-{
-	unsigned long insn, fixup;
-};
-
-/* Returns 0 if exception not found and fixup otherwise. */
-unsigned long search_extables_range(unsigned long addr, unsigned long *g2);
-
 /* Uh, these should become the main single-value transfer routines..
  * They automatically use the right size if we just have the right
  * pointer type..
@@ -252,12 +219,7 @@ static inline unsigned long __clear_user(void __user *addr, unsigned long size)
 	unsigned long ret;

 	__asm__ __volatile__ (
-		".section __ex_table,#alloc\n\t"
-		".align 4\n\t"
-		".word 1f,3\n\t"
-		".previous\n\t"
 		"mov %2, %%o1\n"
-		"1:\n\t"
 		"call __bzero\n\t"
 		" mov %1, %%o0\n\t"
 		"mov %%o0, %0\n"
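The comment block removed above documented sparc32's old range-encoded exception table. For context, here is a minimal C sketch of how such a table was walked, pieced together from that comment and from the deleted arch/sparc/mm/extable.c further down in this commit; the struct and function names are illustrative only, not a kernel API:

/* Illustrative only: mirrors the removed range encoding. */
struct old_extable_entry {
	unsigned long insn;	/* faulting insn, or first insn of a range */
	unsigned long fixup;	/* fixup address, or 0 to open a range     */
};

/* Returns the fixup address, or 0 if addr is not covered.  For a range
 * entry the index of the faulting instruction within the range is left
 * in *g2, as the removed search_extables_range() did for %g2. */
static unsigned long old_lookup(const struct old_extable_entry *tbl,
				unsigned int n, unsigned long addr,
				unsigned long *g2)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (tbl[i].fixup == 0) {			/* range entry  */
			if (addr >= tbl[i].insn && addr < tbl[i + 1].insn) {
				*g2 = (addr - tbl[i].insn) / 4;
				return tbl[i + 1].fixup;	/* shared handler */
			}
			i++;					/* skip the pair  */
		} else if (tbl[i].insn == addr) {
			return tbl[i].fixup;			/* single insn    */
		}
	}
	return 0;
}
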
arch/sparc/include/asm/uaccess_64.h

@@ -10,7 +10,6 @@
 #include <linux/string.h>
 #include <asm/asi.h>
 #include <asm/spitfire.h>
-#include <asm/extable_64.h>
 #include <asm/processor.h>
arch/sparc/kernel/unaligned_32.c

@@ -16,6 +16,7 @@
 #include <linux/uaccess.h>
 #include <linux/smp.h>
 #include <linux/perf_event.h>
+#include <linux/extable.h>

 #include <asm/setup.h>
@@ -213,10 +214,10 @@ static inline int ok_for_kernel(unsigned int insn)

 static void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
 {
-	unsigned long g2 = regs->u_regs[UREG_G2];
-	unsigned long fixup = search_extables_range(regs->pc, &g2);
+	const struct exception_table_entry *entry;

-	if (!fixup) {
+	entry = search_exception_tables(regs->pc);
+	if (!entry) {
 		unsigned long address = compute_effective_address(regs, insn);
 		if (address < PAGE_SIZE) {
 			printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference in mna handler");
@@ -232,9 +233,8 @@ static void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
 		die_if_kernel("Oops", regs);
 		/* Not reached */
 	}
-	regs->pc = fixup;
+	regs->pc = entry->fixup;
 	regs->npc = regs->pc + 4;
-	regs->u_regs[UREG_G2] = g2;
 }

 asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
arch/sparc/lib/checksum_32.S

@@ -155,13 +155,6 @@ cpout: retl ! get outta here
 	.text;					\
 	.align	4

-#define EXT(start,end)				\
-	.section __ex_table,ALLOC;		\
-	.align	4;				\
-	.word	start, 0, end, cc_fault;	\
-	.text;					\
-	.align	4
-
 	/* This aligned version executes typically in 8.5 superscalar cycles, this
 	 * is the best I can do. I say 8.5 because the final add will pair with
 	 * the next ldd in the main unrolled loop. Thus the pipe is always full.
@@ -169,20 +162,20 @@ cpout: retl ! get outta here
 	 * please check the fixup code below as well.
 	 */
 #define CSUMCOPY_BIGCHUNK_ALIGNED(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7)	\
-	ldd	[src + off + 0x00], t0;			\
-	ldd	[src + off + 0x08], t2;			\
+	EX(ldd	[src + off + 0x00], t0);		\
+	EX(ldd	[src + off + 0x08], t2);		\
 	addxcc	t0, sum, sum;				\
-	ldd	[src + off + 0x10], t4;			\
+	EX(ldd	[src + off + 0x10], t4);		\
 	addxcc	t1, sum, sum;				\
-	ldd	[src + off + 0x18], t6;			\
+	EX(ldd	[src + off + 0x18], t6);		\
 	addxcc	t2, sum, sum;				\
-	std	t0, [dst + off + 0x00];			\
+	EX(std	t0, [dst + off + 0x00]);		\
 	addxcc	t3, sum, sum;				\
-	std	t2, [dst + off + 0x08];			\
+	EX(std	t2, [dst + off + 0x08]);		\
 	addxcc	t4, sum, sum;				\
-	std	t4, [dst + off + 0x10];			\
+	EX(std	t4, [dst + off + 0x10]);		\
 	addxcc	t5, sum, sum;				\
-	std	t6, [dst + off + 0x18];			\
+	EX(std	t6, [dst + off + 0x18]);		\
 	addxcc	t6, sum, sum;				\
 	addxcc	t7, sum, sum;
@@ -191,39 +184,39 @@ cpout: retl ! get outta here
 	 * Viking MXCC into streaming mode. Ho hum...
 	 */
 #define CSUMCOPY_BIGCHUNK(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7)	\
-	ldd	[src + off + 0x00], t0;			\
-	ldd	[src + off + 0x08], t2;			\
-	ldd	[src + off + 0x10], t4;			\
-	ldd	[src + off + 0x18], t6;			\
-	st	t0, [dst + off + 0x00];			\
+	EX(ldd	[src + off + 0x00], t0);		\
+	EX(ldd	[src + off + 0x08], t2);		\
+	EX(ldd	[src + off + 0x10], t4);		\
+	EX(ldd	[src + off + 0x18], t6);		\
+	EX(st	t0, [dst + off + 0x00]);		\
 	addxcc	t0, sum, sum;				\
-	st	t1, [dst + off + 0x04];			\
+	EX(st	t1, [dst + off + 0x04]);		\
 	addxcc	t1, sum, sum;				\
-	st	t2, [dst + off + 0x08];			\
+	EX(st	t2, [dst + off + 0x08]);		\
 	addxcc	t2, sum, sum;				\
-	st	t3, [dst + off + 0x0c];			\
+	EX(st	t3, [dst + off + 0x0c]);		\
 	addxcc	t3, sum, sum;				\
-	st	t4, [dst + off + 0x10];			\
+	EX(st	t4, [dst + off + 0x10]);		\
 	addxcc	t4, sum, sum;				\
-	st	t5, [dst + off + 0x14];			\
+	EX(st	t5, [dst + off + 0x14]);		\
 	addxcc	t5, sum, sum;				\
-	st	t6, [dst + off + 0x18];			\
+	EX(st	t6, [dst + off + 0x18]);		\
 	addxcc	t6, sum, sum;				\
-	st	t7, [dst + off + 0x1c];			\
+	EX(st	t7, [dst + off + 0x1c]);		\
 	addxcc	t7, sum, sum;

 	/* Yuck, 6 superscalar cycles... */
 #define CSUMCOPY_LASTCHUNK(src, dst, sum, off, t0, t1, t2, t3)	\
-	ldd	[src - off - 0x08], t0;			\
-	ldd	[src - off - 0x00], t2;			\
+	EX(ldd	[src - off - 0x08], t0);		\
+	EX(ldd	[src - off - 0x00], t2);		\
 	addxcc	t0, sum, sum;				\
-	st	t0, [dst - off - 0x08];			\
+	EX(st	t0, [dst - off - 0x08]);		\
 	addxcc	t1, sum, sum;				\
-	st	t1, [dst - off - 0x04];			\
+	EX(st	t1, [dst - off - 0x04]);		\
 	addxcc	t2, sum, sum;				\
-	st	t2, [dst - off - 0x00];			\
+	EX(st	t2, [dst - off - 0x00]);		\
 	addxcc	t3, sum, sum;				\
-	st	t3, [dst - off + 0x04];
+	EX(st	t3, [dst - off + 0x04]);

 	/* Handle the end cruft code out of band for better cache patterns. */
 cc_end_cruft:
@@ -331,7 +324,6 @@ __csum_partial_copy_sparc_generic:
 	CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
 	CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
 	CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
-10:	EXT(5b, 10b)			! note for exception handling
 	sub	%g1, 128, %g1		! detract from length
 	addx	%g0, %g7, %g7		! add in last carry bit
 	andcc	%g1, 0xffffff80, %g0	! more to csum?
@@ -356,8 +348,7 @@ cctbl: CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x68,%g2,%g3,%g4,%g5)
 	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x28,%g2,%g3,%g4,%g5)
 	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x18,%g2,%g3,%g4,%g5)
 	CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x08,%g2,%g3,%g4,%g5)
-12:	EXT(cctbl, 12b)			! note for exception table handling
-	addx	%g0, %g7, %g7
+12:	addx	%g0, %g7, %g7
 	andcc	%o3, 0xf, %g0		! check for low bits set
 ccte:	bne	cc_end_cruft		! something left, handle it out of band
 	 andcc	%o3, 8, %g0		! begin checks for that code
@@ -367,7 +358,6 @@ ccdbl: CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x00,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o
 	CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
 	CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
 	CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
-11:	EXT(ccdbl, 11b)			! note for exception table handling
 	sub	%g1, 128, %g1		! detract from length
 	addx	%g0, %g7, %g7		! add in last carry bit
 	andcc	%g1, 0xffffff80, %g0	! more to csum?
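As an aside (not part of the commit): the EXT() macro dropped here emitted the old four-word "range" record, while the EX() wrappers emit one ordinary (insn, fixup) pair per faulting load or store. A rough illustration of what each style leaves in __ex_table, using made-up addresses:

/* Illustration only: the records the two macro styles produce. */
struct exception_table_entry { unsigned long insn, fixup; };

/* Old EXT(start, end) emitted ".word start, 0, end, cc_fault", i.e. two
 * consecutive entries describing a whole block of accesses. */
static const struct exception_table_entry old_range_record[2] = {
	{ 0x1000, 0      },		/* fixup == 0 opens the range   */
	{ 0x1080, 0x2000 },		/* end of range, shared handler */
};

/* New EX(insn) emits one (insn, fixup) pair per potentially faulting access. */
static const struct exception_table_entry new_records[2] = {
	{ 0x1000, 0x2000 },
	{ 0x1008, 0x2000 },
};
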
arch/sparc/lib/copy_user.S

@@ -21,98 +21,134 @@
 /* Work around cpp -rob */
 #define ALLOC #alloc
 #define EXECINSTR #execinstr

+#define EX_ENTRY(l1, l2)			\
+	.section __ex_table,ALLOC;		\
+	.align	4;				\
+	.word	l1, l2;				\
+	.text;
+
 #define EX(x,y,a,b) 				\
 98: 	x,y;					\
 	.section .fixup,ALLOC,EXECINSTR;	\
 	.align	4;				\
-99:	ba fixupretl;				\
-	 a, b, %g3;				\
-	.section __ex_table,ALLOC;		\
-	.align	4;				\
-	.word	98b, 99b;			\
-	.text;					\
-	.align	4
+99:	retl;					\
+	 a, b, %o0;				\
+	EX_ENTRY(98b, 99b)

 #define EX2(x,y,c,d,e,a,b) 			\
 98: 	x,y;					\
 	.section .fixup,ALLOC,EXECINSTR;	\
 	.align	4;				\
 99:	c, d, e;				\
-	ba fixupretl;				\
-	 a, b, %g3;				\
-	.section __ex_table,ALLOC;		\
-	.align	4;				\
-	.word	98b, 99b;			\
-	.text;					\
-	.align	4
+	retl;					\
+	 a, b, %o0;				\
+	EX_ENTRY(98b, 99b)

 #define EXO2(x,y) 				\
 98: 	x, y;					\
-	.section __ex_table,ALLOC;		\
-	.align	4;				\
-	.word	98b, 97f;			\
-	.text;					\
-	.align	4
+	EX_ENTRY(98b, 97f)

-#define EXT(start,end,handler)			\
-	.section __ex_table,ALLOC;		\
-	.align	4;				\
-	.word	start, 0, end, handler;		\
-	.text;					\
-	.align	4
+#define LD(insn, src, offset, reg, label)	\
+98:	insn [%src + (offset)], %reg;		\
+	.section .fixup,ALLOC,EXECINSTR;	\
+99:	ba	label;				\
+	 mov	offset, %g5;			\
+	EX_ENTRY(98b, 99b)

-/* Please do not change following macros unless you change logic used
- * in .fixup at the end of this file as well
- */
+#define ST(insn, dst, offset, reg, label)	\
+98:	insn %reg, [%dst + (offset)];		\
+	.section .fixup,ALLOC,EXECINSTR;	\
+99:	ba	label;				\
+	 mov	offset, %g5;			\
+	EX_ENTRY(98b, 99b)

 /* Both these macros have to start with exactly the same insn */
+/* left: g7 + (g1 % 128) - offset */
 #define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
-	ldd	[%src + (offset) + 0x00], %t0; \
-	ldd	[%src + (offset) + 0x08], %t2; \
-	ldd	[%src + (offset) + 0x10], %t4; \
-	ldd	[%src + (offset) + 0x18], %t6; \
-	st	%t0, [%dst + (offset) + 0x00]; \
-	st	%t1, [%dst + (offset) + 0x04]; \
-	st	%t2, [%dst + (offset) + 0x08]; \
-	st	%t3, [%dst + (offset) + 0x0c]; \
-	st	%t4, [%dst + (offset) + 0x10]; \
-	st	%t5, [%dst + (offset) + 0x14]; \
-	st	%t6, [%dst + (offset) + 0x18]; \
-	st	%t7, [%dst + (offset) + 0x1c];
+	LD(ldd, src, offset + 0x00, t0, bigchunk_fault)	\
+	LD(ldd, src, offset + 0x08, t2, bigchunk_fault)	\
+	LD(ldd, src, offset + 0x10, t4, bigchunk_fault)	\
+	LD(ldd, src, offset + 0x18, t6, bigchunk_fault)	\
+	ST(st, dst, offset + 0x00, t0, bigchunk_fault)	\
+	ST(st, dst, offset + 0x04, t1, bigchunk_fault)	\
+	ST(st, dst, offset + 0x08, t2, bigchunk_fault)	\
+	ST(st, dst, offset + 0x0c, t3, bigchunk_fault)	\
+	ST(st, dst, offset + 0x10, t4, bigchunk_fault)	\
+	ST(st, dst, offset + 0x14, t5, bigchunk_fault)	\
+	ST(st, dst, offset + 0x18, t6, bigchunk_fault)	\
+	ST(st, dst, offset + 0x1c, t7, bigchunk_fault)

+/* left: g7 + (g1 % 128) - offset */
 #define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
-	ldd	[%src + (offset) + 0x00], %t0; \
-	ldd	[%src + (offset) + 0x08], %t2; \
-	ldd	[%src + (offset) + 0x10], %t4; \
-	ldd	[%src + (offset) + 0x18], %t6; \
-	std	%t0, [%dst + (offset) + 0x00]; \
-	std	%t2, [%dst + (offset) + 0x08]; \
-	std	%t4, [%dst + (offset) + 0x10]; \
-	std	%t6, [%dst + (offset) + 0x18];
+	LD(ldd, src, offset + 0x00, t0, bigchunk_fault)	\
+	LD(ldd, src, offset + 0x08, t2, bigchunk_fault)	\
+	LD(ldd, src, offset + 0x10, t4, bigchunk_fault)	\
+	LD(ldd, src, offset + 0x18, t6, bigchunk_fault)	\
+	ST(std, dst, offset + 0x00, t0, bigchunk_fault)	\
+	ST(std, dst, offset + 0x08, t2, bigchunk_fault)	\
+	ST(std, dst, offset + 0x10, t4, bigchunk_fault)	\
+	ST(std, dst, offset + 0x18, t6, bigchunk_fault)

+	.section .fixup,#alloc,#execinstr
+bigchunk_fault:
+	sub	%g7, %g5, %o0
+	and	%g1, 127, %g1
+	retl
+	 add	%o0, %g1, %o0
+
+/* left: offset + 16 + (g1 % 16) */
 #define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
-	ldd	[%src - (offset) - 0x10], %t0; \
-	ldd	[%src - (offset) - 0x08], %t2; \
-	st	%t0, [%dst - (offset) - 0x10]; \
-	st	%t1, [%dst - (offset) - 0x0c]; \
-	st	%t2, [%dst - (offset) - 0x08]; \
-	st	%t3, [%dst - (offset) - 0x04];
+	LD(ldd, src, -(offset + 0x10), t0, lastchunk_fault)	\
+	LD(ldd, src, -(offset + 0x08), t2, lastchunk_fault)	\
+	ST(st, dst, -(offset + 0x10), t0, lastchunk_fault)	\
+	ST(st, dst, -(offset + 0x0c), t1, lastchunk_fault)	\
+	ST(st, dst, -(offset + 0x08), t2, lastchunk_fault)	\
+	ST(st, dst, -(offset + 0x04), t3, lastchunk_fault)

+	.section .fixup,#alloc,#execinstr
+lastchunk_fault:
+	and	%g1, 15, %g1
+	retl
+	 sub	%g1, %g5, %o0
+
+/* left: o3 + (o2 % 16) - offset */
 #define MOVE_HALFCHUNK(src, dst, offset, t0, t1, t2, t3) \
-	lduh	[%src + (offset) + 0x00], %t0; \
-	lduh	[%src + (offset) + 0x02], %t1; \
-	lduh	[%src + (offset) + 0x04], %t2; \
-	lduh	[%src + (offset) + 0x06], %t3; \
-	sth	%t0, [%dst + (offset) + 0x00]; \
-	sth	%t1, [%dst + (offset) + 0x02]; \
-	sth	%t2, [%dst + (offset) + 0x04]; \
-	sth	%t3, [%dst + (offset) + 0x06];
+	LD(lduh, src, offset + 0x00, t0, halfchunk_fault)	\
+	LD(lduh, src, offset + 0x02, t1, halfchunk_fault)	\
+	LD(lduh, src, offset + 0x04, t2, halfchunk_fault)	\
+	LD(lduh, src, offset + 0x06, t3, halfchunk_fault)	\
+	ST(sth, dst, offset + 0x00, t0, halfchunk_fault)	\
+	ST(sth, dst, offset + 0x02, t1, halfchunk_fault)	\
+	ST(sth, dst, offset + 0x04, t2, halfchunk_fault)	\
+	ST(sth, dst, offset + 0x06, t3, halfchunk_fault)

+/* left: o3 + (o2 % 16) + offset + 2 */
 #define MOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
-	ldub	[%src - (offset) - 0x02], %t0; \
-	ldub	[%src - (offset) - 0x01], %t1; \
-	stb	%t0, [%dst - (offset) - 0x02]; \
-	stb	%t1, [%dst - (offset) - 0x01];
+	LD(ldub, src, -(offset + 0x02), t0, halfchunk_fault)	\
+	LD(ldub, src, -(offset + 0x01), t1, halfchunk_fault)	\
+	ST(stb, dst, -(offset + 0x02), t0, halfchunk_fault)	\
+	ST(stb, dst, -(offset + 0x01), t1, halfchunk_fault)
+
+	.section .fixup,#alloc,#execinstr
+halfchunk_fault:
+	and	%o2, 15, %o2
+	sub	%o3, %g5, %o3
+	retl
+	 add	%o2, %o3, %o0
+
+/* left: offset + 2 + (o2 % 2) */
+#define MOVE_LAST_SHORTCHUNK(src, dst, offset, t0, t1) \
+	LD(ldub, src, -(offset + 0x02), t0, last_shortchunk_fault)	\
+	LD(ldub, src, -(offset + 0x01), t1, last_shortchunk_fault)	\
+	ST(stb, dst, -(offset + 0x02), t0, last_shortchunk_fault)	\
+	ST(stb, dst, -(offset + 0x01), t1, last_shortchunk_fault)
+
+	.section .fixup,#alloc,#execinstr
+last_shortchunk_fault:
+	and	%o2, 1, %o2
+	retl
+	 sub	%o2, %g5, %o0

 	.text
 	.align	4
@@ -182,8 +218,6 @@ __copy_user: /* %o0=dst %o1=src %o2=len */
 	MOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
 	MOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
 	MOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
-80:
-	EXT(5b, 80b, 50f)
 	subcc	%g7, 128, %g7
 	add	%o1, 128, %o1
 	bne	5b
@@ -201,7 +235,6 @@ __copy_user: /* %o0=dst %o1=src %o2=len */
 	jmpl	%o5 + %lo(copy_user_table_end), %g0
 	 add	%o0, %g7, %o0
-copy_user_table:
 	MOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
 	MOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
 	MOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
@@ -210,7 +243,6 @@ copy_user_table:
 	MOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
 	MOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
 copy_user_table_end:
-	EXT(copy_user_table, copy_user_table_end, 51f)
 	be	copy_user_last7
 	 andcc	%g1, 4, %g0
@@ -250,8 +282,6 @@ ldd_std:
 	MOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
 	MOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
 	MOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
-81:
-	EXT(ldd_std, 81b, 52f)
 	subcc	%g7, 128, %g7
 	add	%o1, 128, %o1
 	bne	ldd_std
@@ -290,8 +320,6 @@ cannot_optimize:
 10:
 	MOVE_HALFCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
 	MOVE_HALFCHUNK(o1, o0, 0x08, g2, g3, g4, g5)
-82:
-	EXT(10b, 82b, 53f)
 	subcc	%o3, 0x10, %o3
 	add	%o1, 0x10, %o1
 	bne	10b
@@ -308,8 +336,6 @@ byte_chunk:
 	MOVE_SHORTCHUNK(o1, o0, -0x0c, g2, g3)
 	MOVE_SHORTCHUNK(o1, o0, -0x0e, g2, g3)
 	MOVE_SHORTCHUNK(o1, o0, -0x10, g2, g3)
-83:
-	EXT(byte_chunk, 83b, 54f)
 	subcc	%o3, 0x10, %o3
 	add	%o1, 0x10, %o1
 	bne	byte_chunk
@@ -325,16 +351,14 @@ short_end:
 	 add	%o1, %o3, %o1
 	jmpl	%o5 + %lo(short_table_end), %g0
 	 andcc	%o2, 1, %g0
-84:
-	MOVE_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
-	MOVE_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
-	MOVE_SHORTCHUNK(o1, o0, 0x08, g2, g3)
-	MOVE_SHORTCHUNK(o1, o0, 0x06, g2, g3)
-	MOVE_SHORTCHUNK(o1, o0, 0x04, g2, g3)
-	MOVE_SHORTCHUNK(o1, o0, 0x02, g2, g3)
-	MOVE_SHORTCHUNK(o1, o0, 0x00, g2, g3)
+	MOVE_LAST_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
+	MOVE_LAST_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
+	MOVE_LAST_SHORTCHUNK(o1, o0, 0x08, g2, g3)
+	MOVE_LAST_SHORTCHUNK(o1, o0, 0x06, g2, g3)
+	MOVE_LAST_SHORTCHUNK(o1, o0, 0x04, g2, g3)
+	MOVE_LAST_SHORTCHUNK(o1, o0, 0x02, g2, g3)
+	MOVE_LAST_SHORTCHUNK(o1, o0, 0x00, g2, g3)
 short_table_end:
-	EXT(84b, short_table_end, 55f)
 	be	1f
 	 nop
 	EX(ldub	[%o1], %g2, add %g0, 1)
@@ -363,123 +387,8 @@ short_aligned_end:

 	.section .fixup,#alloc,#execinstr
 	.align	4
 97:
-	mov	%o2, %g3
-fixupretl:
 	retl
-	 mov	%g3, %o0
-
-/* exception routine sets %g2 to (broken_insn - first_insn)>>2 */
-50:
-/* This magic counts how many bytes are left when crash in MOVE_BIGCHUNK
- * happens. This is derived from the amount ldd reads, st stores, etc.
- * x = g2 % 12;
- * g3 = g1 + g7 - ((g2 / 12) * 32 + (x < 4) ? 0 : (x - 4) * 4);
- * o0 += (g2 / 12) * 32;
- */
-	cmp	%g2, 12
-	add	%o0, %g7, %o0
-	bcs	1f
-	 cmp	%g2, 24
-	bcs	2f
-	 cmp	%g2, 36
-	bcs	3f
-	 nop
-	sub	%g2, 12, %g2
-	sub	%g7, 32, %g7
-3:	sub	%g2, 12, %g2
-	sub	%g7, 32, %g7
-2:	sub	%g2, 12, %g2
-	sub	%g7, 32, %g7
-1:	cmp	%g2, 4
-	bcs,a	60f
-	 clr	%g2
-	sub	%g2, 4, %g2
-	sll	%g2, 2, %g2
-60:	and	%g1, 0x7f, %g3
-	sub	%o0, %g7, %o0
-	add	%g3, %g7, %g3
-	ba	fixupretl
-	 sub	%g3, %g2, %g3
-51:
-/* i = 41 - g2; j = i % 6;
- * g3 = (g1 & 15) + (i / 6) * 16 + (j < 4) ? (j + 1) * 4 : 16;
- * o0 -= (i / 6) * 16 + 16;
- */
-	neg	%g2
-	and	%g1, 0xf, %g1
-	add	%g2, 41, %g2
-	add	%o0, %g1, %o0
-1:	cmp	%g2, 6
-	bcs,a	2f
-	 cmp	%g2, 4
-	add	%g1, 16, %g1
-	b	1b
-	 sub	%g2, 6, %g2
-2:	bcc,a	2f
-	 mov	16, %g2
-	inc	%g2
-	sll	%g2, 2, %g2
-2:	add	%g1, %g2, %g3
-	ba	fixupretl
-	 sub	%o0, %g3, %o0
-52:
-/* g3 = g1 + g7 - (g2 / 8) * 32 + (g2 & 4) ? (g2 & 3) * 8 : 0;
-   o0 += (g2 / 8) * 32 */
-	andn	%g2, 7, %g4
-	add	%o0, %g7, %o0
-	andcc	%g2, 4, %g0
-	and	%g2, 3, %g2
-	sll	%g4, 2, %g4
-	sll	%g2, 3, %g2
-	bne	60b
-	 sub	%g7, %g4, %g7
-	ba	60b
-	 clr	%g2
-53:
-/* g3 = o3 + (o2 & 15) - (g2 & 8) - (g2 & 4) ? (g2 & 3) * 2 : 0;
-   o0 += (g2 & 8) */
-	and	%g2, 3, %g4
-	andcc	%g2, 4, %g0
-	and	%g2, 8, %g2
-	sll	%g4, 1, %g4
-	be	1f
-	 add	%o0, %g2, %o0
-	add	%g2, %g4, %g2
-1:	and	%o2, 0xf, %g3
-	add	%g3, %o3, %g3
-	ba	fixupretl
-	 sub	%g3, %g2, %g3
-54:
-/* g3 = o3 + (o2 & 15) - (g2 / 4) * 2 - (g2 & 2) ? (g2 & 1) : 0;
-   o0 += (g2 / 4) * 2 */
-	srl	%g2, 2, %o4
-	and	%g2, 1, %o5
-	srl	%g2, 1, %g2
-	add	%o4, %o4, %o4
-	and	%o5, %g2, %o5
-	and	%o2, 0xf, %o2
-	add	%o0, %o4, %o0
-	sub	%o3, %o5, %o3
-	sub	%o2, %o4, %o2
-	ba	fixupretl
-	 add	%o2, %o3, %g3
-55:
-/* i = 27 - g2;
-   g3 = (o2 & 1) + i / 4 * 2 + !(i & 3);
-   o0 -= i / 4 * 2 + 1 */
-	neg	%g2
-	and	%o2, 1, %o2
-	add	%g2, 27, %g2
-	srl	%g2, 2, %o5
-	andcc	%g2, 3, %g0
-	mov	1, %g2
-	add	%o5, %o5, %o5
-	be,a	1f
-	 clr	%g2
-1:	add	%g2, %o5, %g3
-	sub	%o0, %g3, %o0
-	ba	fixupretl
-	 add	%g3, %o2, %g3
+	 mov	%o2, %o0

 	.globl  __copy_user_end
 __copy_user_end:
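The new per-macro fixups above return the number of bytes left uncopied directly in %o0, using the offset that LD()/ST() stash in %g5, instead of decoding a range index out of %g2 as the deleted 50..55 handlers did. A C rendering of that arithmetic, under my reading of the register roles (%g1 holds the total length, %g7 the remaining big-chunk count, %o2/%o3 the short-copy counters), following the "left:" comments in the patch; the helper names are illustrative only:

/* Sketch: bytes-left arithmetic of the new copy_user fixups (illustrative). */

/* bigchunk_fault: left = (g7 - g5) + (g1 % 128) */
static unsigned long bigchunk_left(unsigned long g1, unsigned long g7,
				   unsigned long g5 /* faulting offset */)
{
	return (g7 - g5) + (g1 & 127);
}

/* lastchunk_fault: left = (g1 % 16) - g5, where g5 is -(offset + N) here */
static unsigned long lastchunk_left(unsigned long g1, long g5)
{
	return (g1 & 15) - g5;
}

/* halfchunk_fault: left = (o2 % 16) + (o3 - g5) */
static unsigned long halfchunk_left(unsigned long o2, unsigned long o3, long g5)
{
	return (o2 & 15) + (o3 - g5);
}

/* last_shortchunk_fault: left = (o2 % 2) - g5, where g5 is -(offset + N) */
static unsigned long last_shortchunk_left(unsigned long o2, long g5)
{
	return (o2 & 1) - g5;
}
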
arch/sparc/lib/memset.S

@@ -19,7 +19,7 @@
 98:	x,y;					\
 	.section .fixup,ALLOC,EXECINSTR;	\
 	.align	4;				\
-99:	ba 30f;					\
+99:	retl;					\
 	 a, b, %o0;				\
 	.section __ex_table,ALLOC;		\
 	.align	4;				\
@@ -27,35 +27,44 @@
 	.text;					\
 	.align	4

-#define EXT(start,end,handler)			\
+#define STORE(source, base, offset, n)		\
+98:	std source, [base + offset + n];	\
+	.section .fixup,ALLOC,EXECINSTR;	\
+	.align	4;				\
+99:	ba 30f;					\
+	 sub	%o3, n - offset, %o3;		\
 	.section __ex_table,ALLOC;		\
 	.align	4;				\
-	.word	start, 0, end, handler;		\
+	.word	98b, 99b;			\
 	.text;					\
-	.align	4
+	.align	4;
+
+#define STORE_LAST(source, base, offset, n)	\
+	EX(std source, [base - offset - n],	\
+	   add %o1, offset + n);

 /* Please don't change these macros, unless you change the logic
  * in the .fixup section below as well.
  * Store 64 bytes at (BASE + OFFSET) using value SOURCE. */
 #define ZERO_BIG_BLOCK(base, offset, source)    \
-	std	source, [base + offset + 0x00]; \
-	std	source, [base + offset + 0x08]; \
-	std	source, [base + offset + 0x10]; \
-	std	source, [base + offset + 0x18]; \
-	std	source, [base + offset + 0x20]; \
-	std	source, [base + offset + 0x28]; \
-	std	source, [base + offset + 0x30]; \
-	std	source, [base + offset + 0x38];
+	STORE(source, base, offset, 0x00);	\
+	STORE(source, base, offset, 0x08);	\
+	STORE(source, base, offset, 0x10);	\
+	STORE(source, base, offset, 0x18);	\
+	STORE(source, base, offset, 0x20);	\
+	STORE(source, base, offset, 0x28);	\
+	STORE(source, base, offset, 0x30);	\
+	STORE(source, base, offset, 0x38);

 #define ZERO_LAST_BLOCKS(base, offset, source)	\
-	std	source, [base - offset - 0x38]; \
-	std	source, [base - offset - 0x30]; \
-	std	source, [base - offset - 0x28]; \
-	std	source, [base - offset - 0x20]; \
-	std	source, [base - offset - 0x18]; \
-	std	source, [base - offset - 0x10]; \
-	std	source, [base - offset - 0x08]; \
-	std	source, [base - offset - 0x00];
+	STORE_LAST(source, base, offset, 0x38);	\
+	STORE_LAST(source, base, offset, 0x30);	\
+	STORE_LAST(source, base, offset, 0x28);	\
+	STORE_LAST(source, base, offset, 0x20);	\
+	STORE_LAST(source, base, offset, 0x18);	\
+	STORE_LAST(source, base, offset, 0x10);	\
+	STORE_LAST(source, base, offset, 0x08);	\
+	STORE_LAST(source, base, offset, 0x00);

 	.text
 	.align 4
@@ -68,8 +77,6 @@ __bzero_begin:
 	.globl	memset
 	EXPORT_SYMBOL(__bzero)
 	EXPORT_SYMBOL(memset)
-	.globl	__memset_start, __memset_end
-__memset_start:
 memset:
 	mov	%o0, %g1
 	mov	1, %g4
@@ -122,8 +129,6 @@ __bzero:
 	ZERO_BIG_BLOCK(%o0, 0x00, %g2)
 	subcc	%o3, 128, %o3
 	ZERO_BIG_BLOCK(%o0, 0x40, %g2)
-11:
-	EXT(10b, 11b, 20f)
 	bne	10b
 	 add	%o0, 128, %o0
@@ -138,11 +143,9 @@ __bzero:
 	jmp	%o4
 	 add	%o0, %o2, %o0

-12:
 	ZERO_LAST_BLOCKS(%o0, 0x48, %g2)
 	ZERO_LAST_BLOCKS(%o0, 0x08, %g2)
 13:
-	EXT(12b, 13b, 21f)
 	be	8f
 	 andcc	%o1, 4, %g0
@@ -182,37 +185,13 @@ __bzero:
 5:
 	retl
 	 clr	%o0
-__memset_end:

 	.section .fixup,#alloc,#execinstr
 	.align	4
-20:
-	cmp	%g2, 8
-	bleu	1f
-	 and	%o1, 0x7f, %o1
-	sub	%g2, 9, %g2
-	add	%o3, 64, %o3
-1:
-	sll	%g2, 3, %g2
-	add	%o3, %o1, %o0
-	b	30f
-	 sub	%o0, %g2, %o0
-21:
-	mov	8, %o0
-	and	%o1, 7, %o1
-	sub	%o0, %g2, %o0
-	sll	%o0, 3, %o0
-	b	30f
-	 add	%o0, %o1, %o0
-30:
-/* %o4 is faulting address, %o5 is %pc where fault occurred */
-	save	%sp, -104, %sp
-	mov	%i5, %o0
-	mov	%i7, %o1
-	call	lookup_fault
-	 mov	%i4, %o2
-	ret
-	 restore
+30:
+	and	%o1, 0x7f, %o1
+	retl
+	 add	%o3, %o1, %o0

 	.globl __bzero_end
__bzero_end:
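Here, too, the fixup no longer funnels through lookup_fault(): the STORE() stub backs the faulting position out of %o3 and label 30 adds in the sub-128-byte tail, so "bytes not zeroed" comes back directly in %o0. A small C sketch of that computation, assuming %o1 holds the requested length and %o3 the count still owed by the block loop (my reading of the register roles, not stated in the patch):

/* Sketch: return value of the rewritten __bzero fixup (illustrative only). */
static unsigned long bzero_bytes_left(unsigned long o1, unsigned long o3,
				      unsigned long offset, unsigned long n)
{
	o3 -= n - offset;		/* STORE(): back out the faulting store */
	return (o1 & 0x7f) + o3;	/* label 30: add the sub-block tail     */
}
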
arch/sparc/mm/Makefile

@@ -8,7 +8,7 @@ ccflags-y := -Werror
 obj-$(CONFIG_SPARC64)   += ultra.o tlb.o tsb.o
 obj-y                   += fault_$(BITS).o
 obj-y                   += init_$(BITS).o
-obj-$(CONFIG_SPARC32)   += extable.o srmmu.o iommu.o io-unit.o
+obj-$(CONFIG_SPARC32)   += srmmu.o iommu.o io-unit.o
 obj-$(CONFIG_SPARC32)   += srmmu_access.o
 obj-$(CONFIG_SPARC32)   += hypersparc.o viking.o tsunami.o swift.o
 obj-$(CONFIG_SPARC32)   += leon_mm.o
arch/sparc/mm/extable.c deleted 100644 → 0

-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/arch/sparc/mm/extable.c
- */
-
-#include <linux/module.h>
-#include <linux/extable.h>
-#include <linux/uaccess.h>
-
-void sort_extable(struct exception_table_entry *start,
-		  struct exception_table_entry *finish)
-{
-}
-
-/* Caller knows they are in a range if ret->fixup == 0 */
-const struct exception_table_entry *
-search_extable(const struct exception_table_entry *base,
-	       const size_t num,
-	       unsigned long value)
-{
-	int i;
-
-	/* Single insn entries are encoded as:
-	 *	word 1: insn address
-	 *	word 2: fixup code address
-	 *
-	 * Range entries are encoded as:
-	 *	word 1: first insn address
-	 *	word 2: 0
-	 *	word 3: last insn address + 4 bytes
-	 *	word 4: fixup code address
-	 *
-	 * Deleted entries are encoded as:
-	 *	word 1: unused
-	 *	word 2: -1
-	 *
-	 * See asm/uaccess.h for more details.
-	 */
-
-	/* 1. Try to find an exact match. */
-	for (i = 0; i < num; i++) {
-		if (base[i].fixup == 0) {
-			/* A range entry, skip both parts. */
-			i++;
-			continue;
-		}
-
-		/* A deleted entry; see trim_init_extable */
-		if (base[i].fixup == -1)
-			continue;
-
-		if (base[i].insn == value)
-			return &base[i];
-	}
-
-	/* 2. Try to find a range match. */
-	for (i = 0; i < (num - 1); i++) {
-		if (base[i].fixup)
-			continue;
-
-		if (base[i].insn <= value && base[i + 1].insn > value)
-			return &base[i];
-
-		i++;
-	}
-
-	return NULL;
-}
-
-#ifdef CONFIG_MODULES
-/* We could memmove them around; easier to mark the trimmed ones. */
-void trim_init_extable(struct module *m)
-{
-	unsigned int i;
-	bool range;
-
-	for (i = 0; i < m->num_exentries; i += range ? 2 : 1) {
-		range = m->extable[i].fixup == 0;
-
-		if (within_module_init(m->extable[i].insn, m)) {
-			m->extable[i].fixup = -1;
-			if (range)
-				m->extable[i + 1].fixup = -1;
-		}
-		if (range)
-			i++;
-	}
-}
-#endif /* CONFIG_MODULES */
-
-/* Special extable search, which handles ranges. Returns fixup */
-unsigned long search_extables_range(unsigned long addr, unsigned long *g2)
-{
-	const struct exception_table_entry *entry;
-
-	entry = search_exception_tables(addr);
-	if (!entry)
-		return 0;
-
-	/* Inside range? Fix g2 and return correct fixup */
-	if (!entry->fixup) {
-		*g2 = (addr - entry->insn) / 4;
-		return (entry + 1)->fixup;
-	}
-
-	return entry->fixup;
-}
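With this file deleted, sparc32 falls back to the generic lookup in lib/extable.c (its tail is visible at the end of this diff): entries are sorted and then probed with bsearch() on the faulting PC. A simplified C sketch of that path; the comparison is an analogue of cmp_ex_search(), not the exact kernel code, and ignores the relative-extable variant:

#include <stdlib.h>	/* bsearch(), for the sketch only */

struct exception_table_entry { unsigned long insn, fixup; };

/* Simplified analogue of lib/extable.c's cmp_ex_search(). */
static int cmp_ex_search(const void *key, const void *elt)
{
	const unsigned long pc = *(const unsigned long *)key;
	const struct exception_table_entry *e = elt;

	if (pc < e->insn)
		return -1;
	if (pc > e->insn)
		return 1;
	return 0;
}

static const struct exception_table_entry *
generic_search(const struct exception_table_entry *base, size_t num,
	       unsigned long pc)
{
	/* base must already be sorted by insn address */
	return bsearch(&pc, base, num, sizeof(*base), cmp_ex_search);
}
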
arch/sparc/mm/fault_32.c

@@ -23,6 +23,7 @@
 #include <linux/interrupt.h>
 #include <linux/kdebug.h>
 #include <linux/uaccess.h>
+#include <linux/extable.h>

 #include <asm/page.h>
 #include <asm/openprom.h>
@@ -54,54 +55,6 @@ static void __noreturn unhandled_fault(unsigned long address,
 	die_if_kernel("Oops", regs);
 }

-asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
-			    unsigned long address)
-{
-	struct pt_regs regs;
-	unsigned long g2;
-	unsigned int insn;
-	int i;
-
-	i = search_extables_range(ret_pc, &g2);
-	switch (i) {
-	case 3:
-		/* load & store will be handled by fixup */
-		return 3;
-
-	case 1:
-		/* store will be handled by fixup, load will bump out */
-		/* for _to_ macros */
-		insn = *((unsigned int *) pc);
-		if ((insn >> 21) & 1)
-			return 1;
-		break;
-
-	case 2:
-		/* load will be handled by fixup, store will bump out */
-		/* for _from_ macros */
-		insn = *((unsigned int *) pc);
-		if (!((insn >> 21) & 1) || ((insn >> 19) & 0x3f) == 15)
-			return 2;
-		break;
-
-	default:
-		break;
-	}
-
-	memset(&regs, 0, sizeof(regs));
-	regs.pc = pc;
-	regs.npc = pc + 4;
-	__asm__ __volatile__(
-		"rd %%psr, %0\n\t"
-		"nop\n\t"
-		"nop\n\t"
-		"nop\n" : "=r" (regs.psr));
-	unhandled_fault(address, current, &regs);
-
-	/* Not reached */
-	return 0;
-}
-
 static inline void
 show_signal_msg(struct pt_regs *regs, int sig, int code,
 		unsigned long address, struct task_struct *tsk)
@@ -162,8 +115,6 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
 	struct vm_area_struct *vma;
 	struct task_struct *tsk = current;
 	struct mm_struct *mm = tsk->mm;
-	unsigned int fixup;
-	unsigned long g2;
 	int from_user = !(regs->psr & PSR_PS);
 	int code;
 	vm_fault_t fault;
@@ -281,31 +232,20 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
 	/* Is this in ex_table? */
 no_context:
-	g2 = regs->u_regs[UREG_G2];
 	if (!from_user) {
-		fixup = search_extables_range(regs->pc, &g2);
-		/* Values below 10 are reserved for other things */
-		if (fixup > 10) {
-			extern const unsigned int __memset_start[];
-			extern const unsigned int __memset_end[];
+		const struct exception_table_entry *entry;

+		entry = search_exception_tables(regs->pc);
+		if (entry) {
 #ifdef DEBUG_EXCEPTIONS
 			printk("Exception: PC<%08lx> faddr<%08lx>\n",
 			       regs->pc, address);
-			printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
-				regs->pc, fixup, g2);
+			printk("EX_TABLE: insn<%08lx> fixup<%08x>\n",
+				regs->pc, entry->fixup);
 #endif
-			if ((regs->pc >= (unsigned long)__memset_start &&
-			     regs->pc < (unsigned long)__memset_end)) {
-				regs->u_regs[UREG_I4] = address;
-				regs->u_regs[UREG_I5] = regs->pc;
-			}
-			regs->u_regs[UREG_G2] = g2;
-			regs->pc = fixup;
+			regs->pc = entry->fixup;
 			regs->npc = regs->pc + 4;
 			return;
 		}
 	}

 	unhandled_fault(address, tsk, regs);
 	do_exit(SIGKILL);
arch/sparc/mm/mm_32.h

 /* SPDX-License-Identifier: GPL-2.0 */
 /* fault_32.c - visible as they are called from assembler */
-asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
-			    unsigned long address);
 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
 			       unsigned long address);
lib/extable.c

@@ -21,7 +21,6 @@ static inline unsigned long ex_to_insn(const struct exception_table_entry *x)
 }
 #endif

-#ifndef ARCH_HAS_SORT_EXTABLE
 #ifndef ARCH_HAS_RELATIVE_EXTABLE
 #define swap_ex		NULL
 #else
@@ -88,9 +87,6 @@ void trim_init_extable(struct module *m)
 	m->num_exentries--;
 }
 #endif /* CONFIG_MODULES */
-#endif /* !ARCH_HAS_SORT_EXTABLE */
-
-#ifndef ARCH_HAS_SEARCH_EXTABLE
 static int cmp_ex_search(const void *key, const void *elt)
 {
@@ -120,4 +116,3 @@ search_extable(const struct exception_table_entry *base,
 	return bsearch(&value, base, num,
 		       sizeof(struct exception_table_entry), cmp_ex_search);
 }
-#endif