nexedi / linux / Commits

Commit 448ba078, authored Feb 04, 2002 by Linus Torvalds

v2.4.0.2 -> v2.4.0.3
- me: clean up XMM support
parent 3192b2dc
Showing 8 changed files with 70 additions and 70 deletions
arch/i386/kernel/i387.c        +36  -20
arch/i386/kernel/setup.c        +7   -1
arch/i386/lib/mmx.c            +10  -24
include/asm-i386/bugs.h        +10  -15
include/asm-i386/i387.h         +4   -0
include/asm-i386/processor.h    +0   -2
mm/page_alloc.c                 +2   -8
net/sunrpc/sunrpc_syms.c        +1   -0
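The cleanup has two strands. FXSR/XMM feature tests that used to key off the kernel's own CR4 setup (HAVE_FXSR, HAVE_XMM) now use the CPUID capability bits via cpu_has_fxsr and cpu_has_xmm, so the "FXSR disabled" case only has to clear those bits once in identify_cpu(). Separately, the lazy-FPU hand-off that several MMX helpers open-coded moves behind a new kernel_fpu_begin()/kernel_fpu_end() pair. The definitions below are quoted from the processor.h hunk of this commit to show the shift in one place; the surrounding comments are editorial:

/* Removed by this commit: checks keyed off the kernel's CR4 configuration. */
#define HAVE_FXSR	(mmu_cr4_features & X86_CR4_OSFXSR)
#define HAVE_XMM	(mmu_cr4_features & X86_CR4_OSXMMEXCPT)

/* Used everywhere instead: the CPUID capability bits already in processor.h. */
#define cpu_has_fxsr	(test_bit(X86_FEATURE_FXSR, boot_cpu_data.x86_capability))
#define cpu_has_xmm	(test_bit(X86_FEATURE_XMM, boot_cpu_data.x86_capability))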
arch/i386/kernel/i387.c

@@ -33,7 +33,7 @@
 void init_fpu(void)
 {
 	__asm__("fninit");
-	if ( HAVE_XMM )
+	if ( cpu_has_xmm )
 		load_mxcsr(0x1f80);
 
 	current->used_math = 1;
@@ -43,9 +43,9 @@ void init_fpu(void)
  * FPU lazy state save handling.
  */
 
-void save_init_fpu( struct task_struct *tsk )
+static inline void __save_init_fpu( struct task_struct *tsk )
 {
-	if ( HAVE_FXSR ) {
+	if ( cpu_has_fxsr ) {
 		asm volatile( "fxsave %0 ; fnclex"
 			      : "=m" (tsk->thread.i387.fxsave) );
 	} else {
@@ -53,12 +53,28 @@ void save_init_fpu( struct task_struct *tsk )
 			      : "=m" (tsk->thread.i387.fsave) );
 	}
 	tsk->flags &= ~PF_USEDFPU;
+}
+
+void save_init_fpu( struct task_struct *tsk )
+{
+	__save_init_fpu(tsk);
 	stts();
 }
 
+void kernel_fpu_begin(void)
+{
+	struct task_struct *tsk = current;
+
+	if (tsk->flags & PF_USEDFPU) {
+		__save_init_fpu(tsk);
+		return;
+	}
+	clts();
+}
+
 void restore_fpu( struct task_struct *tsk )
 {
-	if ( HAVE_FXSR ) {
+	if ( cpu_has_fxsr ) {
 		asm volatile( "fxrstor %0"
 			      : : "m" (tsk->thread.i387.fxsave) );
 	} else {
@@ -136,7 +152,7 @@ static inline unsigned long twd_fxsr_to_i387( struct i387_fxsave_struct *fxsave
 
 unsigned short get_fpu_cwd( struct task_struct *tsk )
 {
-	if ( HAVE_FXSR ) {
+	if ( cpu_has_fxsr ) {
 		return tsk->thread.i387.fxsave.cwd;
 	} else {
 		return (unsigned short)tsk->thread.i387.fsave.cwd;
@@ -145,7 +161,7 @@ unsigned short get_fpu_cwd( struct task_struct *tsk )
 
 unsigned short get_fpu_swd( struct task_struct *tsk )
 {
-	if ( HAVE_FXSR ) {
+	if ( cpu_has_fxsr ) {
 		return tsk->thread.i387.fxsave.swd;
 	} else {
 		return (unsigned short)tsk->thread.i387.fsave.swd;
@@ -154,7 +170,7 @@ unsigned short get_fpu_swd( struct task_struct *tsk )
 
 unsigned short get_fpu_twd( struct task_struct *tsk )
 {
-	if ( HAVE_FXSR ) {
+	if ( cpu_has_fxsr ) {
 		return tsk->thread.i387.fxsave.twd;
 	} else {
 		return (unsigned short)tsk->thread.i387.fsave.twd;
@@ -163,7 +179,7 @@ unsigned short get_fpu_twd( struct task_struct *tsk )
 
 unsigned short get_fpu_mxcsr( struct task_struct *tsk )
 {
-	if ( HAVE_FXSR ) {
+	if ( cpu_has_fxsr ) {
 		return tsk->thread.i387.fxsave.mxcsr;
 	} else {
 		return 0x1f80;
@@ -172,7 +188,7 @@ unsigned short get_fpu_mxcsr( struct task_struct *tsk )
 
 void set_fpu_cwd( struct task_struct *tsk, unsigned short cwd )
 {
-	if ( HAVE_FXSR ) {
+	if ( cpu_has_fxsr ) {
 		tsk->thread.i387.fxsave.cwd = cwd;
 	} else {
 		tsk->thread.i387.fsave.cwd = ((long)cwd | 0xffff0000);
@@ -181,7 +197,7 @@ void set_fpu_cwd( struct task_struct *tsk, unsigned short cwd )
 
 void set_fpu_swd( struct task_struct *tsk, unsigned short swd )
 {
-	if ( HAVE_FXSR ) {
+	if ( cpu_has_fxsr ) {
 		tsk->thread.i387.fxsave.swd = swd;
 	} else {
 		tsk->thread.i387.fsave.swd = ((long)swd | 0xffff0000);
@@ -190,7 +206,7 @@ void set_fpu_swd( struct task_struct *tsk, unsigned short swd )
 
 void set_fpu_twd( struct task_struct *tsk, unsigned short twd )
 {
-	if ( HAVE_FXSR ) {
+	if ( cpu_has_fxsr ) {
 		tsk->thread.i387.fxsave.twd = twd_i387_to_fxsr(twd);
 	} else {
 		tsk->thread.i387.fsave.twd = ((long)twd | 0xffff0000);
@@ -199,7 +215,7 @@ void set_fpu_twd( struct task_struct *tsk, unsigned short twd )
 
 void set_fpu_mxcsr( struct task_struct *tsk, unsigned short mxcsr )
 {
-	if ( HAVE_XMM ) {
+	if ( cpu_has_xmm ) {
 		tsk->thread.i387.fxsave.mxcsr = mxcsr;
 	}
 }
@@ -313,7 +329,7 @@ int save_i387( struct _fpstate *buf )
 	current->used_math = 0;
 
 	if ( HAVE_HWFP ) {
-		if ( HAVE_FXSR ) {
+		if ( cpu_has_fxsr ) {
 			return save_i387_fxsave( buf );
 		} else {
 			return save_i387_fsave( buf );
@@ -346,7 +362,7 @@ int restore_i387( struct _fpstate *buf )
 	int err;
 
 	if ( HAVE_HWFP ) {
-		if ( HAVE_FXSR ) {
+		if ( cpu_has_fxsr ) {
 			err = restore_i387_fxsave( buf );
 		} else {
 			err = restore_i387_fsave( buf );
@@ -379,7 +395,7 @@ static inline int get_fpregs_fxsave( struct user_i387_struct *buf,
 int get_fpregs( struct user_i387_struct *buf, struct task_struct *tsk )
 {
 	if ( HAVE_HWFP ) {
-		if ( HAVE_FXSR ) {
+		if ( cpu_has_fxsr ) {
 			return get_fpregs_fxsave( buf, tsk );
 		} else {
 			return get_fpregs_fsave( buf, tsk );
@@ -407,7 +423,7 @@ static inline int set_fpregs_fxsave( struct task_struct *tsk,
 int set_fpregs( struct task_struct *tsk, struct user_i387_struct *buf )
 {
 	if ( HAVE_HWFP ) {
-		if ( HAVE_FXSR ) {
+		if ( cpu_has_fxsr ) {
 			return set_fpregs_fxsave( tsk, buf );
 		} else {
 			return set_fpregs_fsave( tsk, buf );
@@ -420,7 +436,7 @@ int set_fpregs( struct task_struct *tsk, struct user_i387_struct *buf )
 
 int get_fpxregs( struct user_fxsr_struct *buf, struct task_struct *tsk )
 {
-	if ( HAVE_FXSR ) {
+	if ( cpu_has_fxsr ) {
 		if (__copy_to_user( (void *)buf, &tsk->thread.i387.fxsave,
 				    sizeof(struct user_fxsr_struct) ))
 			return -EFAULT;
@@ -432,7 +448,7 @@ int get_fpxregs( struct user_fxsr_struct *buf, struct task_struct *tsk )
 
 int set_fpxregs( struct task_struct *tsk, struct user_fxsr_struct *buf )
 {
-	if ( HAVE_FXSR ) {
+	if ( cpu_has_fxsr ) {
 		__copy_from_user( &tsk->thread.i387.fxsave, (void *)buf,
 				  sizeof(struct user_fxsr_struct) );
 		/* mxcsr bit 6 and 31-16 must be zero for security reasons */
@@ -478,7 +494,7 @@ int dump_fpu( struct pt_regs *regs, struct user_i387_struct *fpu )
 	fpvalid = tsk->used_math;
 	if ( fpvalid ) {
 		unlazy_fpu( tsk );
-		if ( HAVE_FXSR ) {
+		if ( cpu_has_fxsr ) {
 			copy_fpu_fxsave( tsk, fpu );
 		} else {
 			copy_fpu_fsave( tsk, fpu );
@@ -493,7 +509,7 @@ int dump_extended_fpu( struct pt_regs *regs, struct user_fxsr_struct *fpu )
 	int fpvalid;
 	struct task_struct *tsk = current;
 
-	fpvalid = tsk->used_math && HAVE_FXSR;
+	fpvalid = tsk->used_math && cpu_has_fxsr;
 	if ( fpvalid ) {
 		unlazy_fpu( tsk );
 		memcpy( fpu, &tsk->thread.i387.fxsave,
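The begin/end pairing introduced above is what kernel-side FPU/MMX/SSE users are expected to call. A minimal usage sketch follows; the caller name is hypothetical and the comments paraphrase the functions added in this file: kernel_fpu_begin() either saves the current task's live FPU state via __save_init_fpu() or simply clears CR0.TS, and kernel_fpu_end() (a macro for stts() in the i387.h hunk below) sets TS again so the next user-space FPU instruction faults and the task's state is restored lazily.

#include <asm/i387.h>

/* Hypothetical caller, not part of the commit: bracket any in-kernel
 * FPU/MMX/SSE register usage with the helpers added above. */
static void example_mmx_user(void)
{
	kernel_fpu_begin();	/* save the owner's FPU state, or just clts() */

	/* ... MMX/SSE register usage goes here ... */

	kernel_fpu_end();	/* stts(): next user FPU op traps and restores */
}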
arch/i386/kernel/setup.c

@@ -147,7 +147,7 @@ extern char _text, _etext, _edata, _end;
 extern unsigned long cpu_khz;
 
 static int disable_x86_serial_nr __initdata = 1;
-int disable_x86_fxsr __initdata = 0;
+static int disable_x86_fxsr __initdata = 0;
 
 /*
  * This is set up by the setup-routine at boot-time
@@ -2013,6 +2013,12 @@ void __init identify_cpu(struct cpuinfo_x86 *c)
 		clear_bit(X86_FEATURE_TSC, &c->x86_capability);
 #endif
 
+	/* FXSR disabled? */
+	if (disable_x86_fxsr) {
+		clear_bit(X86_FEATURE_FXSR, &c->x86_capability);
+		clear_bit(X86_FEATURE_XMM, &c->x86_capability);
+	}
+
 	/* Disable the PN if appropriate */
 	squash_the_stupid_serial_number(c);
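With disable_x86_fxsr handled here by clearing the capability bits, every later cpu_has_fxsr / cpu_has_xmm test sees the feature as absent, which is what lets bugs.h below drop its own disable_x86_fxsr check. A sketch of the resulting control flow (both fragments come from hunks in this commit; the boot-time plumbing that sets disable_x86_fxsr is unchanged and not shown):

/* In identify_cpu() (this hunk): the disable knob clears the CPUID bits once. */
if (disable_x86_fxsr) {
	clear_bit(X86_FEATURE_FXSR, &c->x86_capability);
	clear_bit(X86_FEATURE_XMM, &c->x86_capability);
}

/* Anywhere later, e.g. check_fpu() in bugs.h: only the capability bit matters. */
if (cpu_has_fxsr)
	set_in_cr4(X86_CR4_OSFXSR);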
arch/i386/lib/mmx.c

@@ -2,6 +2,8 @@
 #include <linux/string.h>
 #include <linux/sched.h>
 
+#include <asm/i387.h>
+
 /*
  *	MMX 3DNow! library helper functions
  *
@@ -26,13 +28,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
 	void *p = to;
 	int i = len >> 6; /* len/64 */
 
-	if (!(current->flags & PF_USEDFPU))
-		clts();
-	else
-	{
-		__asm__ __volatile__ (" fnsave %0; fwait\n"::"m"(current->thread.i387));
-		current->flags &= ~PF_USEDFPU;
-	}
+	kernel_fpu_begin();
 
 	__asm__ __volatile__ (
 		"1: prefetch (%0)\n"		/* This set is 28 bytes */
@@ -88,20 +84,15 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
 	 *	Now do the tail of the block
 	 */
 	__memcpy(to, from, len&63);
-	stts();
+	kernel_fpu_end();
 	return p;
 }
 
 static void fast_clear_page(void *page)
 {
 	int i;
-	if (!(current->flags & PF_USEDFPU))
-		clts();
-	else
-	{
-		__asm__ __volatile__ (" fnsave %0; fwait\n"::"m"(current->thread.i387));
-		current->flags &= ~PF_USEDFPU;
-	}
+
+	kernel_fpu_begin();
 
 	__asm__ __volatile__ (
 		"  pxor %%mm0, %%mm0\n" : :
@@ -127,19 +118,14 @@ static void fast_clear_page(void *page)
 	__asm__ __volatile__ (
 		"  sfence \n" : :
 	);
-	stts();
+	kernel_fpu_end();
 }
 
 static void fast_copy_page(void *to, void *from)
 {
 	int i;
-	if (!(current->flags & PF_USEDFPU))
-		clts();
-	else
-	{
-		__asm__ __volatile__ (" fnsave %0; fwait\n"::"m"(current->thread.i387));
-		current->flags &= ~PF_USEDFPU;
-	}
+
+	kernel_fpu_begin();
 
 	/* maybe the prefetch stuff can go before the expensive fnsave...
 	 * but that is for later. -AV
@@ -199,7 +185,7 @@ static void fast_copy_page(void *to, void *from)
 	__asm__ __volatile__ (
 		"  sfence \n" : :
 	);
-	stts();
+	kernel_fpu_end();
 }
 
 /*
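The mmx.c hunks are the consumer side of the new API: each helper used to open-code the lazy-FPU hand-off and the final stts(). Shown before/after for clarity; both fragments are taken from the diff above, only reformatted:

/* Before: every MMX routine carried its own save dance. */
if (!(current->flags & PF_USEDFPU))
	clts();
else {
	__asm__ __volatile__ (" fnsave %0; fwait\n"::"m"(current->thread.i387));
	current->flags &= ~PF_USEDFPU;
}
/* ... MMX work ... */
stts();

/* After: the same logic lives once behind the i387 helpers. */
kernel_fpu_begin();
/* ... MMX work ... */
kernel_fpu_end();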
include/asm-i386/bugs.h

@@ -66,8 +66,6 @@ static double __initdata y = 3145727.0;
  */
 static void __init check_fpu(void)
 {
-	extern int disable_x86_fxsr;
-
 	if (!boot_cpu_data.hard_math) {
 #ifndef CONFIG_MATH_EMULATION
 		printk(KERN_EMERG "No coprocessor found and no math emulation present.\n");
@@ -85,7 +83,6 @@ static void __init check_fpu(void)
 		extern void __buggy_fxsr_alignment(void);
 		__buggy_fxsr_alignment();
 	}
-	if ( !disable_x86_fxsr ) {
 	if ( cpu_has_fxsr ) {
 		printk(KERN_INFO "Enabling fast FPU save and restore... ");
 		set_in_cr4( X86_CR4_OSFXSR );
@@ -96,8 +93,6 @@ static void __init check_fpu(void)
 		set_in_cr4( X86_CR4_OSXMMEXCPT );
 		printk("done.\n");
 	}
-	} else
-		printk(KERN_INFO "Disabling fast FPU save and restore.\n");
 
 	/* Test for the divl bug.. */
 	__asm__("fninit\n\t"
include/asm-i386/i387.h

@@ -23,6 +23,10 @@ extern void init_fpu(void);
 extern void save_init_fpu( struct task_struct *tsk );
 extern void restore_fpu( struct task_struct *tsk );
 
+extern void kernel_fpu_begin(void);
+#define kernel_fpu_end() stts()
+
+
 #define unlazy_fpu( tsk ) do { \
 	if ( tsk->flags & PF_USEDFPU ) \
 		save_init_fpu( tsk ); \
include/asm-i386/processor.h

@@ -88,8 +88,6 @@ extern struct cpuinfo_x86 cpu_data[];
 #define cpu_has_fxsr	(test_bit(X86_FEATURE_FXSR, boot_cpu_data.x86_capability))
 #define cpu_has_xmm	(test_bit(X86_FEATURE_XMM, boot_cpu_data.x86_capability))
 #define cpu_has_fpu	(test_bit(X86_FEATURE_FPU, boot_cpu_data.x86_capability))
-#define HAVE_FXSR	(mmu_cr4_features & X86_CR4_OSFXSR)
-#define HAVE_XMM	(mmu_cr4_features & X86_CR4_OSXMMEXCPT)
 
 extern char ignore_irq13;
mm/page_alloc.c

@@ -542,14 +542,8 @@ void __free_pages(struct page *page, unsigned long order)
 
 void free_pages(unsigned long addr, unsigned long order)
 {
-	struct page *fpage;
-
-#ifdef CONFIG_DISCONTIGMEM
-	if (addr == 0) return;
-#endif
-	fpage = virt_to_page(addr);
-	if (VALID_PAGE(fpage))
-		__free_pages(fpage, order);
+	if (addr != 0)
+		__free_pages(virt_to_page(addr), order);
 }
 
 /*
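Unrelated to the FPU work, free_pages() is also simplified: the CONFIG_DISCONTIGMEM special case and the VALID_PAGE() test are dropped, and any non-zero address is forwarded straight to __free_pages(). Caller-visible behaviour for the normal case is unchanged; a small usage sketch (the order value is arbitrary and the snippet is illustrative, not from the patch):

unsigned long addr = __get_free_pages(GFP_KERNEL, 2);	/* 4 contiguous pages */
if (addr) {
	/* ... use the pages ... */
	free_pages(addr, 2);	/* now just: if (addr != 0) __free_pages(virt_to_page(addr), order); */
}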
net/sunrpc/sunrpc_syms.c

@@ -36,6 +36,7 @@ EXPORT_SYMBOL(rpciod_down);
 EXPORT_SYMBOL(rpciod_up);
 EXPORT_SYMBOL(rpc_new_task);
 EXPORT_SYMBOL(rpc_wake_up_status);
+EXPORT_SYMBOL(rpc_release_task);
 
 /* RPC client functions */
 EXPORT_SYMBOL(rpc_create_client);