Commit 91066588 authored by Ingo Molnar

x86/fpu: Uninline the irq_ts_save()/restore() functions

Especially the irq_ts_save() function is pretty bloaty, generating
over a dozen instructions, so uninline them.

Even though the API is used rarely, the space savings are measurable:

   text    data     bss     dec     hex filename
   13331995        2572920 1634304 17539219        10ba093 vmlinux.before
   13331739        2572920 1634304 17538963        10b9f93 vmlinux.after

( This also allows the removal of an include file inclusion from fpu/api.h,
  speeding up the kernel build slightly. )
Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 952f07ec
...@@ -10,8 +10,6 @@ ...@@ -10,8 +10,6 @@
#ifndef _ASM_X86_FPU_API_H #ifndef _ASM_X86_FPU_API_H
#define _ASM_X86_FPU_API_H #define _ASM_X86_FPU_API_H
#include <linux/hardirq.h>
/* /*
* Careful: __kernel_fpu_begin/end() must be called with preempt disabled * Careful: __kernel_fpu_begin/end() must be called with preempt disabled
* and they don't touch the preempt state on their own. * and they don't touch the preempt state on their own.
...@@ -35,28 +33,7 @@ extern bool irq_fpu_usable(void); ...@@ -35,28 +33,7 @@ extern bool irq_fpu_usable(void);
* in interrupt context interacting wrongly with other user/kernel fpu usage, we * in interrupt context interacting wrongly with other user/kernel fpu usage, we
* should use them only in the context of irq_ts_save/restore() * should use them only in the context of irq_ts_save/restore()
*/ */
static inline int irq_ts_save(void) extern int irq_ts_save(void);
{ extern void irq_ts_restore(int TS_state);
/*
* If in process context and not atomic, we can take a spurious DNA fault.
* Otherwise, doing clts() in process context requires disabling preemption
* or some heavy lifting like kernel_fpu_begin()
*/
if (!in_atomic())
return 0;
if (read_cr0() & X86_CR0_TS) {
clts();
return 1;
}
return 0;
}
static inline void irq_ts_restore(int TS_state)
{
if (TS_state)
stts();
}
#endif /* _ASM_X86_FPU_API_H */ #endif /* _ASM_X86_FPU_API_H */
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
* Gareth Hughes <gareth@valinux.com>, May 2000 * Gareth Hughes <gareth@valinux.com>, May 2000
*/ */
#include <asm/fpu/internal.h> #include <asm/fpu/internal.h>
#include <linux/hardirq.h>
/* /*
* Track whether the kernel is using the FPU state * Track whether the kernel is using the FPU state
...@@ -140,6 +141,35 @@ void kernel_fpu_end(void) ...@@ -140,6 +141,35 @@ void kernel_fpu_end(void)
} }
EXPORT_SYMBOL_GPL(kernel_fpu_end); EXPORT_SYMBOL_GPL(kernel_fpu_end);
/*
* CR0::TS save/restore functions:
*/
/*
 * Save CR0::TS for a short FPU-touching sequence in interrupt context.
 *
 * Returns 1 when the TS flag was set and has been cleared (the caller
 * must later restore it via irq_ts_restore()), 0 when nothing was done.
 *
 * In non-atomic process context we deliberately do nothing: a spurious
 * DNA fault is acceptable there, whereas clearing TS would require
 * disabling preemption or the heavier kernel_fpu_begin() machinery.
 */
int irq_ts_save(void)
{
	if (in_atomic() && (read_cr0() & X86_CR0_TS)) {
		clts();
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(irq_ts_save);
/*
 * Undo a preceding irq_ts_save(): re-set CR0::TS iff irq_ts_save()
 * reported that it had cleared the flag (TS_state != 0).
 */
void irq_ts_restore(int TS_state)
{
	if (!TS_state)
		return;

	stts();
}
EXPORT_SYMBOL_GPL(irq_ts_restore);
static void __save_fpu(struct fpu *fpu) static void __save_fpu(struct fpu *fpu)
{ {
if (use_xsave()) { if (use_xsave()) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment