Commit e97131a8 authored by Ingo Molnar

x86/fpu: Add CONFIG_X86_DEBUG_FPU=y FPU debugging code

There are various internal FPU state debugging checks that never
trigger in practice, but which are useful for FPU code development.

Separate these out into CONFIG_X86_DEBUG_FPU=y, and also add a
couple of new ones.
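
For illustration, the compile-out pattern behind these checks, as a
stand-alone user-space sketch (the names DEBUG_FPU and WARN_ON_DBG() below
are stand-ins for CONFIG_X86_DEBUG_FPU and WARN_ON_FPU(), not kernel code):

    /* debug_fpu_sketch.c - illustrative only, not part of this patch */
    #include <stdio.h>

    #define DEBUG_FPU 1     /* stand-in for CONFIG_X86_DEBUG_FPU */

    #if DEBUG_FPU
    /* warn (every time, unlike WARN_ON_ONCE()) and report whether 'x' was true */
    # define WARN_ON_DBG(x) \
            ({ int __w = !!(x); if (__w) fprintf(stderr, "WARNING: %s\n", #x); __w; })
    #else
    /* no check, no code - but still usable as an expression inside if () */
    # define WARN_ON_DBG(x) ({ 0; })
    #endif

    int main(void)
    {
            int fpregs_active = 0;

            /* statement form, like the check in __fpregs_deactivate(): */
            WARN_ON_DBG(!fpregs_active);

            /* expression form, like the check in __kernel_fpu_end(): */
            if (WARN_ON_DBG(!fpregs_active))
                    printf("would fall back to reinitializing the FPU state here\n");

            return 0;
    }

The expression form is why the disabled variant is '({ 0; })' rather than
an empty definition: callers can still test the result in an if ().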

The size difference is about 0.5K of code on defconfig:

   text        data     bss      filename
   15028906    2578816  1638400  vmlinux  (before)
   15029430    2578816  1638400  vmlinux  (after)

( Keep this enabled by default until the new FPU code is debugged. )
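
One of the new checks added below asserts that certain FPU init routines
run only once, on the boot CPU, via a static 'on_boot_cpu' flag. A rough
user-space sketch of that pattern (names are illustrative; the kernel
variant only warns via WARN_ON_FPU() and never aborts):

    /* once_per_boot_sketch.c - illustrative only, not part of this patch */
    #include <stdio.h>

    #define WARN_ON_SKETCH(x) \
            do { if (x) fprintf(stderr, "WARNING: %s\n", #x); } while (0)

    static void fpu_init_once_sketch(void)
    {
            static int on_boot_cpu = 1;     /* drops to 0 after the first call */

            WARN_ON_SKETCH(!on_boot_cpu);   /* complain if this ever runs twice */
            on_boot_cpu = 0;

            printf("one-time init work done\n");
    }

    int main(void)
    {
            fpu_init_once_sketch();         /* first call: silent */
            fpu_init_once_sketch();         /* second call: triggers the warning */
            return 0;
    }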

Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent d364a765
@@ -332,4 +332,16 @@ config X86_DEBUG_STATIC_CPU_HAS
 
           If unsure, say N.
 
+config X86_DEBUG_FPU
+       bool "Debug the x86 FPU code"
+       depends on DEBUG_KERNEL
+       default y
+       ---help---
+         If this option is enabled then there will be extra sanity
+         checks and (boot time) debug printouts added to the kernel.
+         This debugging adds some small amount of runtime overhead
+         to the kernel.
+
+         If unsure, say N.
+
 endmenu
...
@@ -59,6 +59,15 @@ extern void fpu__clear(struct fpu *fpu);
 extern void fpu__init_check_bugs(void);
 extern void fpu__resume_cpu(void);
 
+/*
+ * Debugging facility:
+ */
+#ifdef CONFIG_X86_DEBUG_FPU
+# define WARN_ON_FPU(x) WARN_ON_ONCE(x)
+#else
+# define WARN_ON_FPU(x) ({ 0; })
+#endif
+
 DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
 
 /*
@@ -296,6 +305,8 @@ static inline void __fpregs_deactivate_hw(void)
 /* Must be paired with an 'stts' (fpregs_deactivate_hw()) after! */
 static inline void __fpregs_deactivate(struct fpu *fpu)
 {
+       WARN_ON_FPU(!fpu->fpregs_active);
+
        fpu->fpregs_active = 0;
        this_cpu_write(fpu_fpregs_owner_ctx, NULL);
 }
@@ -303,6 +314,8 @@ static inline void __fpregs_deactivate(struct fpu *fpu)
 /* Must be paired with a 'clts' (fpregs_activate_hw()) before! */
 static inline void __fpregs_activate(struct fpu *fpu)
 {
+       WARN_ON_FPU(fpu->fpregs_active);
+
        fpu->fpregs_active = 1;
        this_cpu_write(fpu_fpregs_owner_ctx, fpu);
 }
@@ -433,8 +446,10 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
 static inline void switch_fpu_finish(struct fpu *new_fpu, fpu_switch_t fpu_switch)
 {
        if (fpu_switch.preload) {
-               if (unlikely(copy_fpstate_to_fpregs(new_fpu)))
+               if (unlikely(copy_fpstate_to_fpregs(new_fpu))) {
+                       WARN_ON_FPU(1);
                        fpu__clear(new_fpu);
+               }
        }
 }
...
@@ -38,13 +38,13 @@ DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
 
 static void kernel_fpu_disable(void)
 {
-       WARN_ON(this_cpu_read(in_kernel_fpu));
+       WARN_ON_FPU(this_cpu_read(in_kernel_fpu));
        this_cpu_write(in_kernel_fpu, true);
 }
 
 static void kernel_fpu_enable(void)
 {
-       WARN_ON_ONCE(!this_cpu_read(in_kernel_fpu));
+       WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));
        this_cpu_write(in_kernel_fpu, false);
 }
@@ -109,7 +109,7 @@ void __kernel_fpu_begin(void)
 {
        struct fpu *fpu = &current->thread.fpu;
 
-       WARN_ON_ONCE(!irq_fpu_usable());
+       WARN_ON_FPU(!irq_fpu_usable());
 
        kernel_fpu_disable();
@@ -127,7 +127,7 @@ void __kernel_fpu_end(void)
        struct fpu *fpu = &current->thread.fpu;
 
        if (fpu->fpregs_active) {
-               if (WARN_ON(copy_fpstate_to_fpregs(fpu)))
+               if (WARN_ON_FPU(copy_fpstate_to_fpregs(fpu)))
                        fpu__clear(fpu);
        } else {
                __fpregs_deactivate_hw();
@@ -187,7 +187,7 @@ EXPORT_SYMBOL_GPL(irq_ts_restore);
  */
 void fpu__save(struct fpu *fpu)
 {
-       WARN_ON(fpu != &current->thread.fpu);
+       WARN_ON_FPU(fpu != &current->thread.fpu);
 
        preempt_disable();
        if (fpu->fpregs_active) {
@@ -233,7 +233,7 @@ EXPORT_SYMBOL_GPL(fpstate_init);
  */
 static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
 {
-       WARN_ON(src_fpu != &current->thread.fpu);
+       WARN_ON_FPU(src_fpu != &current->thread.fpu);
 
        /*
         * Don't let 'init optimized' areas of the XSAVE area
@@ -284,7 +284,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
  */
 void fpu__activate_curr(struct fpu *fpu)
 {
-       WARN_ON_ONCE(fpu != &current->thread.fpu);
+       WARN_ON_FPU(fpu != &current->thread.fpu);
 
        if (!fpu->fpstate_active) {
                fpstate_init(&fpu->state);
@@ -321,7 +321,7 @@ EXPORT_SYMBOL_GPL(fpu__activate_curr);
  */
 void fpu__activate_stopped(struct fpu *child_fpu)
 {
-       WARN_ON_ONCE(child_fpu == &current->thread.fpu);
+       WARN_ON_FPU(child_fpu == &current->thread.fpu);
 
        if (child_fpu->fpstate_active) {
                child_fpu->last_cpu = -1;
@@ -407,7 +407,7 @@ static inline void copy_init_fpstate_to_fpregs(void)
  */
 void fpu__clear(struct fpu *fpu)
 {
-       WARN_ON_ONCE(fpu != &current->thread.fpu); /* Almost certainly an anomaly */
+       WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */
 
        if (!use_eager_fpu()) {
                /* FPU state will be reallocated lazily at the first use. */
...
@@ -143,6 +143,11 @@ EXPORT_SYMBOL_GPL(xstate_size);
  */
 static void __init fpu__init_system_xstate_size_legacy(void)
 {
+       static int on_boot_cpu = 1;
+
+       WARN_ON_FPU(!on_boot_cpu);
+       on_boot_cpu = 0;
+
        /*
         * Note that xstate_size might be overwriten later during
         * fpu__init_system_xstate().
@@ -214,7 +219,12 @@ __setup("eagerfpu=", eager_fpu_setup);
  */
 static void __init fpu__init_system_ctx_switch(void)
 {
-       WARN_ON(current->thread.fpu.fpstate_active);
+       static bool on_boot_cpu = 1;
+
+       WARN_ON_FPU(!on_boot_cpu);
+       on_boot_cpu = 0;
+
+       WARN_ON_FPU(current->thread.fpu.fpstate_active);
        current_thread_info()->status = 0;
 
        /* Auto enable eagerfpu for xsaveopt */
...
@@ -262,6 +262,11 @@ static void __init setup_xstate_comp(void)
  */
 static void __init setup_init_fpu_buf(void)
 {
+       static int on_boot_cpu = 1;
+
+       WARN_ON_FPU(!on_boot_cpu);
+       on_boot_cpu = 0;
+
        if (!cpu_has_xsave)
                return;
@@ -317,6 +322,10 @@ static void __init init_xstate_size(void)
 void __init fpu__init_system_xstate(void)
 {
        unsigned int eax, ebx, ecx, edx;
+       static int on_boot_cpu = 1;
+
+       WARN_ON_FPU(!on_boot_cpu);
+       on_boot_cpu = 0;
 
        if (!cpu_has_xsave) {
                pr_info("x86/fpu: Legacy x87 FPU detected.\n");
@@ -324,7 +333,7 @@ void __init fpu__init_system_xstate(void)
        }
 
        if (boot_cpu_data.cpuid_level < XSTATE_CPUID) {
-               WARN(1, "x86/fpu: XSTATE_CPUID missing!\n");
+               WARN_ON_FPU(1);
                return;
        }
...