Commit c93eceda authored by Linus Torvalds

Merge branch 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fpu updates from Ingo Molnar:
 "Initial round of kernel_fpu_begin/end cleanups from Oleg Nesterov,
  plus a cleanup from Borislav Petkov"

* 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, fpu: Fix math_state_restore() race with kernel_fpu_begin()
  x86, fpu: Don't abuse has_fpu in __kernel_fpu_begin/end()
  x86, fpu: Introduce per-cpu in_kernel_fpu state
  x86/fpu: Use a symbolic name for asm operand
parents 072bc448 7575637a
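The series revolves around the kernel_fpu_begin()/kernel_fpu_end() bracket that kernel code must wrap around any use of FPU/SIMD registers. As a minimal sketch of that calling convention (the surrounding function is hypothetical; only the bracket itself comes from this code, declared in asm/i387.h at this point in history):

    #include <asm/i387.h>       /* kernel_fpu_begin()/kernel_fpu_end() */

    static void do_simd_work(void)
    {
            kernel_fpu_begin();     /* disables preemption, saves the current FPU state */

            /* SSE/AVX instructions may be used here; sleeping is not allowed */

            kernel_fpu_end();       /* restores or drops FPU state, re-enables preemption */
    }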
@@ -207,7 +207,7 @@ static inline void fpu_fxsave(struct fpu *fpu)
 	if (config_enabled(CONFIG_X86_32))
 		asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state->fxsave));
 	else if (config_enabled(CONFIG_AS_FXSAVEQ))
-		asm volatile("fxsaveq %0" : "=m" (fpu->state->fxsave));
+		asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state->fxsave));
 	else {
 		/* Using "rex64; fxsave %0" is broken because, if the memory
 		 * operand uses any extended registers for addressing, a second
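This is Borislav Petkov's "Use a symbolic name for asm operand" change: the positional %0 becomes the named [fx] operand, matching the 32-bit branch just above it. Named operands are standard GCC extended-asm syntax; a standalone, runnable illustration (not kernel code, and the variable names are mine):

    #include <stdio.h>

    int main(void)
    {
            unsigned long src = 42, dst = 0;

            /* %[in] and %[out] refer to operands by name instead of %1/%0 */
            asm volatile("mov %[in], %[out]"
                         : [out] "=r" (dst)
                         : [in]  "r"  (src));

            printf("%lu\n", dst);   /* prints 42 */
            return 0;
    }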
@@ -290,9 +290,11 @@ static inline int fpu_restore_checking(struct fpu *fpu)

 static inline int restore_fpu_checking(struct task_struct *tsk)
 {
-	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
-	   is pending. Clear the x87 state here by setting it to fixed
-	   values. "m" is a random variable that should be in L1 */
+	/*
+	 * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
+	 * pending. Clear the x87 state here by setting it to fixed values.
+	 * "m" is a random variable that should be in L1.
+	 */
 	if (unlikely(static_cpu_has_bug_safe(X86_BUG_FXSAVE_LEAK))) {
 		asm volatile(
 			"fnclex\n\t"
@@ -40,8 +40,8 @@ extern void __kernel_fpu_end(void);

 static inline void kernel_fpu_begin(void)
 {
-	WARN_ON_ONCE(!irq_fpu_usable());
 	preempt_disable();
+	WARN_ON_ONCE(!irq_fpu_usable());
 	__kernel_fpu_begin();
 }
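Moving the WARN below preempt_disable() matters because irq_fpu_usable() inspects per-CPU state (including the in_kernel_fpu flag introduced further down in this same merge), and that read is only stable once preemption is off. A sketch of the window the old order left open; the annotations are mine, not from the patch:

    /* old order */
    WARN_ON_ONCE(!irq_fpu_usable());    /* evaluated on, say, CPU 0 */
                                        /* <-- preemption point: task may migrate to CPU 1 */
    preempt_disable();
    __kernel_fpu_begin();               /* runs against CPU 1 state that was never checked */

    /* new order: the check and the use see the same CPU's state */
    preempt_disable();
    WARN_ON_ONCE(!irq_fpu_usable());
    __kernel_fpu_begin();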
@@ -51,6 +51,10 @@ static inline void kernel_fpu_end(void)
 	preempt_enable();
 }

+/* Must be called with preempt disabled */
+extern void kernel_fpu_disable(void);
+extern void kernel_fpu_enable(void);
+
 /*
  * Some instructions like VIA's padlock instructions generate a spurious
  * DNA fault but don't modify SSE registers. And these instructions
@@ -19,6 +19,19 @@
 #include <asm/fpu-internal.h>
 #include <asm/user.h>

+static DEFINE_PER_CPU(bool, in_kernel_fpu);
+
+void kernel_fpu_disable(void)
+{
+	WARN_ON(this_cpu_read(in_kernel_fpu));
+	this_cpu_write(in_kernel_fpu, true);
+}
+
+void kernel_fpu_enable(void)
+{
+	this_cpu_write(in_kernel_fpu, false);
+}
+
 /*
  * Were we in an interrupt that interrupted kernel mode?
 *
@@ -33,6 +46,9 @@
  */
 static inline bool interrupted_kernel_fpu_idle(void)
 {
+	if (this_cpu_read(in_kernel_fpu))
+		return false;
+
 	if (use_eager_fpu())
 		return __thread_has_fpu(current);
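in_kernel_fpu is a per-CPU boolean, so each processor independently tracks whether it is inside a kernel FPU region, and interrupted_kernel_fpu_idle() now refuses nested use up front. A runnable userspace analogy of that guard (per-CPU becomes per-thread here, and every name is a stand-in, not kernel API):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    static _Thread_local bool in_kernel_fpu;    /* stand-in for the per-CPU flag */

    static void kernel_fpu_disable(void)
    {
            assert(!in_kernel_fpu);             /* mirrors the WARN_ON() above */
            in_kernel_fpu = true;
    }

    static void kernel_fpu_enable(void)
    {
            in_kernel_fpu = false;
    }

    /* the check interrupted_kernel_fpu_idle() now performs first */
    static bool fpu_usable(void)
    {
            return !in_kernel_fpu;
    }

    int main(void)
    {
            printf("usable before: %d\n", fpu_usable());    /* 1 */
            kernel_fpu_disable();
            printf("usable inside: %d\n", fpu_usable());    /* 0 */
            kernel_fpu_enable();
            printf("usable after:  %d\n", fpu_usable());    /* 1 */
            return 0;
    }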
@@ -73,10 +89,10 @@ void __kernel_fpu_begin(void)
 {
 	struct task_struct *me = current;

+	this_cpu_write(in_kernel_fpu, true);
+
 	if (__thread_has_fpu(me)) {
-		__thread_clear_has_fpu(me);
 		__save_init_fpu(me);
-		/* We do 'stts()' in __kernel_fpu_end() */
 	} else if (!use_eager_fpu()) {
 		this_cpu_write(fpu_owner_task, NULL);
 		clts();
@@ -86,19 +102,16 @@ EXPORT_SYMBOL(__kernel_fpu_begin);

 void __kernel_fpu_end(void)
 {
-	if (use_eager_fpu()) {
-		/*
-		 * For eager fpu, most the time, tsk_used_math() is true.
-		 * Restore the user math as we are done with the kernel usage.
-		 * At few instances during thread exit, signal handling etc,
-		 * tsk_used_math() is false. Those few places will take proper
-		 * actions, so we don't need to restore the math here.
-		 */
-		if (likely(tsk_used_math(current)))
-			math_state_restore();
-	} else {
+	struct task_struct *me = current;
+
+	if (__thread_has_fpu(me)) {
+		if (WARN_ON(restore_fpu_checking(me)))
+			drop_init_fpu(me);
+	} else if (!use_eager_fpu()) {
 		stts();
 	}
+
+	this_cpu_write(in_kernel_fpu, false);
 }

 EXPORT_SYMBOL(__kernel_fpu_end);
@@ -859,18 +859,16 @@ void math_state_restore(void)
 		local_irq_disable();
 	}

+	/* Avoid __kernel_fpu_begin() right after __thread_fpu_begin() */
+	kernel_fpu_disable();
 	__thread_fpu_begin(tsk);
-
-	/*
-	 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
-	 */
 	if (unlikely(restore_fpu_checking(tsk))) {
 		drop_init_fpu(tsk);
 		force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
-		return;
-	}
-
-	tsk->thread.fpu_counter++;
+	} else {
+		tsk->thread.fpu_counter++;
+	}
+	kernel_fpu_enable();
 }
 EXPORT_SYMBOL_GPL(math_state_restore);
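Taken together, this traps.c hunk closes the race named in "Fix math_state_restore() race with kernel_fpu_begin()": once __thread_fpu_begin(tsk) marks the task as owning the FPU, an interrupt's __kernel_fpu_begin() would __save_init_fpu() the still-stale registers over the very fpu->state about to be restored. A timeline sketch of my reading of that interleaving:

    /*
     *  math_state_restore()                 interrupt handler
     *  ---------------------------          ---------------------------
     *  __thread_fpu_begin(tsk);
     *    // tsk owns the FPU, but the
     *    // registers still hold stale data
     *                                       kernel_fpu_begin();
     *                                         __save_init_fpu(tsk);
     *                                         // stale registers overwrite
     *                                         // tsk's saved fpu->state
     *  restore_fpu_checking(tsk);
     *    // restores the corrupted image
     *
     * With kernel_fpu_disable() set across the window, irq_fpu_usable()
     * fails in the interrupt, which must then leave the FPU untouched.
     */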