Commit 276983f8 authored by Ingo Molnar

x86/fpu: Eliminate the __thread_has_fpu() wrapper

Start migrating FPU methods towards using 'struct fpu *fpu'
directly. __thread_has_fpu() is just a trivial wrapper around
fpu->has_fpu, eliminate it.
Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 9a89b029
...@@ -323,16 +323,6 @@ static inline int restore_fpu_checking(struct task_struct *tsk) ...@@ -323,16 +323,6 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
return fpu_restore_checking(&tsk->thread.fpu); return fpu_restore_checking(&tsk->thread.fpu);
} }
/*
* Software FPU state helpers. Careful: these need to
* be preemption protection *and* they need to be
* properly paired with the CR0.TS changes!
*/
static inline int __thread_has_fpu(struct task_struct *tsk)
{
return tsk->thread.fpu.has_fpu;
}
/* Must be paired with an 'stts' after! */ /* Must be paired with an 'stts' after! */
static inline void __thread_clear_has_fpu(struct task_struct *tsk) static inline void __thread_clear_has_fpu(struct task_struct *tsk)
{ {
...@@ -370,13 +360,14 @@ static inline void __thread_fpu_begin(struct task_struct *tsk) ...@@ -370,13 +360,14 @@ static inline void __thread_fpu_begin(struct task_struct *tsk)
static inline void drop_fpu(struct task_struct *tsk) static inline void drop_fpu(struct task_struct *tsk)
{ {
struct fpu *fpu = &tsk->thread.fpu;
/* /*
* Forget coprocessor state.. * Forget coprocessor state..
*/ */
preempt_disable(); preempt_disable();
tsk->thread.fpu.counter = 0; tsk->thread.fpu.counter = 0;
if (__thread_has_fpu(tsk)) { if (fpu->has_fpu) {
/* Ignore delayed exceptions from user space */ /* Ignore delayed exceptions from user space */
asm volatile("1: fwait\n" asm volatile("1: fwait\n"
"2:\n" "2:\n"
...@@ -424,6 +415,7 @@ typedef struct { int preload; } fpu_switch_t; ...@@ -424,6 +415,7 @@ typedef struct { int preload; } fpu_switch_t;
static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu) static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
{ {
struct fpu *old_fpu = &old->thread.fpu;
fpu_switch_t fpu; fpu_switch_t fpu;
/* /*
...@@ -433,7 +425,7 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta ...@@ -433,7 +425,7 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
fpu.preload = tsk_used_math(new) && fpu.preload = tsk_used_math(new) &&
(use_eager_fpu() || new->thread.fpu.counter > 5); (use_eager_fpu() || new->thread.fpu.counter > 5);
if (__thread_has_fpu(old)) { if (old_fpu->has_fpu) {
if (!fpu_save_init(&old->thread.fpu)) if (!fpu_save_init(&old->thread.fpu))
task_disable_lazy_fpu_restore(old); task_disable_lazy_fpu_restore(old);
else else
......
...@@ -57,8 +57,7 @@ static bool interrupted_kernel_fpu_idle(void) ...@@ -57,8 +57,7 @@ static bool interrupted_kernel_fpu_idle(void)
if (use_eager_fpu()) if (use_eager_fpu())
return true; return true;
return !__thread_has_fpu(current) && return !current->thread.fpu.has_fpu && (read_cr0() & X86_CR0_TS);
(read_cr0() & X86_CR0_TS);
} }
/* /*
...@@ -93,11 +92,12 @@ EXPORT_SYMBOL(irq_fpu_usable); ...@@ -93,11 +92,12 @@ EXPORT_SYMBOL(irq_fpu_usable);
void __kernel_fpu_begin(void) void __kernel_fpu_begin(void)
{ {
struct task_struct *me = current; struct task_struct *me = current;
struct fpu *fpu = &me->thread.fpu;
kernel_fpu_disable(); kernel_fpu_disable();
if (__thread_has_fpu(me)) { if (fpu->has_fpu) {
fpu_save_init(&me->thread.fpu); fpu_save_init(fpu);
} else { } else {
this_cpu_write(fpu_owner_task, NULL); this_cpu_write(fpu_owner_task, NULL);
if (!use_eager_fpu()) if (!use_eager_fpu())
...@@ -109,8 +109,9 @@ EXPORT_SYMBOL(__kernel_fpu_begin); ...@@ -109,8 +109,9 @@ EXPORT_SYMBOL(__kernel_fpu_begin);
void __kernel_fpu_end(void) void __kernel_fpu_end(void)
{ {
struct task_struct *me = current; struct task_struct *me = current;
struct fpu *fpu = &me->thread.fpu;
if (__thread_has_fpu(me)) { if (fpu->has_fpu) {
if (WARN_ON(restore_fpu_checking(me))) if (WARN_ON(restore_fpu_checking(me)))
fpu_reset_state(me); fpu_reset_state(me);
} else if (!use_eager_fpu()) { } else if (!use_eager_fpu()) {
...@@ -128,14 +129,16 @@ EXPORT_SYMBOL(__kernel_fpu_end); ...@@ -128,14 +129,16 @@ EXPORT_SYMBOL(__kernel_fpu_end);
*/ */
void fpu__save(struct task_struct *tsk) void fpu__save(struct task_struct *tsk)
{ {
struct fpu *fpu = &tsk->thread.fpu;
WARN_ON(tsk != current); WARN_ON(tsk != current);
preempt_disable(); preempt_disable();
if (__thread_has_fpu(tsk)) { if (fpu->has_fpu) {
if (use_eager_fpu()) { if (use_eager_fpu()) {
__save_fpu(tsk); __save_fpu(tsk);
} else { } else {
fpu_save_init(&tsk->thread.fpu); fpu_save_init(fpu);
__thread_fpu_end(tsk); __thread_fpu_end(tsk);
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.