Commit af2d94fd authored by Ingo Molnar

x86/fpu: Use 'struct fpu' in fpu_reset_state()

Migrate this function to pure 'struct fpu' usage.
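
The conversion is mechanical at each call site: fpu_reset_state() used to take a task_struct pointer and look up tsk->thread.fpu itself; it now takes the struct fpu pointer directly, and every caller resolves it (as &tsk->thread.fpu, &current->thread.fpu, or an fpu pointer it already holds) before the call. A minimal before/after sketch, condensed from the first hunk below; the restore_init_xstate() call in the else branch is inferred from the hunk's context line and comment:

	/* Before: resolves the fpu from the task internally */
	static inline void fpu_reset_state(struct task_struct *tsk)
	{
		struct fpu *fpu = &tsk->thread.fpu;

		if (!use_eager_fpu())
			drop_fpu(fpu);		/* lazy FPU: drop it, later use will reinit it */
		else
			restore_init_xstate();	/* eager FPU: restore the init state now */
	}

	/* After: takes the fpu directly, e.g. fpu_reset_state(&tsk->thread.fpu) */
	static inline void fpu_reset_state(struct fpu *fpu)
	{
		if (!use_eager_fpu())
			drop_fpu(fpu);
		else
			restore_init_xstate();
	}
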
Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 11f2d50b
@@ -380,10 +380,8 @@ static inline void restore_init_xstate(void)
  * Reset the FPU state in the eager case and drop it in the lazy case (later use
  * will reinit it).
  */
-static inline void fpu_reset_state(struct task_struct *tsk)
+static inline void fpu_reset_state(struct fpu *fpu)
 {
-	struct fpu *fpu = &tsk->thread.fpu;
-
 	if (!use_eager_fpu())
 		drop_fpu(fpu);
 	else
@@ -460,7 +458,7 @@ static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
 
 	if (fpu.preload) {
 		if (unlikely(restore_fpu_checking(new_fpu)))
-			fpu_reset_state(new);
+			fpu_reset_state(new_fpu);
 	}
 }
 
...
@@ -112,12 +112,11 @@ EXPORT_SYMBOL(__kernel_fpu_begin);
 
 void __kernel_fpu_end(void)
 {
-	struct task_struct *me = current;
-	struct fpu *fpu = &me->thread.fpu;
+	struct fpu *fpu = &current->thread.fpu;
 
 	if (fpu->has_fpu) {
 		if (WARN_ON(restore_fpu_checking(fpu)))
-			fpu_reset_state(me);
+			fpu_reset_state(fpu);
 	} else if (!use_eager_fpu()) {
 		stts();
 	}
@@ -371,7 +370,7 @@ void fpu__restore(void)
 	kernel_fpu_disable();
 	__thread_fpu_begin(fpu);
 	if (unlikely(restore_fpu_checking(fpu))) {
-		fpu_reset_state(tsk);
+		fpu_reset_state(fpu);
 		force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
 	} else {
 		tsk->thread.fpu.counter++;
...
@@ -343,7 +343,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
 			 config_enabled(CONFIG_IA32_EMULATION));
 
 	if (!buf) {
-		fpu_reset_state(tsk);
+		fpu_reset_state(fpu);
 		return 0;
 	}
 
@@ -417,7 +417,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
 		 */
 		user_fpu_begin();
 		if (restore_user_xstate(buf_fx, xstate_bv, fx_only)) {
-			fpu_reset_state(tsk);
+			fpu_reset_state(fpu);
 			return -1;
 		}
 	}
...
@@ -667,7 +667,7 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
 		 * Ensure the signal handler starts with the new fpu state.
 		 */
 		if (fpu->fpstate_active)
-			fpu_reset_state(current);
+			fpu_reset_state(fpu);
 	}
 	signal_setup_done(failed, ksig, stepping);
 }
...