Commit b3a16308 authored by Ingo Molnar

x86/fpu: Simplify fpu->fpregs_active use

The fpregs_active() inline function is pretty pointless - in almost
all the callsites it can be replaced with a direct fpu->fpregs_active
access.

Do so and eliminate the extra layer of obfuscation.
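
For example, the user_fpu_begin() call site changes from:

	if (!fpregs_active())
		fpregs_activate(fpu);

to a direct access of the flag:

	if (!fpu->fpregs_active)
		fpregs_activate(fpu);

The one call site without a local 'struct fpu *' pointer,
__execute_only_pkey(), uses current->thread.fpu.fpregs_active directly.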

Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Eric Biggers <ebiggers3@gmail.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Yu-cheng Yu <yu-cheng.yu@intel.com>
Link: http://lkml.kernel.org/r/20170923130016.21448-16-mingo@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 6d7f7da5
@@ -542,21 +542,6 @@ static inline void fpregs_activate(struct fpu *fpu)
 	trace_x86_fpu_regs_activated(fpu);
 }
 
-/*
- * The question "does this thread have fpu access?"
- * is slightly racy, since preemption could come in
- * and revoke it immediately after the test.
- *
- * However, even in that very unlikely scenario,
- * we can just assume we have FPU access - typically
- * to save the FP state - we'll just take a #NM
- * fault and get the FPU access back.
- */
-static inline int fpregs_active(void)
-{
-	return current->thread.fpu.fpregs_active;
-}
-
 /*
  * FPU state switching for scheduling.
  *
@@ -617,7 +602,7 @@ static inline void user_fpu_begin(void)
 	struct fpu *fpu = &current->thread.fpu;
 
 	preempt_disable();
-	if (!fpregs_active())
+	if (!fpu->fpregs_active)
 		fpregs_activate(fpu);
 	preempt_enable();
 }
@@ -367,7 +367,7 @@ void fpu__current_fpstate_write_end(void)
 	 * registers may still be out of date. Update them with
 	 * an XRSTOR if they are active.
 	 */
-	if (fpregs_active())
+	if (fpu->fpregs_active)
 		copy_kernel_to_fpregs(&fpu->state);
 
 	/*
@@ -155,7 +155,8 @@ static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf)
  */
 int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
 {
-	struct xregs_state *xsave = &current->thread.fpu.state.xsave;
+	struct fpu *fpu = &current->thread.fpu;
+	struct xregs_state *xsave = &fpu->state.xsave;
 	struct task_struct *tsk = current;
 	int ia32_fxstate = (buf != buf_fx);
 
@@ -170,13 +171,13 @@ int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
 			sizeof(struct user_i387_ia32_struct), NULL,
 			(struct _fpstate_32 __user *) buf) ? -1 : 1;
 
-	if (fpregs_active() || using_compacted_format()) {
+	if (fpu->fpregs_active || using_compacted_format()) {
 		/* Save the live register state to the user directly. */
 		if (copy_fpregs_to_sigframe(buf_fx))
 			return -1;
 		/* Update the thread's fxstate to save the fsave header. */
 		if (ia32_fxstate)
-			copy_fxregs_to_kernel(&tsk->thread.fpu);
+			copy_fxregs_to_kernel(fpu);
 	} else {
 		/*
 		 * It is a *bug* if kernel uses compacted-format for xsave
@@ -189,7 +190,7 @@ int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
 			return -1;
 		}
 
-		fpstate_sanitize_xstate(&tsk->thread.fpu);
+		fpstate_sanitize_xstate(fpu);
 		if (__copy_to_user(buf_fx, xsave, fpu_user_xstate_size))
 			return -1;
 	}
@@ -45,7 +45,7 @@ int __execute_only_pkey(struct mm_struct *mm)
 	 */
 	preempt_disable();
 	if (!need_to_set_mm_pkey &&
-	    fpregs_active() &&
+	    current->thread.fpu.fpregs_active &&
 	    !__pkru_allows_read(read_pkru(), execute_only_pkey)) {
 		preempt_enable();
 		return execute_only_pkey;