Commit 4c138410 authored by Ingo Molnar

x86/fpu: Open code PF_USED_MATH usages

PF_USED_MATH is used directly, but also in a handful of helper inlines.

To ease the elimination of PF_USED_MATH, convert all inline helpers
to open-coded PF_USED_MATH usage.

Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 4540d3fa
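
Note: the inline helpers being open-coded here are thin wrappers around the PF_USED_MATH bit in task_struct::flags. A rough sketch of their definitions as they stood in include/linux/sched.h at the time (the exact macro bodies may differ slightly):

	#define tsk_used_math(p)			((p)->flags & PF_USED_MATH)
	#define used_math()				tsk_used_math(current)
	#define set_used_math()				(current->flags |= PF_USED_MATH)
	#define clear_stopped_child_used_math(child)	((child)->flags &= ~PF_USED_MATH)

Each hunk below substitutes the helper's body at the call site, so the flag test or update becomes directly visible in the code.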
@@ -321,7 +321,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
 	    ksig->ka.sa.sa_restorer)
 		sp = (unsigned long) ksig->ka.sa.sa_restorer;
 
-	if (used_math()) {
+	if (current->flags & PF_USED_MATH) {
 		unsigned long fx_aligned, math_size;
 
 		sp = alloc_mathframe(sp, 1, &fx_aligned, &math_size);
...
@@ -375,7 +375,8 @@ static inline void drop_fpu(struct task_struct *tsk)
 		__thread_fpu_end(fpu);
 	}
 
-	clear_stopped_child_used_math(tsk);
+	tsk->flags &= ~PF_USED_MATH;
 
 	preempt_enable();
 }
@@ -423,7 +424,7 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
 	 * If the task has used the math, pre-load the FPU on xsave processors
 	 * or if the past 5 consecutive context-switches used math.
 	 */
-	fpu.preload = tsk_used_math(new) &&
+	fpu.preload = (new->flags & PF_USED_MATH) &&
 		      (use_eager_fpu() || new->thread.fpu.counter > 5);
 
 	if (old_fpu->has_fpu) {
...
@@ -242,7 +242,7 @@ int fpu__copy(struct task_struct *dst, struct task_struct *src)
 	task_disable_lazy_fpu_restore(dst);
 
-	if (tsk_used_math(src)) {
+	if (src->flags & PF_USED_MATH) {
 		int err = fpstate_alloc(&dst->thread.fpu);
 
 		if (err)
@@ -331,7 +331,7 @@ void fpu__restore(void)
 	struct task_struct *tsk = current;
 	struct fpu *fpu = &tsk->thread.fpu;
 
-	if (!tsk_used_math(tsk)) {
+	if (!(tsk->flags & PF_USED_MATH)) {
 		local_irq_enable();
 		/*
 		 * does a slab alloc which can sleep
@@ -361,12 +361,14 @@ EXPORT_SYMBOL_GPL(fpu__restore);
 
 void fpu__flush_thread(struct task_struct *tsk)
 {
+	WARN_ON(tsk != current);
+
 	if (!use_eager_fpu()) {
 		/* FPU state will be reallocated lazily at the first use. */
 		drop_fpu(tsk);
 		fpstate_free(&tsk->thread.fpu);
 	} else {
-		if (!tsk_used_math(tsk)) {
+		if (!(tsk->flags & PF_USED_MATH)) {
 			/* kthread execs. TODO: cleanup this horror. */
 			if (WARN_ON(fpstate_alloc_init(tsk)))
 				force_sig(SIGKILL, tsk);
@@ -383,12 +385,12 @@ void fpu__flush_thread(struct task_struct *tsk)
  */
 int fpregs_active(struct task_struct *target, const struct user_regset *regset)
 {
-	return tsk_used_math(target) ? regset->n : 0;
+	return (target->flags & PF_USED_MATH) ? regset->n : 0;
 }
 
 int xfpregs_active(struct task_struct *target, const struct user_regset *regset)
 {
-	return (cpu_has_fxsr && tsk_used_math(target)) ? regset->n : 0;
+	return (cpu_has_fxsr && (target->flags & PF_USED_MATH)) ? regset->n : 0;
 }
 
 int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
@@ -719,7 +721,7 @@ int dump_fpu(struct pt_regs *regs, struct user_i387_struct *fpu)
 	struct task_struct *tsk = current;
 	int fpvalid;
 
-	fpvalid = !!used_math();
+	fpvalid = !!(tsk->flags & PF_USED_MATH);
 	if (fpvalid)
 		fpvalid = !fpregs_get(tsk, NULL,
 				      0, sizeof(struct user_i387_ia32_struct),
...
@@ -349,7 +349,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
 	if (!access_ok(VERIFY_READ, buf, size))
 		return -EACCES;
 
-	if (!used_math() && fpstate_alloc_init(tsk))
+	if (!(tsk->flags & PF_USED_MATH) && fpstate_alloc_init(tsk))
 		return -1;
 
 	if (!static_cpu_has(X86_FEATURE_FPU))
@@ -384,12 +384,12 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
 		int err = 0;
 
 		/*
-		 * Drop the current fpu which clears used_math(). This ensures
+		 * Drop the current fpu which clears PF_USED_MATH. This ensures
 		 * that any context-switch during the copy of the new state,
 		 * avoids the intermediate state from getting restored/saved.
 		 * Thus avoiding the new restored state from getting corrupted.
 		 * We will be ready to restore/save the state only after
-		 * set_used_math() is again set.
+		 * PF_USED_MATH is again set.
 		 */
 		drop_fpu(tsk);
@@ -401,7 +401,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
 		sanitize_restored_xstate(tsk, &env, xstate_bv, fx_only);
 	}
 
-	set_used_math();
+	tsk->flags |= PF_USED_MATH;
 	if (use_eager_fpu()) {
 		preempt_disable();
 		fpu__restore();
@@ -685,7 +685,7 @@ void xsave_init(void)
  */
 void __init_refok eager_fpu_init(void)
 {
-	WARN_ON(used_math());
+	WARN_ON(current->flags & PF_USED_MATH);
 	current_thread_info()->status = 0;
 
 	if (eagerfpu == ENABLE)
...
@@ -217,7 +217,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
 		}
 	}
 
-	if (used_math()) {
+	if (current->flags & PF_USED_MATH) {
 		sp = alloc_mathframe(sp, config_enabled(CONFIG_X86_32),
 				     &buf_fx, &math_size);
 		*fpstate = (void __user *)sp;
@@ -233,7 +233,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
 		return (void __user *)-1L;
 
 	/* save i387 and extended state */
-	if (used_math() &&
+	if ((current->flags & PF_USED_MATH) &&
 	    save_xstate_sig(*fpstate, (void __user *)buf_fx, math_size) < 0)
 		return (void __user *)-1L;
@@ -664,7 +664,7 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
 		/*
 		 * Ensure the signal handler starts with the new fpu state.
 		 */
-		if (used_math())
+		if (current->flags & PF_USED_MATH)
 			fpu_reset_state(current);
 	}
 	signal_setup_done(failed, ksig, stepping);
...
@@ -6600,7 +6600,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	int r;
 	sigset_t sigsaved;
 
-	if (!tsk_used_math(current) && fpstate_alloc_init(current))
+	if (!(current->flags & PF_USED_MATH) && fpstate_alloc_init(current))
 		return -ENOMEM;
 
 	if (vcpu->sigset_active)
...
@@ -148,7 +148,7 @@ void math_emulate(struct math_emu_info *info)
 	unsigned long code_limit = 0; /* Initialized to stop compiler warnings */
 	struct desc_struct code_descriptor;
 
-	if (!used_math()) {
+	if (!(current->flags & PF_USED_MATH)) {
 		if (fpstate_alloc_init(current)) {
 			do_group_exit(SIGKILL);
 			return;
...
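
After this patch, only three open-coded patterns remain, all visible in the hunks above (a summary sketch; tsk stands for whichever task_struct a given call site operates on):

	/* Test: has the task ever used the FPU? */
	int fpu_active = !!(tsk->flags & PF_USED_MATH);

	/* Set: mark the FPU state as live, e.g. after a signal-frame restore: */
	tsk->flags |= PF_USED_MATH;

	/* Clear: discard the FPU state, e.g. in drop_fpu(): */
	tsk->flags &= ~PF_USED_MATH;

This makes every remaining PF_USED_MATH dependency trivial to grep for, which is the point: a follow-up patch can then eliminate the flag in one sweep.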