Commit 91d93d0e authored by Ingo Molnar

x86/fpu: Remove failure return from fpstate_alloc_init()

Remove the failure code and propagate this down to callers.

Note that this function still has an 'init' aspect, which must be
called.
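
For reference, a minimal, self-contained sketch of the pattern this change applies (plain userspace C with hypothetical names such as fpu_ctx/ctx_alloc_init, not the kernel code itself): the init function that used to report failure becomes void, warns on misuse instead of returning an error code, and callers drop their error handling:

  #include <assert.h>
  #include <stdio.h>
  #include <string.h>

  struct fpu_ctx {
          int active;                /* mirrors fpu->fpstate_active */
          unsigned char state[64];   /* stand-in for the FPU register image */
  };

  /*
   * Old shape: int ctx_alloc_init() could fail, so every caller had to
   * check the return value and handle the error.
   *
   * New shape: the function cannot fail any more, so it returns void;
   * misuse is reported with a warning (WARN_ON_ONCE analogue) instead
   * of an error code.
   */
  static void ctx_alloc_init(struct fpu_ctx *ctx)
  {
          if (ctx->active)
                  fprintf(stderr, "warning: context already active\n");

          memset(ctx->state, 0, sizeof(ctx->state)); /* fpstate_init() analogue */
          ctx->active = 1;                            /* safe for the current task */
  }

  int main(void)
  {
          struct fpu_ctx ctx = { 0 };

          /* Caller pattern after the change: no error path left to handle. */
          if (!ctx.active)
                  ctx_alloc_init(&ctx);

          assert(ctx.active);
          printf("context initialized\n");
          return 0;
  }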
Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent c4d6ee6e
@@ -44,7 +44,7 @@ extern void fpu__init_system_xstate(void);
 extern void fpu__init_cpu_xstate(void);
 extern void fpu__init_system(struct cpuinfo_x86 *c);
-extern int fpstate_alloc_init(struct fpu *fpu);
+extern void fpstate_alloc_init(struct fpu *fpu);
 extern void fpstate_init(struct fpu *fpu);
 extern void fpu__clear(struct task_struct *tsk);
@@ -259,26 +259,17 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
 }
 
 /*
- * Allocate the backing store for the current task's FPU registers
- * and initialize the registers themselves as well.
- *
- * Can fail.
+ * Initialize the current task's in-memory FPU context:
  */
-int fpstate_alloc_init(struct fpu *fpu)
+void fpstate_alloc_init(struct fpu *fpu)
 {
-	int ret;
-
-	if (WARN_ON_ONCE(fpu != &current->thread.fpu))
-		return -EINVAL;
-	if (WARN_ON_ONCE(fpu->fpstate_active))
-		return -EINVAL;
+	WARN_ON_ONCE(fpu != &current->thread.fpu);
+	WARN_ON_ONCE(fpu->fpstate_active);
 
 	fpstate_init(fpu);
 
 	/* Safe to do for the current task: */
 	fpu->fpstate_active = 1;
-
-	return 0;
 }
 EXPORT_SYMBOL_GPL(fpstate_alloc_init);
@@ -340,20 +331,8 @@ void fpu__restore(void)
 	struct task_struct *tsk = current;
 	struct fpu *fpu = &tsk->thread.fpu;
 
-	if (!fpu->fpstate_active) {
-		local_irq_enable();
-		/*
-		 * does a slab alloc which can sleep
-		 */
-		if (fpstate_alloc_init(fpu)) {
-			/*
-			 * ran out of memory!
-			 */
-			do_group_exit(SIGKILL);
-			return;
-		}
-		local_irq_disable();
-	}
+	if (!fpu->fpstate_active)
+		fpstate_alloc_init(fpu);
 
 	/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
 	kernel_fpu_disable();
@@ -379,9 +358,7 @@ void fpu__clear(struct task_struct *tsk)
 		drop_fpu(fpu);
 	} else {
 		if (!fpu->fpstate_active) {
-			/* kthread execs. TODO: cleanup this horror. */
-			if (WARN_ON(fpstate_alloc_init(fpu)))
-				force_sig(SIGKILL, tsk);
+			fpstate_alloc_init(fpu);
 			user_fpu_begin();
 		}
 		restore_init_xstate();
@@ -358,8 +358,8 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
 	if (!access_ok(VERIFY_READ, buf, size))
 		return -EACCES;
 
-	if (!fpu->fpstate_active && fpstate_alloc_init(fpu))
-		return -1;
+	if (!fpu->fpstate_active)
+		fpstate_alloc_init(fpu);
 
 	if (!static_cpu_has(X86_FEATURE_FPU))
 		return fpregs_soft_set(current, NULL,
@@ -6601,8 +6601,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	int r;
 	sigset_t sigsaved;
 
-	if (!fpu->fpstate_active && fpstate_alloc_init(fpu))
-		return -ENOMEM;
+	if (!fpu->fpstate_active)
+		fpstate_alloc_init(fpu);
 
 	if (vcpu->sigset_active)
 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
@@ -149,12 +149,8 @@ void math_emulate(struct math_emu_info *info)
 	struct desc_struct code_descriptor;
 	struct fpu *fpu = &current->thread.fpu;
 
-	if (!fpu->fpstate_active) {
-		if (fpstate_alloc_init(fpu)) {
-			do_group_exit(SIGKILL);
-			return;
-		}
-	}
+	if (!fpu->fpstate_active)
+		fpstate_alloc_init(fpu);
 
 #ifdef RE_ENTRANT_CHECKING
 	if (emulating) {