Commit 91d93d0e authored by Ingo Molnar

x86/fpu: Remove failure return from fpstate_alloc_init()

Remove the failure return code and propagate this simplification down to all callers.

Note that this function still has an 'init' aspect, so it must still be
called even though it can no longer fail.
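
To make the calling-convention change concrete, here is a minimal standalone
userspace C sketch (not kernel code) of the before/after caller pattern seen
in the diff below; the struct and field names mirror the kernel's, while the
_old/_new function names and their stub bodies are purely illustrative:

/*
 * Standalone sketch of this commit's API change. The
 * fpstate_alloc_init_old/_new names and stub bodies are illustrative
 * only; the caller patterns mirror the '-' and '+' lines below.
 */
#include <stdio.h>

struct fpu {
	int fpstate_active;		/* mirrors fpu->fpstate_active */
};

/* Old shape: reports failure, so every caller needs an error path. */
static int fpstate_alloc_init_old(struct fpu *fpu)
{
	if (fpu->fpstate_active)
		return -1;		/* stand-in for -EINVAL */
	fpu->fpstate_active = 1;
	return 0;
}

/* New shape: cannot fail, so the return type becomes void. */
static void fpstate_alloc_init_new(struct fpu *fpu)
{
	fpu->fpstate_active = 1;
}

int main(void)
{
	struct fpu fpu = { 0 };

	/* Old caller pattern ('-' lines): check for and handle failure. */
	if (!fpu.fpstate_active && fpstate_alloc_init_old(&fpu))
		return 1;		/* error path that no longer exists */

	fpu.fpstate_active = 0;		/* reset for the second demonstration */

	/* New caller pattern ('+' lines): just call it. */
	if (!fpu.fpstate_active)
		fpstate_alloc_init_new(&fpu);

	printf("fpstate_active = %d\n", fpu.fpstate_active);
	return 0;
}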
Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent c4d6ee6e
@@ -44,7 +44,7 @@ extern void fpu__init_system_xstate(void);
 extern void fpu__init_cpu_xstate(void);
 extern void fpu__init_system(struct cpuinfo_x86 *c);
-extern int fpstate_alloc_init(struct fpu *fpu);
+extern void fpstate_alloc_init(struct fpu *fpu);
 extern void fpstate_init(struct fpu *fpu);
 extern void fpu__clear(struct task_struct *tsk);
...
@@ -259,26 +259,17 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
 }
 
 /*
- * Allocate the backing store for the current task's FPU registers
- * and initialize the registers themselves as well.
- *
- * Can fail.
+ * Initialize the current task's in-memory FPU context:
  */
-int fpstate_alloc_init(struct fpu *fpu)
+void fpstate_alloc_init(struct fpu *fpu)
 {
-	int ret;
-
-	if (WARN_ON_ONCE(fpu != &current->thread.fpu))
-		return -EINVAL;
-	if (WARN_ON_ONCE(fpu->fpstate_active))
-		return -EINVAL;
+	WARN_ON_ONCE(fpu != &current->thread.fpu);
+	WARN_ON_ONCE(fpu->fpstate_active);
 
 	fpstate_init(fpu);
 
 	/* Safe to do for the current task: */
 	fpu->fpstate_active = 1;
-
-	return 0;
 }
 EXPORT_SYMBOL_GPL(fpstate_alloc_init);
@@ -340,20 +331,8 @@ void fpu__restore(void)
 	struct task_struct *tsk = current;
 	struct fpu *fpu = &tsk->thread.fpu;
 
-	if (!fpu->fpstate_active) {
-		local_irq_enable();
-		/*
-		 * does a slab alloc which can sleep
-		 */
-		if (fpstate_alloc_init(fpu)) {
-			/*
-			 * ran out of memory!
-			 */
-			do_group_exit(SIGKILL);
-			return;
-		}
-		local_irq_disable();
-	}
+	if (!fpu->fpstate_active)
+		fpstate_alloc_init(fpu);
 
 	/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
 	kernel_fpu_disable();
@@ -379,9 +358,7 @@ void fpu__clear(struct task_struct *tsk)
 		drop_fpu(fpu);
 	} else {
 		if (!fpu->fpstate_active) {
-			/* kthread execs. TODO: cleanup this horror. */
-			if (WARN_ON(fpstate_alloc_init(fpu)))
-				force_sig(SIGKILL, tsk);
+			fpstate_alloc_init(fpu);
 			user_fpu_begin();
 		}
 		restore_init_xstate();
...
@@ -358,8 +358,8 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
 	if (!access_ok(VERIFY_READ, buf, size))
 		return -EACCES;
 
-	if (!fpu->fpstate_active && fpstate_alloc_init(fpu))
-		return -1;
+	if (!fpu->fpstate_active)
+		fpstate_alloc_init(fpu);
 
 	if (!static_cpu_has(X86_FEATURE_FPU))
 		return fpregs_soft_set(current, NULL,
...
@@ -6601,8 +6601,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	int r;
 	sigset_t sigsaved;
 
-	if (!fpu->fpstate_active && fpstate_alloc_init(fpu))
-		return -ENOMEM;
+	if (!fpu->fpstate_active)
+		fpstate_alloc_init(fpu);
 
 	if (vcpu->sigset_active)
 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
...
@@ -149,12 +149,8 @@ void math_emulate(struct math_emu_info *info)
 	struct desc_struct code_descriptor;
 	struct fpu *fpu = &current->thread.fpu;
 
-	if (!fpu->fpstate_active) {
-		if (fpstate_alloc_init(fpu)) {
-			do_group_exit(SIGKILL);
-			return;
-		}
-	}
+	if (!fpu->fpstate_active)
+		fpstate_alloc_init(fpu);
 
 #ifdef RE_ENTRANT_CHECKING
 	if (emulating) {
...