Commit 9ccc27a5 authored by Ingo Molnar

x86/fpu: Remove error return values from copy_kernel_to_*regs() functions

None of the copy_kernel_to_*regs() FPU register copying functions are
supposed to fail, and all of them have debugging checks that enforce
this.

Remove their return values and simplify their call sites, which have
redundant error checks and error handling code paths.

Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Bobby Powers <bobbypowers@gmail.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 3e1bf47e
...@@ -141,7 +141,7 @@ static inline int copy_fxregs_to_user(struct fxregs_state __user *fx) ...@@ -141,7 +141,7 @@ static inline int copy_fxregs_to_user(struct fxregs_state __user *fx)
return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx)); return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
} }
static inline int copy_kernel_to_fxregs(struct fxregs_state *fx) static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)
{ {
int err; int err;
...@@ -157,8 +157,6 @@ static inline int copy_kernel_to_fxregs(struct fxregs_state *fx) ...@@ -157,8 +157,6 @@ static inline int copy_kernel_to_fxregs(struct fxregs_state *fx)
} }
/* Copying from a kernel buffer to FPU registers should never fail: */ /* Copying from a kernel buffer to FPU registers should never fail: */
WARN_ON_FPU(err); WARN_ON_FPU(err);
return err;
} }
static inline int copy_user_to_fxregs(struct fxregs_state __user *fx) static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
...@@ -173,13 +171,11 @@ static inline int copy_user_to_fxregs(struct fxregs_state __user *fx) ...@@ -173,13 +171,11 @@ static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
"m" (*fx)); "m" (*fx));
} }
static inline int copy_kernel_to_fregs(struct fregs_state *fx) static inline void copy_kernel_to_fregs(struct fregs_state *fx)
{ {
int err = check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx)); int err = check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
WARN_ON_FPU(err); WARN_ON_FPU(err);
return err;
} }
static inline int copy_user_to_fregs(struct fregs_state __user *fx) static inline int copy_user_to_fregs(struct fregs_state __user *fx)
...@@ -450,20 +446,19 @@ static inline int copy_fpregs_to_fpstate(struct fpu *fpu) ...@@ -450,20 +446,19 @@ static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
return 0; return 0;
} }
static inline int __copy_kernel_to_fpregs(struct fpu *fpu) static inline void __copy_kernel_to_fpregs(struct fpu *fpu)
{ {
if (use_xsave()) { if (use_xsave()) {
copy_kernel_to_xregs(&fpu->state.xsave, -1); copy_kernel_to_xregs(&fpu->state.xsave, -1);
return 0;
} else { } else {
if (use_fxsr()) if (use_fxsr())
return copy_kernel_to_fxregs(&fpu->state.fxsave); copy_kernel_to_fxregs(&fpu->state.fxsave);
else else
return copy_kernel_to_fregs(&fpu->state.fsave); copy_kernel_to_fregs(&fpu->state.fsave);
} }
} }
static inline int copy_kernel_to_fpregs(struct fpu *fpu) static inline void copy_kernel_to_fpregs(struct fpu *fpu)
{ {
/* /*
* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
...@@ -478,7 +473,7 @@ static inline int copy_kernel_to_fpregs(struct fpu *fpu) ...@@ -478,7 +473,7 @@ static inline int copy_kernel_to_fpregs(struct fpu *fpu)
: : [addr] "m" (fpu->fpregs_active)); : : [addr] "m" (fpu->fpregs_active));
} }
return __copy_kernel_to_fpregs(fpu); __copy_kernel_to_fpregs(fpu);
} }
extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size); extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);
...@@ -646,12 +641,8 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu) ...@@ -646,12 +641,8 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
*/ */
static inline void switch_fpu_finish(struct fpu *new_fpu, fpu_switch_t fpu_switch) static inline void switch_fpu_finish(struct fpu *new_fpu, fpu_switch_t fpu_switch)
{ {
if (fpu_switch.preload) { if (fpu_switch.preload)
if (unlikely(copy_kernel_to_fpregs(new_fpu))) { copy_kernel_to_fpregs(new_fpu);
WARN_ON_FPU(1);
fpu__clear(new_fpu);
}
}
} }
/* /*
......
...@@ -126,12 +126,10 @@ void __kernel_fpu_end(void) ...@@ -126,12 +126,10 @@ void __kernel_fpu_end(void)
{ {
struct fpu *fpu = &current->thread.fpu; struct fpu *fpu = &current->thread.fpu;
if (fpu->fpregs_active) { if (fpu->fpregs_active)
if (WARN_ON_FPU(copy_kernel_to_fpregs(fpu))) copy_kernel_to_fpregs(fpu);
fpu__clear(fpu); else
} else {
__fpregs_deactivate_hw(); __fpregs_deactivate_hw();
}
kernel_fpu_enable(); kernel_fpu_enable();
} }
...@@ -370,14 +368,8 @@ void fpu__restore(struct fpu *fpu) ...@@ -370,14 +368,8 @@ void fpu__restore(struct fpu *fpu)
/* Avoid __kernel_fpu_begin() right after fpregs_activate() */ /* Avoid __kernel_fpu_begin() right after fpregs_activate() */
kernel_fpu_disable(); kernel_fpu_disable();
fpregs_activate(fpu); fpregs_activate(fpu);
if (unlikely(copy_kernel_to_fpregs(fpu))) { copy_kernel_to_fpregs(fpu);
/* Copying the kernel state to FPU registers should never fail: */ fpu->counter++;
WARN_ON_FPU(1);
fpu__clear(fpu);
force_sig_info(SIGSEGV, SEND_SIG_PRIV, current);
} else {
fpu->counter++;
}
kernel_fpu_enable(); kernel_fpu_enable();
} }
EXPORT_SYMBOL_GPL(fpu__restore); EXPORT_SYMBOL_GPL(fpu__restore);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment