Commit c6813144 authored by Ingo Molnar

x86/fpu: Rename all the fpregs, xregs, fxregs and fregs handling functions

Standardize the naming of the various functions that copy register
content in specific FPU context formats:

  copy_fxregs_to_kernel()         # was: fpu_fxsave()
  copy_xregs_to_kernel()          # was: xsave_state()

  copy_kernel_to_fregs()          # was: frstor_checking()
  copy_kernel_to_fxregs()         # was: fxrstor_checking()
  copy_kernel_to_xregs()          # was: fpu_xrstor_checking()
  copy_kernel_to_xregs_booting()  # was: xrstor_state_booting()

  copy_fregs_to_user()            # was: fsave_user()
  copy_fxregs_to_user()           # was: fxsave_user()
  copy_xregs_to_user()            # was: xsave_user()

  copy_user_to_fregs()            # was: frstor_user()
  copy_user_to_fxregs()           # was: fxrstor_user()
  copy_user_to_xregs()            # was: xrestore_user()
  copy_user_to_fpregs_zeroing()   # was: restore_user_xstate()

Eliminate fpu_xrstor_checking(), because it was just a wrapper.

No change in functionality.

Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 81541889
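
Note on the one helper that is removed rather than renamed: fpu_xrstor_checking() was a thin wrapper around xrstor_state(fx, -1), so its callers now pass the mask themselves. A condensed before/after sketch, assembled from the hunks below (not a complete function), using the new names:

```c
/* Before: the wrapper hid the "restore all xfeatures" mask (-1). */
static inline int fpu_xrstor_checking(struct xsave_struct *fx)
{
	return xrstor_state(fx, -1);
}

/* After: callers such as __copy_fpstate_to_fpregs() pass the mask directly. */
if (use_xsave())
	return copy_kernel_to_xregs(&fpu->state.xsave, -1);
else if (use_fxsr())
	return copy_kernel_to_fxregs(&fpu->state.fxsave);
else
	return copy_kernel_to_fregs(&fpu->state.fsave);
```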
@@ -133,57 +133,57 @@ extern void fpstate_sanitize_xstate(struct fpu *fpu);
 	err;						\
 })
 
-static inline int fsave_user(struct i387_fsave_struct __user *fx)
+static inline int copy_fregs_to_user(struct i387_fsave_struct __user *fx)
 {
 	return user_insn(fnsave %[fx]; fwait, [fx] "=m" (*fx), "m" (*fx));
 }
 
-static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
+static inline int copy_fxregs_to_user(struct i387_fxsave_struct __user *fx)
 {
 	if (config_enabled(CONFIG_X86_32))
 		return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
 	else if (config_enabled(CONFIG_AS_FXSAVEQ))
 		return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));
 
-	/* See comment in fpu_fxsave() below. */
+	/* See comment in copy_fxregs_to_kernel() below. */
 	return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
 }
 
-static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
+static inline int copy_kernel_to_fxregs(struct i387_fxsave_struct *fx)
 {
 	if (config_enabled(CONFIG_X86_32))
 		return check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
 	else if (config_enabled(CONFIG_AS_FXSAVEQ))
 		return check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
 
-	/* See comment in fpu_fxsave() below. */
+	/* See comment in copy_fxregs_to_kernel() below. */
 	return check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
 			  "m" (*fx));
 }
 
-static inline int fxrstor_user(struct i387_fxsave_struct __user *fx)
+static inline int copy_user_to_fxregs(struct i387_fxsave_struct __user *fx)
 {
 	if (config_enabled(CONFIG_X86_32))
 		return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
 	else if (config_enabled(CONFIG_AS_FXSAVEQ))
 		return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
 
-	/* See comment in fpu_fxsave() below. */
+	/* See comment in copy_fxregs_to_kernel() below. */
 	return user_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
 			  "m" (*fx));
 }
 
-static inline int frstor_checking(struct i387_fsave_struct *fx)
+static inline int copy_kernel_to_fregs(struct i387_fsave_struct *fx)
 {
 	return check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
 }
 
-static inline int frstor_user(struct i387_fsave_struct __user *fx)
+static inline int copy_user_to_fregs(struct i387_fsave_struct __user *fx)
 {
 	return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
 }
 
-static inline void fpu_fxsave(struct fpu *fpu)
+static inline void copy_fxregs_to_kernel(struct fpu *fpu)
 {
 	if (config_enabled(CONFIG_X86_32))
 		asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state.fxsave));
@@ -230,12 +230,12 @@ static inline void fpu_fxsave(struct fpu *fpu)
 static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
 {
 	if (likely(use_xsave())) {
-		xsave_state(&fpu->state.xsave);
+		copy_xregs_to_kernel(&fpu->state.xsave);
 		return 1;
 	}
 
 	if (likely(use_fxsr())) {
-		fpu_fxsave(fpu);
+		copy_fxregs_to_kernel(fpu);
 		return 1;
 	}
@@ -251,11 +251,11 @@ static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
 static inline int __copy_fpstate_to_fpregs(struct fpu *fpu)
 {
 	if (use_xsave())
-		return fpu_xrstor_checking(&fpu->state.xsave);
+		return copy_kernel_to_xregs(&fpu->state.xsave, -1);
 	else if (use_fxsr())
-		return fxrstor_checking(&fpu->state.fxsave);
+		return copy_kernel_to_fxregs(&fpu->state.fxsave);
 	else
-		return frstor_checking(&fpu->state.fsave);
+		return copy_kernel_to_fregs(&fpu->state.fsave);
 }
 
 static inline int copy_fpstate_to_fpregs(struct fpu *fpu)
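
For context, the user_insn()/check_insn() wrappers these helpers expand to are untouched by this commit (only their closing lines show up as context above). A rough sketch of their shape, assuming the usual exception-table fixup pattern rather than quoting the kernel verbatim:

```c
/*
 * Approximate shape only -- not the verbatim kernel macro.  One FPU
 * instruction runs under an exception-table fixup, so a faulting
 * FXRSTOR/FRSTOR/FXSAVE reports an error code instead of oopsing.
 */
#define check_insn(insn, output, input...)			\
({								\
	int err;						\
	asm volatile("1:" #insn "\n\t"				\
		     "2:\n"					\
		     ".section .fixup,\"ax\"\n"			\
		     "3:  movl $-1, %[err]\n"			\
		     "    jmp  2b\n"				\
		     ".previous\n"				\
		     _ASM_EXTABLE(1b, 3b)			\
		     : [err] "=r" (err), output			\
		     : "0" (0), input);				\
	err;							\
})
```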
@@ -58,7 +58,7 @@ extern void update_regset_xstate_info(unsigned int size, u64 xstate_mask);
  * This function is called only during boot time when x86 caps are not set
  * up and alternative can not be used yet.
  */
-static inline int xsave_state_booting(struct xsave_struct *fx)
+static inline int copy_xregs_to_kernel_booting(struct xsave_struct *fx)
 {
 	u64 mask = -1;
 	u32 lmask = mask;
@@ -86,7 +86,7 @@ static inline int xsave_state_booting(struct xsave_struct *fx)
  * This function is called only during boot time when x86 caps are not set
  * up and alternative can not be used yet.
  */
-static inline int xrstor_state_booting(struct xsave_struct *fx, u64 mask)
+static inline int copy_kernel_to_xregs_booting(struct xsave_struct *fx, u64 mask)
 {
 	u32 lmask = mask;
 	u32 hmask = mask >> 32;
@@ -112,7 +112,7 @@ static inline int xrstor_state_booting(struct xsave_struct *fx, u64 mask)
 /*
  * Save processor xstate to xsave area.
  */
-static inline int xsave_state(struct xsave_struct *fx)
+static inline int copy_xregs_to_kernel(struct xsave_struct *fx)
 {
 	u64 mask = -1;
 	u32 lmask = mask;
@@ -151,7 +151,7 @@ static inline int xsave_state(struct xsave_struct *fx)
 /*
  * Restore processor xstate from xsave area.
  */
-static inline int xrstor_state(struct xsave_struct *fx, u64 mask)
+static inline int copy_kernel_to_xregs(struct xsave_struct *fx, u64 mask)
 {
 	int err = 0;
 	u32 lmask = mask;
@@ -176,14 +176,6 @@ static inline int xrstor_state(struct xsave_struct *fx, u64 mask)
 	return err;
 }
 
-/*
- * Restore xstate context for new process during context switch.
- */
-static inline int fpu_xrstor_checking(struct xsave_struct *fx)
-{
-	return xrstor_state(fx, -1);
-}
-
 /*
  * Save xstate to user space xsave area.
  *
@@ -194,7 +186,7 @@ static inline int fpu_xrstor_checking(struct xsave_struct *fx)
  * backward compatibility for old applications which don't understand
  * compacted format of xsave area.
  */
-static inline int xsave_user(struct xsave_struct __user *buf)
+static inline int copy_xregs_to_user(struct xsave_struct __user *buf)
 {
 	int err;
@@ -218,7 +210,7 @@ static inline int xsave_user(struct xsave_struct __user *buf)
 /*
  * Restore xstate from user space xsave area.
  */
-static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
+static inline int copy_user_to_xregs(struct xsave_struct __user *buf, u64 mask)
 {
 	int err = 0;
 	struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
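
The lmask/hmask locals visible in these hunks exist because XSAVE and XRSTOR take the requested-feature bitmap in the EDX:EAX register pair. A deliberately simplified sketch of that split (hypothetical helper name; the real code additionally uses instruction alternatives and exception fixups):

```c
/* Simplified illustration only: split the 64-bit feature mask for XRSTOR. */
static inline void xrstor_sketch(struct xsave_struct *fx, u64 mask)
{
	u32 lmask = mask;		/* low 32 bits  -> EAX */
	u32 hmask = mask >> 32;		/* high 32 bits -> EDX */

	asm volatile("xrstor %[fx]"
		     : /* no outputs */
		     : [fx] "m" (*fx), "a" (lmask), "d" (hmask)
		     : "memory");
}
```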
@@ -398,9 +398,9 @@ void fpu__drop(struct fpu *fpu)
 static inline void copy_init_fpstate_to_fpregs(void)
 {
 	if (use_xsave())
-		xrstor_state(&init_fpstate.xsave, -1);
+		copy_kernel_to_xregs(&init_fpstate.xsave, -1);
 	else
-		fxrstor_checking(&init_fpstate.fxsave);
+		copy_kernel_to_fxregs(&init_fpstate.fxsave);
 }
 
 /*
@@ -260,11 +260,11 @@ static inline int copy_fpregs_to_sigframe(struct xsave_struct __user *buf)
 	int err;
 
 	if (use_xsave())
-		err = xsave_user(buf);
+		err = copy_xregs_to_user(buf);
 	else if (use_fxsr())
-		err = fxsave_user((struct i387_fxsave_struct __user *) buf);
+		err = copy_fxregs_to_user((struct i387_fxsave_struct __user *) buf);
 	else
-		err = fsave_user((struct i387_fsave_struct __user *) buf);
+		err = copy_fregs_to_user((struct i387_fsave_struct __user *) buf);
 
 	if (unlikely(err) && __clear_user(buf, xstate_size))
 		err = -EFAULT;
@@ -314,7 +314,7 @@ int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
 			return -1;
 		/* Update the thread's fxstate to save the fsave header. */
 		if (ia32_fxstate)
-			fpu_fxsave(&tsk->thread.fpu);
+			copy_fxregs_to_kernel(&tsk->thread.fpu);
 	} else {
 		fpstate_sanitize_xstate(&tsk->thread.fpu);
 		if (__copy_to_user(buf_fx, xsave, xstate_size))
@@ -367,23 +367,23 @@ sanitize_restored_xstate(struct task_struct *tsk,
 /*
  * Restore the extended state if present. Otherwise, restore the FP/SSE state.
  */
-static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
+static inline int copy_user_to_fpregs_zeroing(void __user *buf, u64 xbv, int fx_only)
 {
 	if (use_xsave()) {
 		if ((unsigned long)buf % 64 || fx_only) {
 			u64 init_bv = xfeatures_mask & ~XSTATE_FPSSE;
-			xrstor_state(&init_fpstate.xsave, init_bv);
-			return fxrstor_user(buf);
+			copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
+			return copy_user_to_fxregs(buf);
 		} else {
 			u64 init_bv = xfeatures_mask & ~xbv;
 			if (unlikely(init_bv))
-				xrstor_state(&init_fpstate.xsave, init_bv);
-			return xrestore_user(buf, xbv);
+				copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
+			return copy_user_to_xregs(buf, xbv);
 		}
 	} else if (use_fxsr()) {
-		return fxrstor_user(buf);
+		return copy_user_to_fxregs(buf);
 	} else
-		return frstor_user(buf);
+		return copy_user_to_fregs(buf);
 }
 
 static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
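
To make the zeroing logic above concrete: copy_user_to_fpregs_zeroing() re-initializes any xfeature the CPU supports but the signal frame does not carry, then restores the user's data. An illustrative mask calculation (example values only, assuming a CPU with FP/SSE/YMM state):

```c
u64 xfeatures_mask = XSTATE_FPSSE | XSTATE_YMM;	/* supported by the CPU/kernel      */
u64 xbv            = XSTATE_FPSSE;		/* actually present in the sigframe */
u64 init_bv        = xfeatures_mask & ~xbv;	/* == XSTATE_YMM                    */

copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);	/* reset missing features to init state */
copy_user_to_xregs(buf, xbv);				/* then restore the user's FP/SSE data  */
```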
@@ -471,7 +471,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
 	 * state to the registers directly (with exceptions handled).
 	 */
 	user_fpu_begin();
-	if (restore_user_xstate(buf_fx, xfeatures, fx_only)) {
+	if (copy_user_to_fpregs_zeroing(buf_fx, xfeatures, fx_only)) {
 		fpu__clear(fpu);
 		return -1;
 	}
@@ -667,13 +667,13 @@ static void setup_init_fpu_buf(void)
 	/*
 	 * Init all the features state with header_bv being 0x0
 	 */
-	xrstor_state_booting(&init_fpstate.xsave, -1);
+	copy_kernel_to_xregs_booting(&init_fpstate.xsave, -1);
 
 	/*
 	 * Dump the init state again. This is to identify the init state
 	 * of any feature which is not represented by all zero's.
 	 */
-	xsave_state_booting(&init_fpstate.xsave);
+	copy_xregs_to_kernel_booting(&init_fpstate.xsave);
 }
 
 /*
@@ -389,7 +389,7 @@ int mpx_enable_management(struct task_struct *tsk)
 	 * directory into XSAVE/XRSTOR Save Area and enable MPX through
 	 * XRSTOR instruction.
 	 *
-	 * xsave_state() is expected to be very expensive. Storing the bounds
+	 * copy_xregs_to_kernel() is expected to be very expensive. Storing the bounds
 	 * directory here means that we do not have to do xsave in the unmap
 	 * path; we can just use mm->bd_addr instead.
 	 */