Commit 6896d9f7 authored by Linus Torvalds

Merge branch 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fpu updates from Ingo Molnar:
 "This cleans up the FPU fault handling methods to be more robust, and
  moves eligible variables to .init.data"

* 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/fpu: Put a few variables in .init.data
  x86/fpu: Get rid of xstate_fault()
  x86/fpu: Add an XSTATE_OP() macro
parents 671d5532 e49a449b
@@ -224,18 +224,67 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
 #define XRSTOR		".byte " REX_PREFIX "0x0f,0xae,0x2f"
 #define XRSTORS		".byte " REX_PREFIX "0x0f,0xc7,0x1f"
 
-/* xstate instruction fault handler: */
-#define xstate_fault(__err)		\
-					\
-	".section .fixup,\"ax\"\n"	\
-					\
-	"3: movl $-2,%[_err]\n"		\
-	" jmp 2b\n"			\
-					\
-	".previous\n"			\
-					\
-	_ASM_EXTABLE(1b, 3b)		\
-	: [_err] "=r" (__err)
+#define XSTATE_OP(op, st, lmask, hmask, err)				\
+	asm volatile("1:" op "\n\t"					\
+		     "xor %[err], %[err]\n"				\
+		     "2:\n\t"						\
+		     ".pushsection .fixup,\"ax\"\n\t"			\
+		     "3: movl $-2,%[err]\n\t"				\
+		     "jmp 2b\n\t"					\
+		     ".popsection\n\t"					\
+		     _ASM_EXTABLE(1b, 3b)				\
+		     : [err] "=r" (err)					\
+		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
+		     : "memory")
+
+/*
+ * If XSAVES is enabled, it replaces XSAVEOPT because it supports a compact
+ * format and supervisor states in addition to modified optimization in
+ * XSAVEOPT.
+ *
+ * Otherwise, if XSAVEOPT is enabled, XSAVEOPT replaces XSAVE because XSAVEOPT
+ * supports modified optimization which is not supported by XSAVE.
+ *
+ * We use XSAVE as a fallback.
+ *
+ * The 661 label is defined in the ALTERNATIVE* macros as the address of the
+ * original instruction which gets replaced. We need to use it here as the
+ * address of the instruction where we might get an exception at.
+ */
+#define XSTATE_XSAVE(st, lmask, hmask, err)				\
+	asm volatile(ALTERNATIVE_2(XSAVE,				\
+				   XSAVEOPT, X86_FEATURE_XSAVEOPT,	\
+				   XSAVES, X86_FEATURE_XSAVES)		\
+		     "\n"						\
+		     "xor %[err], %[err]\n"				\
+		     "3:\n"						\
+		     ".pushsection .fixup,\"ax\"\n"			\
+		     "4: movl $-2, %[err]\n"				\
+		     "jmp 3b\n"						\
+		     ".popsection\n"					\
+		     _ASM_EXTABLE(661b, 4b)				\
+		     : [err] "=r" (err)					\
+		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
+		     : "memory")
+
+/*
+ * Use XRSTORS to restore context if it is enabled. XRSTORS supports compact
+ * XSAVE area format.
+ */
+#define XSTATE_XRESTORE(st, lmask, hmask, err)				\
+	asm volatile(ALTERNATIVE(XRSTOR,				\
+				 XRSTORS, X86_FEATURE_XSAVES)		\
+		     "\n"						\
+		     "xor %[err], %[err]\n"				\
+		     "3:\n"						\
+		     ".pushsection .fixup,\"ax\"\n"			\
+		     "4: movl $-2, %[err]\n"				\
+		     "jmp 3b\n"						\
+		     ".popsection\n"					\
+		     _ASM_EXTABLE(661b, 4b)				\
+		     : [err] "=r" (err)					\
+		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
+		     : "memory")
 
 /*
  * This function is called only during boot time when x86 caps are not set
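A note on the new macro's contract: callers split the 64-bit xfeatures mask into EDX:EAX halves, because the XSAVE instruction family takes the requested-feature bitmap in those two registers ("a"/"d" constraints) with the save area in %rdi ("D"), and err is always written: zeroed by the xor on success, set to -2 by the fixup on a fault. A minimal sketch of a caller, with a hypothetical function name (the real callers follow in the hunks below):

	/* Hypothetical illustration only; not part of this patch: */
	static inline int xsave_to_kernel_buf(struct xregs_state *st, u64 mask)
	{
		u32 lmask = mask;	/* low  32 mask bits -> %eax */
		u32 hmask = mask >> 32;	/* high 32 mask bits -> %edx */
		int err;

		XSTATE_OP(XSAVE, st, lmask, hmask, err);

		return err;		/* 0 on success, -2 if the insn faulted */
	}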
@@ -246,22 +295,14 @@ static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
 	u64 mask = -1;
 	u32 lmask = mask;
 	u32 hmask = mask >> 32;
-	int err = 0;
+	int err;
 
 	WARN_ON(system_state != SYSTEM_BOOTING);
 
-	if (boot_cpu_has(X86_FEATURE_XSAVES))
-		asm volatile("1:"XSAVES"\n\t"
-			"2:\n\t"
-			     xstate_fault(err)
-			: "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
-			: "memory");
+	if (static_cpu_has_safe(X86_FEATURE_XSAVES))
+		XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
 	else
-		asm volatile("1:"XSAVE"\n\t"
-			"2:\n\t"
-			     xstate_fault(err)
-			: "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
-			: "memory");
+		XSTATE_OP(XSAVE, xstate, lmask, hmask, err);
 
 	/* We should never fault when copying to a kernel buffer: */
 	WARN_ON_FPU(err);
@@ -276,22 +317,14 @@ static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
 	u64 mask = -1;
 	u32 lmask = mask;
 	u32 hmask = mask >> 32;
-	int err = 0;
+	int err;
 
 	WARN_ON(system_state != SYSTEM_BOOTING);
 
-	if (boot_cpu_has(X86_FEATURE_XSAVES))
-		asm volatile("1:"XRSTORS"\n\t"
-			"2:\n\t"
-			     xstate_fault(err)
-			: "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
-			: "memory");
+	if (static_cpu_has_safe(X86_FEATURE_XSAVES))
+		XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
 	else
-		asm volatile("1:"XRSTOR"\n\t"
-			"2:\n\t"
-			     xstate_fault(err)
-			: "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
-			: "memory");
+		XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
 
 	/* We should never fault when copying from a kernel buffer: */
 	WARN_ON_FPU(err);
@@ -305,33 +338,11 @@ static inline void copy_xregs_to_kernel(struct xregs_state *xstate)
 	u64 mask = -1;
 	u32 lmask = mask;
 	u32 hmask = mask >> 32;
-	int err = 0;
+	int err;
 
 	WARN_ON(!alternatives_patched);
 
-	/*
-	 * If xsaves is enabled, xsaves replaces xsaveopt because
-	 * it supports compact format and supervisor states in addition to
-	 * modified optimization in xsaveopt.
-	 *
-	 * Otherwise, if xsaveopt is enabled, xsaveopt replaces xsave
-	 * because xsaveopt supports modified optimization which is not
-	 * supported by xsave.
-	 *
-	 * If none of xsaves and xsaveopt is enabled, use xsave.
-	 */
-	alternative_input_2(
-		"1:"XSAVE,
-		XSAVEOPT,
-		X86_FEATURE_XSAVEOPT,
-		XSAVES,
-		X86_FEATURE_XSAVES,
-		[xstate] "D" (xstate), "a" (lmask), "d" (hmask) :
-		"memory");
-	asm volatile("2:\n\t"
-		     xstate_fault(err)
-		     : "0" (err)
-		     : "memory");
+	XSTATE_XSAVE(xstate, lmask, hmask, err);
 
 	/* We should never fault when copying to a kernel buffer: */
 	WARN_ON_FPU(err);
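The ALTERNATIVE_2() inside XSTATE_XSAVE() is patched exactly once at boot, so the comment block in the first hunk translates into the following selection logic; this is a conceptual model only (alternatives are binary patching, not a runtime branch):

	if (boot_cpu_has(X86_FEATURE_XSAVES))		/* compacted format + supervisor states */
		XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
	else if (boot_cpu_has(X86_FEATURE_XSAVEOPT))	/* modified optimization */
		XSTATE_OP(XSAVEOPT, xstate, lmask, hmask, err);
	else						/* plain XSAVE fallback */
		XSTATE_OP(XSAVE, xstate, lmask, hmask, err);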
@@ -344,23 +355,9 @@ static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)
 {
 	u32 lmask = mask;
 	u32 hmask = mask >> 32;
-	int err = 0;
+	int err;
 
-	/*
-	 * Use xrstors to restore context if it is enabled. xrstors supports
-	 * compacted format of xsave area which is not supported by xrstor.
-	 */
-	alternative_input(
-		"1: " XRSTOR,
-		XRSTORS,
-		X86_FEATURE_XSAVES,
-		"D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask)
-		: "memory");
-	asm volatile("2:\n"
-		     xstate_fault(err)
-		     : "0" (err)
-		     : "memory");
+	XSTATE_XRESTORE(xstate, lmask, hmask, err);
 
 	/* We should never fault when copying from a kernel buffer: */
 	WARN_ON_FPU(err);
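This hunk also illustrates the robustness problem the merge description refers to: the removed code defined the faulting instruction's "1:" label inside alternative_input() and referenced it from a second, separate asm statement via xstate_fault(), trusting the compiler to keep the two statements adjacent. Schematically:

	/* Removed anti-pattern: labels wired across two asm statements. */
	alternative_input("1: " XRSTOR, ...);	/* defines label 1: here ...       */
	asm volatile("2:\n"			/* ... and hopes 2: lands directly */
		     xstate_fault(err)		/* after it, with the extable      */
		     : "0" (err) : "memory");	/* entry 1b -> 3b still valid.     */

XSTATE_XRESTORE() keeps the instruction, the success path and the fixup in a single asm statement, so the exception-table entry always points at the right address.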
@@ -388,12 +385,10 @@ static inline int copy_xregs_to_user(struct xregs_state __user *buf)
 	if (unlikely(err))
 		return -EFAULT;
 
-	__asm__ __volatile__(ASM_STAC "\n"
-			     "1:"XSAVE"\n"
-			     "2: " ASM_CLAC "\n"
-			     xstate_fault(err)
-			     : "D" (buf), "a" (-1), "d" (-1), "0" (err)
-			     : "memory");
+	stac();
+	XSTATE_OP(XSAVE, buf, -1, -1, err);
+	clac();
 
 	return err;
 }
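stac()/clac() are the kernel's SMAP helpers from <asm/smap.h>: they set and clear EFLAGS.AC around the deliberate user-space access, replacing the ASM_STAC/ASM_CLAC strings that were previously spliced into the asm body. Note the two distinct failure paths a caller sees: -EFAULT from the pre-check above, and -2 from the fixup if XSAVE itself faults on the user buffer. A hypothetical caller (name and error policy invented for illustration):

	/* Hypothetical, for illustration only: */
	static int save_xstate_sigframe(struct xregs_state __user *buf)
	{
		int err = copy_xregs_to_user(buf);

		/* Collapse both the pre-check and the in-instruction fault: */
		return err ? -EFAULT : 0;
	}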
@@ -405,14 +400,12 @@ static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
 	struct xregs_state *xstate = ((__force struct xregs_state *)buf);
 	u32 lmask = mask;
 	u32 hmask = mask >> 32;
-	int err = 0;
+	int err;
 
-	__asm__ __volatile__(ASM_STAC "\n"
-			     "1:"XRSTOR"\n"
-			     "2: " ASM_CLAC "\n"
-			     xstate_fault(err)
-			     : "D" (xstate), "a" (lmask), "d" (hmask), "0" (err)
-			     : "memory");	/* memory required? */
+	stac();
+	XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
+	clac();
 
 	return err;
 }
...
@@ -197,7 +197,7 @@ static void __init fpu__init_task_struct_size(void)
  */
 static void __init fpu__init_system_xstate_size_legacy(void)
 {
-	static int on_boot_cpu = 1;
+	static int on_boot_cpu __initdata = 1;
 
 	WARN_ON_FPU(!on_boot_cpu);
 	on_boot_cpu = 0;
@@ -287,7 +287,7 @@ __setup("eagerfpu=", eager_fpu_setup);
  */
 static void __init fpu__init_system_ctx_switch(void)
 {
-	static bool on_boot_cpu = 1;
+	static bool on_boot_cpu __initdata = 1;
 
 	WARN_ON_FPU(!on_boot_cpu);
 	on_boot_cpu = 0;
...
@@ -297,7 +297,7 @@ static void __init setup_xstate_comp(void)
  */
 static void __init setup_init_fpu_buf(void)
 {
-	static int on_boot_cpu = 1;
+	static int on_boot_cpu __initdata = 1;
 
 	WARN_ON_FPU(!on_boot_cpu);
 	on_boot_cpu = 0;
@@ -608,7 +608,7 @@ static void fpu__init_disable_system_xstate(void)
 void __init fpu__init_system_xstate(void)
 {
 	unsigned int eax, ebx, ecx, edx;
-	static int on_boot_cpu = 1;
+	static int on_boot_cpu __initdata = 1;
 	int err;
 
 	WARN_ON_FPU(!on_boot_cpu);
...
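For context on the .init.data moves above: __initdata places a variable in the .init.data section, which the kernel discards together with all __init code once boot finishes, so these one-shot on_boot_cpu flags cost no memory afterwards. A generic sketch of the pattern (not taken from this patch; the function name is made up):

	#include <linux/init.h>
	#include <linux/kernel.h>

	static int on_boot_cpu __initdata = 1;	/* lives in .init.data, freed after boot */

	/* May only run, and touch on_boot_cpu, during init:
	 * the backing memory is released by free_initmem(). */
	static void __init example_setup_once(void)
	{
		WARN_ON(!on_boot_cpu);	/* catch a second invocation */
		on_boot_cpu = 0;
	}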