Commit e75c73ad authored by Linus Torvalds

Merge branch 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 FPU updates from Ingo Molnar:
 "This tree contains two main changes:

   - The big FPU code rewrite: wide reaching cleanups and reorganization
     that pulls all the FPU code together into a clean base in
     arch/x86/fpu/.

     The resulting code is leaner and faster, and much easier to
     understand.  This enables future work to further simplify the FPU
     code (such as removing lazy FPU restores).

      By their nature these changes carry a substantial regression risk:
      FPU code related bugs are long lived, because races are often
      subtle and the bugs masquerade as user-space failures that are
      difficult to track back to kernel-side bugs.  I'm aware of no
      unfixed (or even suspected) FPU related regression so far.

   - MPX support rework/fixes.  As this is still not a released CPU
     feature, there were some buglets in the code - should be much more
     robust now (Dave Hansen)"

* 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (250 commits)
  x86/fpu: Fix double-increment in setup_xstate_features()
  x86/mpx: Allow 32-bit binaries on 64-bit kernels again
  x86/mpx: Do not count MPX VMAs as neighbors when unmapping
  x86/mpx: Rewrite the unmap code
  x86/mpx: Support 32-bit binaries on 64-bit kernels
  x86/mpx: Use 32-bit-only cmpxchg() for 32-bit apps
  x86/mpx: Introduce new 'directory entry' to 'addr' helper function
  x86/mpx: Add temporary variable to reduce masking
  x86: Make is_64bit_mm() widely available
  x86/mpx: Trace allocation of new bounds tables
  x86/mpx: Trace the attempts to find bounds tables
  x86/mpx: Trace entry to bounds exception paths
  x86/mpx: Trace #BR exceptions
  x86/mpx: Introduce a boot-time disable flag
  x86/mpx: Restrict the mmap() size check to bounds tables
  x86/mpx: Remove redundant MPX_BNDCFG_ADDR_MASK
  x86/mpx: Clean up the code by not passing a task pointer around when unnecessary
  x86/mpx: Use the new get_xsave_field_ptr() API
  x86/fpu/xstate: Wrap get_xsave_addr() to make it safer
  x86/fpu/xstate: Fix up bad get_xsave_addr() assumptions
  ...
parents cfe3eceb a8424003
@@ -937,6 +937,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			Enable debug messages at boot time. See
 			Documentation/dynamic-debug-howto.txt for details.
+	nompx		[X86] Disables Intel Memory Protection Extensions.
+			See Documentation/x86/intel_mpx.txt for more
+			information about the feature.
 	eagerfpu=	[X86]
 			on	enable eager fpu restore
 			off	disable eager fpu restore
...
@@ -48,7 +48,7 @@ preemption must be disabled around such regions.
 Note, some FPU functions are already explicitly preempt safe. For example,
 kernel_fpu_begin and kernel_fpu_end will disable and enable preemption.
-However, math_state_restore must be called with preemption disabled.
+However, fpu__restore() must be called with preemption disabled.
 RULE #3: Lock acquire and release must be performed by same task
...
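For reference, the usage pattern these rules describe looks as follows (a minimal sketch, not taken from this merge):

	#include <asm/fpu/api.h>

	static void do_simd_work(void)
	{
		if (!irq_fpu_usable())
			return;		/* fall back to a non-SIMD path */

		kernel_fpu_begin();	/* disables preemption, takes ownership of the FPU */
		/* ... SSE/AVX instructions may be used here ... */
		kernel_fpu_end();	/* gives up the FPU, re-enables preemption */
	}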
@@ -332,4 +332,16 @@ config X86_DEBUG_STATIC_CPU_HAS
 	  If unsure, say N.
+config X86_DEBUG_FPU
+	bool "Debug the x86 FPU code"
+	depends on DEBUG_KERNEL
+	default y
+	---help---
+	  If this option is enabled then there will be extra sanity
+	  checks and (boot time) debug printouts added to the kernel.
+	  This debugging adds some small amount of runtime overhead
+	  to the kernel.
+	  If unsure, say N.
 endmenu
@@ -32,7 +32,7 @@
 #include <crypto/lrw.h>
 #include <crypto/xts.h>
 #include <asm/cpu_device_id.h>
-#include <asm/i387.h>
+#include <asm/fpu/api.h>
 #include <asm/crypto/aes.h>
 #include <crypto/ablk_helper.h>
 #include <crypto/scatterwalk.h>
...
@@ -19,8 +19,7 @@
 #include <crypto/ctr.h>
 #include <crypto/lrw.h>
 #include <crypto/xts.h>
-#include <asm/xcr.h>
-#include <asm/xsave.h>
+#include <asm/fpu/api.h>
 #include <asm/crypto/camellia.h>
 #include <asm/crypto/glue_helper.h>
@@ -561,16 +560,15 @@ static struct crypto_alg cmll_algs[10] = { {
 static int __init camellia_aesni_init(void)
 {
-	u64 xcr0;
+	const char *feature_name;
 	if (!cpu_has_avx2 || !cpu_has_avx || !cpu_has_aes || !cpu_has_osxsave) {
 		pr_info("AVX2 or AES-NI instructions are not detected.\n");
 		return -ENODEV;
 	}
-	xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
-	if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
-		pr_info("AVX2 detected but unusable.\n");
+	if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) {
+		pr_info("CPU feature '%s' is not supported.\n", feature_name);
 		return -ENODEV;
 	}
...
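The same conversion repeats across the crypto modules below: instead of reading XCR0 by hand with xgetbv() and mask-checking the result, each module now asks the FPU core whether the xfeatures it needs are enabled. The new idiom in isolation, as used in the hunks of this merge:

	const char *feature_name;

	if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) {
		pr_info("CPU feature '%s' is not supported.\n", feature_name);
		return -ENODEV;	/* e.g. the OS has not enabled YMM state in XCR0 */
	}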
@@ -19,8 +19,7 @@
 #include <crypto/ctr.h>
 #include <crypto/lrw.h>
 #include <crypto/xts.h>
-#include <asm/xcr.h>
-#include <asm/xsave.h>
+#include <asm/fpu/api.h>
 #include <asm/crypto/camellia.h>
 #include <asm/crypto/glue_helper.h>
@@ -553,16 +552,10 @@ static struct crypto_alg cmll_algs[10] = { {
 static int __init camellia_aesni_init(void)
 {
-	u64 xcr0;
-	if (!cpu_has_avx || !cpu_has_aes || !cpu_has_osxsave) {
-		pr_info("AVX or AES-NI instructions are not detected.\n");
-		return -ENODEV;
-	}
-	xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
-	if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
-		pr_info("AVX detected but unusable.\n");
+	const char *feature_name;
+	if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) {
+		pr_info("CPU feature '%s' is not supported.\n", feature_name);
 		return -ENODEV;
 	}
...
@@ -31,8 +31,7 @@
 #include <crypto/cast5.h>
 #include <crypto/cryptd.h>
 #include <crypto/ctr.h>
-#include <asm/xcr.h>
-#include <asm/xsave.h>
+#include <asm/fpu/api.h>
 #include <asm/crypto/glue_helper.h>
 #define CAST5_PARALLEL_BLOCKS 16
@@ -468,16 +467,10 @@ static struct crypto_alg cast5_algs[6] = { {
 static int __init cast5_init(void)
 {
-	u64 xcr0;
-	if (!cpu_has_avx || !cpu_has_osxsave) {
-		pr_info("AVX instructions are not detected.\n");
-		return -ENODEV;
-	}
-	xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
-	if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
-		pr_info("AVX detected but unusable.\n");
+	const char *feature_name;
+	if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) {
+		pr_info("CPU feature '%s' is not supported.\n", feature_name);
 		return -ENODEV;
 	}
...
@@ -36,8 +36,7 @@
 #include <crypto/ctr.h>
 #include <crypto/lrw.h>
 #include <crypto/xts.h>
-#include <asm/xcr.h>
-#include <asm/xsave.h>
+#include <asm/fpu/api.h>
 #include <asm/crypto/glue_helper.h>
 #define CAST6_PARALLEL_BLOCKS 8
@@ -590,16 +589,10 @@ static struct crypto_alg cast6_algs[10] = { {
 static int __init cast6_init(void)
 {
-	u64 xcr0;
-	if (!cpu_has_avx || !cpu_has_osxsave) {
-		pr_info("AVX instructions are not detected.\n");
-		return -ENODEV;
-	}
-	xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
-	if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
-		pr_info("AVX detected but unusable.\n");
+	const char *feature_name;
+	if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) {
+		pr_info("CPU feature '%s' is not supported.\n", feature_name);
 		return -ENODEV;
 	}
...
@@ -35,7 +35,7 @@
 #include <asm/cpufeature.h>
 #include <asm/cpu_device_id.h>
-#include <asm/i387.h>
+#include <asm/fpu/api.h>
 #define CHKSUM_BLOCK_SIZE	1
 #define CHKSUM_DIGEST_SIZE	4
...
@@ -32,8 +32,7 @@
 #include <asm/cpufeature.h>
 #include <asm/cpu_device_id.h>
-#include <asm/i387.h>
-#include <asm/fpu-internal.h>
+#include <asm/fpu/internal.h>
 #define CHKSUM_BLOCK_SIZE	1
 #define CHKSUM_DIGEST_SIZE	4
...
@@ -29,7 +29,7 @@
 #include <linux/init.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
-#include <asm/i387.h>
+#include <asm/fpu/api.h>
 #include <asm/cpufeature.h>
 #include <asm/cpu_device_id.h>
...
@@ -18,7 +18,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/crypto.h>
-#include <asm/i387.h>
+#include <asm/fpu/api.h>
 struct crypto_fpu_ctx {
 	struct crypto_blkcipher *child;
...
@@ -19,7 +19,7 @@
 #include <crypto/cryptd.h>
 #include <crypto/gf128mul.h>
 #include <crypto/internal/hash.h>
-#include <asm/i387.h>
+#include <asm/fpu/api.h>
 #include <asm/cpu_device_id.h>
 #define GHASH_BLOCK_SIZE	16
...
@@ -20,8 +20,7 @@
 #include <crypto/lrw.h>
 #include <crypto/xts.h>
 #include <crypto/serpent.h>
-#include <asm/xcr.h>
-#include <asm/xsave.h>
+#include <asm/fpu/api.h>
 #include <asm/crypto/serpent-avx.h>
 #include <asm/crypto/glue_helper.h>
@@ -537,16 +536,14 @@ static struct crypto_alg srp_algs[10] = { {
 static int __init init(void)
 {
-	u64 xcr0;
+	const char *feature_name;
 	if (!cpu_has_avx2 || !cpu_has_osxsave) {
 		pr_info("AVX2 instructions are not detected.\n");
 		return -ENODEV;
 	}
-	xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
-	if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
-		pr_info("AVX detected but unusable.\n");
+	if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) {
+		pr_info("CPU feature '%s' is not supported.\n", feature_name);
 		return -ENODEV;
 	}
...
@@ -36,8 +36,7 @@
 #include <crypto/ctr.h>
 #include <crypto/lrw.h>
 #include <crypto/xts.h>
-#include <asm/xcr.h>
-#include <asm/xsave.h>
+#include <asm/fpu/api.h>
 #include <asm/crypto/serpent-avx.h>
 #include <asm/crypto/glue_helper.h>
@@ -596,16 +595,10 @@ static struct crypto_alg serpent_algs[10] = { {
 static int __init serpent_init(void)
 {
-	u64 xcr0;
-	if (!cpu_has_avx || !cpu_has_osxsave) {
-		printk(KERN_INFO "AVX instructions are not detected.\n");
-		return -ENODEV;
-	}
-	xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
-	if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
-		printk(KERN_INFO "AVX detected but unusable.\n");
+	const char *feature_name;
+	if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) {
+		pr_info("CPU feature '%s' is not supported.\n", feature_name);
 		return -ENODEV;
 	}
...
@@ -65,11 +65,8 @@
 #include <crypto/mcryptd.h>
 #include <crypto/crypto_wq.h>
 #include <asm/byteorder.h>
-#include <asm/i387.h>
-#include <asm/xcr.h>
-#include <asm/xsave.h>
 #include <linux/hardirq.h>
-#include <asm/fpu-internal.h>
+#include <asm/fpu/api.h>
 #include "sha_mb_ctx.h"
 #define FLUSH_INTERVAL 1000 /* in usec */
...
@@ -29,9 +29,7 @@
 #include <linux/types.h>
 #include <crypto/sha.h>
 #include <crypto/sha1_base.h>
-#include <asm/i387.h>
-#include <asm/xcr.h>
-#include <asm/xsave.h>
+#include <asm/fpu/api.h>
 asmlinkage void sha1_transform_ssse3(u32 *digest, const char *data,
@@ -123,15 +121,9 @@ static struct shash_alg alg = {
 #ifdef CONFIG_AS_AVX
 static bool __init avx_usable(void)
 {
-	u64 xcr0;
-	if (!cpu_has_avx || !cpu_has_osxsave)
-		return false;
-	xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
-	if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
+	if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, NULL)) {
+		if (cpu_has_avx)
 		pr_info("AVX detected but unusable.\n");
 		return false;
 	}
...
@@ -37,9 +37,7 @@
 #include <linux/types.h>
 #include <crypto/sha.h>
 #include <crypto/sha256_base.h>
-#include <asm/i387.h>
-#include <asm/xcr.h>
-#include <asm/xsave.h>
+#include <asm/fpu/api.h>
 #include <linux/string.h>
 asmlinkage void sha256_transform_ssse3(u32 *digest, const char *data,
@@ -132,15 +130,9 @@ static struct shash_alg algs[] = { {
 #ifdef CONFIG_AS_AVX
 static bool __init avx_usable(void)
 {
-	u64 xcr0;
-	if (!cpu_has_avx || !cpu_has_osxsave)
-		return false;
-	xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
-	if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
+	if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, NULL)) {
+		if (cpu_has_avx)
 		pr_info("AVX detected but unusable.\n");
 		return false;
 	}
...
@@ -35,9 +35,7 @@
 #include <linux/types.h>
 #include <crypto/sha.h>
 #include <crypto/sha512_base.h>
-#include <asm/i387.h>
-#include <asm/xcr.h>
-#include <asm/xsave.h>
+#include <asm/fpu/api.h>
 #include <linux/string.h>
@@ -131,15 +129,9 @@ static struct shash_alg algs[] = { {
 #ifdef CONFIG_AS_AVX
 static bool __init avx_usable(void)
 {
-	u64 xcr0;
-	if (!cpu_has_avx || !cpu_has_osxsave)
-		return false;
-	xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
-	if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
+	if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, NULL)) {
+		if (cpu_has_avx)
 		pr_info("AVX detected but unusable.\n");
 		return false;
 	}
...
@@ -36,9 +36,7 @@
 #include <crypto/ctr.h>
 #include <crypto/lrw.h>
 #include <crypto/xts.h>
-#include <asm/i387.h>
-#include <asm/xcr.h>
-#include <asm/xsave.h>
+#include <asm/fpu/api.h>
 #include <asm/crypto/twofish.h>
 #include <asm/crypto/glue_helper.h>
 #include <crypto/scatterwalk.h>
@@ -558,16 +556,10 @@ static struct crypto_alg twofish_algs[10] = { {
 static int __init twofish_init(void)
 {
-	u64 xcr0;
-	if (!cpu_has_avx || !cpu_has_osxsave) {
-		printk(KERN_INFO "AVX instructions are not detected.\n");
-		return -ENODEV;
-	}
-	xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
-	if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
-		printk(KERN_INFO "AVX detected but unusable.\n");
+	const char *feature_name;
+	if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) {
+		pr_info("CPU feature '%s' is not supported.\n", feature_name);
 		return -ENODEV;
 	}
...
@@ -21,8 +21,8 @@
 #include <linux/binfmts.h>
 #include <asm/ucontext.h>
 #include <asm/uaccess.h>
-#include <asm/i387.h>
-#include <asm/fpu-internal.h>
+#include <asm/fpu/internal.h>
+#include <asm/fpu/signal.h>
 #include <asm/ptrace.h>
 #include <asm/ia32_unistd.h>
 #include <asm/user32.h>
@@ -198,7 +198,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
 		buf = compat_ptr(tmp);
 	} get_user_catch(err);
-	err |= restore_xstate_sig(buf, 1);
+	err |= fpu__restore_sig(buf, 1);
 	force_iret();
@@ -308,6 +308,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
 				 size_t frame_size,
 				 void __user **fpstate)
 {
+	struct fpu *fpu = &current->thread.fpu;
 	unsigned long sp;
 	/* Default to using normal stack */
@@ -322,12 +323,12 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
 	    ksig->ka.sa.sa_restorer)
 		sp = (unsigned long) ksig->ka.sa.sa_restorer;
-	if (used_math()) {
+	if (fpu->fpstate_active) {
 		unsigned long fx_aligned, math_size;
-		sp = alloc_mathframe(sp, 1, &fx_aligned, &math_size);
+		sp = fpu__alloc_mathframe(sp, 1, &fx_aligned, &math_size);
 		*fpstate = (struct _fpstate_ia32 __user *) sp;
-		if (save_xstate_sig(*fpstate, (void __user *)fx_aligned,
-				    math_size) < 0)
+		if (copy_fpstate_to_sigframe(*fpstate, (void __user *)fx_aligned,
+					     math_size) < 0)
 			return (void __user *) -1L;
 	}
...
@@ -52,6 +52,12 @@ struct alt_instr {
 	u8  padlen;		/* length of build-time padding */
 } __packed;
+/*
+ * Debug flag that can be tested to see whether alternative
+ * instructions were patched in already:
+ */
+extern int alternatives_patched;
 extern void alternative_instructions(void);
 extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
...
@@ -7,7 +7,7 @@
 #include <linux/kernel.h>
 #include <linux/crypto.h>
-#include <asm/i387.h>
+#include <asm/fpu/api.h>
 #include <crypto/b128ops.h>
 typedef void (*common_glue_func_t)(void *ctx, u8 *dst, const u8 *src);
...
 #ifndef _ASM_X86_EFI_H
 #define _ASM_X86_EFI_H
-#include <asm/i387.h>
+#include <asm/fpu/api.h>
 #include <asm/pgtable.h>
 /*
...
@@ -7,23 +7,8 @@
  * x86-64 work by Andi Kleen 2002
  */
-#ifndef _ASM_X86_I387_H
-#define _ASM_X86_I387_H
-#ifndef __ASSEMBLY__
-#include <linux/sched.h>
-#include <linux/hardirq.h>
-struct pt_regs;
-struct user_i387_struct;
-extern int init_fpu(struct task_struct *child);
-extern void fpu_finit(struct fpu *fpu);
-extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
-extern void math_state_restore(void);
-extern bool irq_fpu_usable(void);
+#ifndef _ASM_X86_FPU_API_H
+#define _ASM_X86_FPU_API_H
 /*
  * Careful: __kernel_fpu_begin/end() must be called with preempt disabled
@@ -37,23 +22,9 @@ extern bool irq_fpu_usable(void);
  */
 extern void __kernel_fpu_begin(void);
 extern void __kernel_fpu_end(void);
-static inline void kernel_fpu_begin(void)
-{
-	preempt_disable();
-	WARN_ON_ONCE(!irq_fpu_usable());
-	__kernel_fpu_begin();
-}
-static inline void kernel_fpu_end(void)
-{
-	__kernel_fpu_end();
-	preempt_enable();
-}
-/* Must be called with preempt disabled */
-extern void kernel_fpu_disable(void);
-extern void kernel_fpu_enable(void);
+extern void kernel_fpu_begin(void);
+extern void kernel_fpu_end(void);
+extern bool irq_fpu_usable(void);
 /*
  * Some instructions like VIA's padlock instructions generate a spurious
@@ -62,47 +33,16 @@ extern void kernel_fpu_enable(void);
 * in interrupt context interacting wrongly with other user/kernel fpu usage, we
 * should use them only in the context of irq_ts_save/restore()
 */
-static inline int irq_ts_save(void)
-{
-	/*
-	 * If in process context and not atomic, we can take a spurious DNA fault.
-	 * Otherwise, doing clts() in process context requires disabling preemption
-	 * or some heavy lifting like kernel_fpu_begin()
-	 */
-	if (!in_atomic())
-		return 0;
-	if (read_cr0() & X86_CR0_TS) {
-		clts();
-		return 1;
-	}
-	return 0;
-}
-static inline void irq_ts_restore(int TS_state)
-{
-	if (TS_state)
-		stts();
-}
+extern int  irq_ts_save(void);
+extern void irq_ts_restore(int TS_state);
 /*
- * The question "does this thread have fpu access?"
- * is slightly racy, since preemption could come in
- * and revoke it immediately after the test.
+ * Query the presence of one or more xfeatures. Works on any legacy CPU as well.
  *
- * However, even in that very unlikely scenario,
- * we can just assume we have FPU access - typically
- * to save the FP state - we'll just take a #NM
- * fault and get the FPU access back.
+ * If 'feature_name' is set then put a human-readable description of
+ * the feature there as well - this can be used to print error (or success)
+ * messages.
 */
-static inline int user_has_fpu(void)
-{
-	return current->thread.fpu.has_fpu;
-}
-extern void unlazy_fpu(struct task_struct *tsk);
-#endif /* __ASSEMBLY__ */
-#endif /* _ASM_X86_I387_H */
+extern int cpu_has_xfeatures(u64 xfeatures_mask, const char **feature_name);
+#endif /* _ASM_X86_FPU_API_H */
/*
* FPU regset handling methods:
*/
#ifndef _ASM_X86_FPU_REGSET_H
#define _ASM_X86_FPU_REGSET_H
#include <linux/regset.h>
extern user_regset_active_fn regset_fpregs_active, regset_xregset_fpregs_active;
extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
xstateregs_get;
extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
xstateregs_set;
/*
* xstateregs_active == regset_fpregs_active. Please refer to the comment
* at the definition of regset_fpregs_active.
*/
#define xstateregs_active regset_fpregs_active
#endif /* _ASM_X86_FPU_REGSET_H */
/*
* x86 FPU signal frame handling methods:
*/
#ifndef _ASM_X86_FPU_SIGNAL_H
#define _ASM_X86_FPU_SIGNAL_H
#ifdef CONFIG_X86_64
# include <asm/sigcontext32.h>
# include <asm/user32.h>
struct ksignal;
int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
compat_sigset_t *set, struct pt_regs *regs);
int ia32_setup_frame(int sig, struct ksignal *ksig,
compat_sigset_t *set, struct pt_regs *regs);
#else
# define user_i387_ia32_struct user_i387_struct
# define user32_fxsr_struct user_fxsr_struct
# define ia32_setup_frame __setup_frame
# define ia32_setup_rt_frame __setup_rt_frame
#endif
extern void convert_from_fxsr(struct user_i387_ia32_struct *env,
struct task_struct *tsk);
extern void convert_to_fxsr(struct task_struct *tsk,
const struct user_i387_ia32_struct *env);
unsigned long
fpu__alloc_mathframe(unsigned long sp, int ia32_frame,
unsigned long *buf_fx, unsigned long *size);
extern void fpu__init_prepare_fx_sw_frame(void);
#endif /* _ASM_X86_FPU_SIGNAL_H */
/*
* FPU data structures:
*/
#ifndef _ASM_X86_FPU_H
#define _ASM_X86_FPU_H
/*
* The legacy x87 FPU state format, as saved by FSAVE and
* restored by the FRSTOR instructions:
*/
struct fregs_state {
u32 cwd; /* FPU Control Word */
u32 swd; /* FPU Status Word */
u32 twd; /* FPU Tag Word */
u32 fip; /* FPU IP Offset */
u32 fcs; /* FPU IP Selector */
u32 foo; /* FPU Operand Pointer Offset */
u32 fos; /* FPU Operand Pointer Selector */
/* 8*10 bytes for each FP-reg = 80 bytes: */
u32 st_space[20];
/* Software status information [not touched by FSAVE]: */
u32 status;
};
/*
* The legacy fx SSE/MMX FPU state format, as saved by FXSAVE and
* restored by the FXRSTOR instructions. It's similar to the FSAVE
* format, but differs in some areas, plus has extensions at
* the end for the XMM registers.
*/
struct fxregs_state {
u16 cwd; /* Control Word */
u16 swd; /* Status Word */
u16 twd; /* Tag Word */
u16 fop; /* Last Instruction Opcode */
union {
struct {
u64 rip; /* Instruction Pointer */
u64 rdp; /* Data Pointer */
};
struct {
u32 fip; /* FPU IP Offset */
u32 fcs; /* FPU IP Selector */
u32 foo; /* FPU Operand Offset */
u32 fos; /* FPU Operand Selector */
};
};
u32 mxcsr; /* MXCSR Register State */
u32 mxcsr_mask; /* MXCSR Mask */
/* 8*16 bytes for each FP-reg = 128 bytes: */
u32 st_space[32];
/* 16*16 bytes for each XMM-reg = 256 bytes: */
u32 xmm_space[64];
u32 padding[12];
union {
u32 padding1[12];
u32 sw_reserved[12];
};
} __attribute__((aligned(16)));
/* Default value for fxregs_state.mxcsr: */
#define MXCSR_DEFAULT 0x1f80
/*
* Software based FPU emulation state. This is arbitrary really,
* it matches the x87 format to make it easier to understand:
*/
struct swregs_state {
u32 cwd;
u32 swd;
u32 twd;
u32 fip;
u32 fcs;
u32 foo;
u32 fos;
/* 8*10 bytes for each FP-reg = 80 bytes: */
u32 st_space[20];
u8 ftop;
u8 changed;
u8 lookahead;
u8 no_update;
u8 rm;
u8 alimit;
struct math_emu_info *info;
u32 entry_eip;
};
/*
* List of XSAVE features Linux knows about:
*/
enum xfeature_bit {
XSTATE_BIT_FP,
XSTATE_BIT_SSE,
XSTATE_BIT_YMM,
XSTATE_BIT_BNDREGS,
XSTATE_BIT_BNDCSR,
XSTATE_BIT_OPMASK,
XSTATE_BIT_ZMM_Hi256,
XSTATE_BIT_Hi16_ZMM,
XFEATURES_NR_MAX,
};
#define XSTATE_FP (1 << XSTATE_BIT_FP)
#define XSTATE_SSE (1 << XSTATE_BIT_SSE)
#define XSTATE_YMM (1 << XSTATE_BIT_YMM)
#define XSTATE_BNDREGS (1 << XSTATE_BIT_BNDREGS)
#define XSTATE_BNDCSR (1 << XSTATE_BIT_BNDCSR)
#define XSTATE_OPMASK (1 << XSTATE_BIT_OPMASK)
#define XSTATE_ZMM_Hi256 (1 << XSTATE_BIT_ZMM_Hi256)
#define XSTATE_Hi16_ZMM (1 << XSTATE_BIT_Hi16_ZMM)
#define XSTATE_FPSSE (XSTATE_FP | XSTATE_SSE)
#define XSTATE_AVX512 (XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM)
/*
* There are 16x 256-bit AVX registers named YMM0-YMM15.
* The low 128 bits are aliased to the 16 SSE registers (XMM0-XMM15)
* and are stored in 'struct fxregs_state::xmm_space[]'.
*
* The high 128 bits are stored here:
* 16x 128 bits == 256 bytes.
*/
struct ymmh_struct {
u8 ymmh_space[256];
};
/* We don't support LWP yet: */
struct lwp_struct {
u8 reserved[128];
};
/* Intel MPX support: */
struct bndreg {
u64 lower_bound;
u64 upper_bound;
} __packed;
struct bndcsr {
u64 bndcfgu;
u64 bndstatus;
} __packed;
struct mpx_struct {
struct bndreg bndreg[4];
struct bndcsr bndcsr;
};
struct xstate_header {
u64 xfeatures;
u64 xcomp_bv;
u64 reserved[6];
} __attribute__((packed));
/* New processor state extensions should be added here: */
#define XSTATE_RESERVE (sizeof(struct ymmh_struct) + \
sizeof(struct lwp_struct) + \
sizeof(struct mpx_struct) )
/*
* This is our most modern FPU state format, as saved by the XSAVE
* and restored by the XRSTOR instructions.
*
* It consists of a legacy fxregs portion, an xstate header and
* subsequent fixed size areas as defined by the xstate header.
* Not all CPUs support all the extensions.
*/
struct xregs_state {
struct fxregs_state i387;
struct xstate_header header;
u8 __reserved[XSTATE_RESERVE];
} __attribute__ ((packed, aligned (64)));
/*
* This is a union of all the possible FPU state formats
* put together, so that we can pick the right one runtime.
*
* The size of the structure is determined by the largest
* member - which is the xsave area:
*/
union fpregs_state {
struct fregs_state fsave;
struct fxregs_state fxsave;
struct swregs_state soft;
struct xregs_state xsave;
};
/*
* Highest level per task FPU state data structure that
* contains the FPU register state plus various FPU
* state fields:
*/
struct fpu {
/*
* @state:
*
* In-memory copy of all FPU registers that we save/restore
* over context switches. If the task is using the FPU then
* the registers in the FPU are more recent than this state
* copy. If the task context-switches away then they get
* saved here and represent the FPU state.
*
* After context switches there may be a (short) time period
* during which the in-FPU hardware registers are unchanged
* and still perfectly match this state, if the tasks
* scheduled afterwards are not using the FPU.
*
* This is the 'lazy restore' window of optimization, which
* we track through 'fpu_fpregs_owner_ctx' and 'fpu->last_cpu'.
*
* We detect whether a subsequent task uses the FPU via setting
* CR0::TS to 1, which causes any FPU use to raise a #NM fault.
*
* During this window, if the task gets scheduled again, we
* might be able to skip having to do a restore from this
* memory buffer to the hardware registers - at the cost of
* incurring the overhead of #NM fault traps.
*
* Note that on modern CPUs that support the XSAVEOPT (or other
* optimized XSAVE instructions), we don't use #NM traps anymore,
* as the hardware can track whether FPU registers need saving
* or not. On such CPUs we activate the non-lazy ('eagerfpu')
* logic, which unconditionally saves/restores all FPU state
* across context switches. (if FPU state exists.)
*/
union fpregs_state state;
/*
* @last_cpu:
*
* Records the last CPU on which this context was loaded into
* FPU registers. (In the lazy-restore case we might be
* able to reuse FPU registers across multiple context switches
* this way, if no intermediate task used the FPU.)
*
* A value of -1 is used to indicate that the FPU state in context
* memory is newer than the FPU state in registers, and that the
* FPU state should be reloaded next time the task is run.
*/
unsigned int last_cpu;
/*
* @fpstate_active:
*
* This flag indicates whether this context is active: if the task
* is not running then we can restore from this context, if the task
* is running then we should save into this context.
*/
unsigned char fpstate_active;
/*
* @fpregs_active:
*
* This flag determines whether a given context is actively
* loaded into the FPU's registers and that those registers
* represent the task's current FPU state.
*
* Note the interaction with fpstate_active:
*
* # task does not use the FPU:
* fpstate_active == 0
*
* # task uses the FPU and regs are active:
* fpstate_active == 1 && fpregs_active == 1
*
* # the regs are inactive but still match fpstate:
* fpstate_active == 1 && fpregs_active == 0 && fpregs_owner == fpu
*
* The third state is what we use for the lazy restore optimization
* on lazy-switching CPUs.
*/
unsigned char fpregs_active;
/*
* @counter:
*
* This counter contains the number of consecutive context switches
* during which the FPU stays used. If this is over a threshold, the
* lazy FPU restore logic becomes eager, to save the trap overhead.
* This is an unsigned char so that after 256 iterations the counter
* wraps and the context switch behavior turns lazy again; this is to
* deal with bursty apps that only use the FPU for a short time:
*/
unsigned char counter;
};
#endif /* _ASM_X86_FPU_H */
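A minimal sketch of the lazy-restore test this state enables (illustrative only; it assumes the per-CPU 'fpu_fpregs_owner_ctx' variable named in the comments above):

	/* Can the context switch skip restoring this task's FPU registers? */
	static inline bool fpu_regs_still_valid(struct fpu *fpu, unsigned int cpu)
	{
		return this_cpu_read(fpu_fpregs_owner_ctx) == fpu &&
		       fpu->last_cpu == cpu;
	}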
#ifndef __ASM_X86_XSAVE_H
#define __ASM_X86_XSAVE_H
#include <linux/types.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
/* Bit 63 of XCR0 is reserved for future expansion */
#define XSTATE_EXTEND_MASK (~(XSTATE_FPSSE | (1ULL << 63)))
#define XSTATE_CPUID 0x0000000d
#define FXSAVE_SIZE 512
#define XSAVE_HDR_SIZE 64
#define XSAVE_HDR_OFFSET FXSAVE_SIZE
#define XSAVE_YMM_SIZE 256
#define XSAVE_YMM_OFFSET (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET)
/* Supported features which support lazy state saving */
#define XSTATE_LAZY (XSTATE_FP | XSTATE_SSE | XSTATE_YMM \
| XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM)
/* Supported features which require eager state saving */
#define XSTATE_EAGER (XSTATE_BNDREGS | XSTATE_BNDCSR)
/* All currently supported features */
#define XCNTXT_MASK (XSTATE_LAZY | XSTATE_EAGER)
#ifdef CONFIG_X86_64
#define REX_PREFIX "0x48, "
#else
#define REX_PREFIX
#endif
extern unsigned int xstate_size;
extern u64 xfeatures_mask;
extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
extern void update_regset_xstate_info(unsigned int size, u64 xstate_mask);
void *get_xsave_addr(struct xregs_state *xsave, int xstate);
const void *get_xsave_field_ptr(int xstate_field);
#endif
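The MPX code below consumes this API via get_xsave_field_ptr(); a sketch of the access pattern (the struct bndcsr type is from the fpu/types.h listing above):

	const struct bndcsr *bndcsr;

	/* NULL means the task has no MPX state / the field is in its init state: */
	bndcsr = get_xsave_field_ptr(XSTATE_BNDCSR);
	if (!bndcsr)
		return -EINVAL;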
@@ -1002,8 +1002,6 @@ void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id);
 void kvm_inject_nmi(struct kvm_vcpu *vcpu);
-int fx_init(struct kvm_vcpu *vcpu);
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		       const u8 *new, int bytes);
 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
...
@@ -142,6 +142,19 @@ static inline void arch_exit_mmap(struct mm_struct *mm)
 	paravirt_arch_exit_mmap(mm);
 }
+#ifdef CONFIG_X86_64
+static inline bool is_64bit_mm(struct mm_struct *mm)
+{
+	return	!config_enabled(CONFIG_IA32_EMULATION) ||
+		!(mm->context.ia32_compat == TIF_IA32);
+}
+#else
+static inline bool is_64bit_mm(struct mm_struct *mm)
+{
+	return false;
+}
+#endif
 static inline void arch_bprm_mm_init(struct mm_struct *mm,
 		struct vm_area_struct *vma)
 {
...
@@ -13,55 +13,50 @@
 #define MPX_BNDCFG_ENABLE_FLAG	0x1
 #define MPX_BD_ENTRY_VALID_FLAG	0x1
-#ifdef CONFIG_X86_64
-/* upper 28 bits [47:20] of the virtual address in 64-bit used to
- * index into bounds directory (BD).
- */
-#define MPX_BD_ENTRY_OFFSET	28
-#define MPX_BD_ENTRY_SHIFT	3
-/* bits [19:3] of the virtual address in 64-bit used to index into
- * bounds table (BT).
- */
-#define MPX_BT_ENTRY_OFFSET	17
-#define MPX_BT_ENTRY_SHIFT	5
-#define MPX_IGN_BITS		3
-#define MPX_BD_ENTRY_TAIL	3
-#else
-#define MPX_BD_ENTRY_OFFSET	20
-#define MPX_BD_ENTRY_SHIFT	2
-#define MPX_BT_ENTRY_OFFSET	10
-#define MPX_BT_ENTRY_SHIFT	4
-#define MPX_IGN_BITS		2
-#define MPX_BD_ENTRY_TAIL	2
-#endif
-#define MPX_BD_SIZE_BYTES (1UL<<(MPX_BD_ENTRY_OFFSET+MPX_BD_ENTRY_SHIFT))
-#define MPX_BT_SIZE_BYTES (1UL<<(MPX_BT_ENTRY_OFFSET+MPX_BT_ENTRY_SHIFT))
+/*
+ * The upper 28 bits [47:20] of the virtual address in 64-bit
+ * are used to index into bounds directory (BD).
+ *
+ * The directory is 2G (2^31) in size, and with 8-byte entries
+ * it has 2^28 entries.
+ */
+#define MPX_BD_SIZE_BYTES_64	(1UL<<31)
+#define MPX_BD_ENTRY_BYTES_64	8
+#define MPX_BD_NR_ENTRIES_64	(MPX_BD_SIZE_BYTES_64/MPX_BD_ENTRY_BYTES_64)
+/*
+ * The 32-bit directory is 4MB (2^22) in size, and with 4-byte
+ * entries it has 2^20 entries.
+ */
+#define MPX_BD_SIZE_BYTES_32	(1UL<<22)
+#define MPX_BD_ENTRY_BYTES_32	4
+#define MPX_BD_NR_ENTRIES_32	(MPX_BD_SIZE_BYTES_32/MPX_BD_ENTRY_BYTES_32)
+/*
+ * A 64-bit table is 4MB total in size, and an entry is
+ * 4 64-bit pointers in size.
+ */
+#define MPX_BT_SIZE_BYTES_64	(1UL<<22)
+#define MPX_BT_ENTRY_BYTES_64	32
+#define MPX_BT_NR_ENTRIES_64	(MPX_BT_SIZE_BYTES_64/MPX_BT_ENTRY_BYTES_64)
+/*
+ * A 32-bit table is 16kB total in size, and an entry is
+ * 4 32-bit pointers in size.
+ */
+#define MPX_BT_SIZE_BYTES_32	(1UL<<14)
+#define MPX_BT_ENTRY_BYTES_32	16
+#define MPX_BT_NR_ENTRIES_32	(MPX_BT_SIZE_BYTES_32/MPX_BT_ENTRY_BYTES_32)
 #define MPX_BNDSTA_TAIL		2
 #define MPX_BNDCFG_TAIL		12
 #define MPX_BNDSTA_ADDR_MASK	(~((1UL<<MPX_BNDSTA_TAIL)-1))
-#define MPX_BNDCFG_ADDR_MASK	(~((1UL<<MPX_BNDCFG_TAIL)-1))
-#define MPX_BT_ADDR_MASK	(~((1UL<<MPX_BD_ENTRY_TAIL)-1))
 #define MPX_BNDCFG_ADDR_MASK	(~((1UL<<MPX_BNDCFG_TAIL)-1))
 #define MPX_BNDSTA_ERROR_CODE	0x3
-#define MPX_BD_ENTRY_MASK	((1<<MPX_BD_ENTRY_OFFSET)-1)
-#define MPX_BT_ENTRY_MASK	((1<<MPX_BT_ENTRY_OFFSET)-1)
-#define MPX_GET_BD_ENTRY_OFFSET(addr)	((((addr)>>(MPX_BT_ENTRY_OFFSET+ \
-		MPX_IGN_BITS)) & MPX_BD_ENTRY_MASK) << MPX_BD_ENTRY_SHIFT)
-#define MPX_GET_BT_ENTRY_OFFSET(addr)	((((addr)>>MPX_IGN_BITS) & \
-		MPX_BT_ENTRY_MASK) << MPX_BT_ENTRY_SHIFT)
 #ifdef CONFIG_X86_INTEL_MPX
-siginfo_t *mpx_generate_siginfo(struct pt_regs *regs,
-				struct xsave_struct *xsave_buf);
-int mpx_handle_bd_fault(struct xsave_struct *xsave_buf);
+siginfo_t *mpx_generate_siginfo(struct pt_regs *regs);
+int mpx_handle_bd_fault(void);
 static inline int kernel_managing_mpx_tables(struct mm_struct *mm)
 {
 	return (mm->bd_addr != MPX_INVALID_BOUNDS_DIR);
@@ -77,12 +72,11 @@ static inline void mpx_mm_init(struct mm_struct *mm)
 void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
 		      unsigned long start, unsigned long end);
 #else
-static inline siginfo_t *mpx_generate_siginfo(struct pt_regs *regs,
-					      struct xsave_struct *xsave_buf)
+static inline siginfo_t *mpx_generate_siginfo(struct pt_regs *regs)
 {
 	return NULL;
 }
-static inline int mpx_handle_bd_fault(struct xsave_struct *xsave_buf)
+static inline int mpx_handle_bd_fault(void)
 {
 	return -EINVAL;
 }
...
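For orientation, the removed MPX_GET_BD_ENTRY_OFFSET()/MPX_GET_BT_ENTRY_OFFSET() macros encoded the 64-bit address split that the new comments describe; spelled out as hypothetical helpers:

	/*
	 * 64-bit MPX virtual address split (mirrors the removed macros):
	 *   bits [47:20] index the bounds directory (2^28 8-byte entries)
	 *   bits [19:3]  index a bounds table      (2^17 32-byte entries)
	 *   bits [2:0]   are ignored
	 */
	static unsigned long mpx_bd_entry_offset_64(unsigned long addr)
	{
		return ((addr >> (17 + 3)) & ((1UL << 28) - 1)) << 3;
	}

	static unsigned long mpx_bt_entry_offset_64(unsigned long addr)
	{
		return ((addr >> 3) & ((1UL << 17) - 1)) << 5;
	}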
@@ -21,6 +21,7 @@ struct mm_struct;
 #include <asm/desc_defs.h>
 #include <asm/nops.h>
 #include <asm/special_insns.h>
+#include <asm/fpu/types.h>
 #include <linux/personality.h>
 #include <linux/cpumask.h>
@@ -52,11 +53,16 @@ static inline void *current_text_addr(void)
 	return pc;
 }
+/*
+ * These alignment constraints are for performance in the vSMP case,
+ * but in the task_struct case we must also meet hardware imposed
+ * alignment requirements of the FPU state:
+ */
 #ifdef CONFIG_X86_VSMP
 # define ARCH_MIN_TASKALIGN		(1 << INTERNODE_CACHE_SHIFT)
 # define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
 #else
-# define ARCH_MIN_TASKALIGN		16
+# define ARCH_MIN_TASKALIGN		__alignof__(union fpregs_state)
 # define ARCH_MIN_MMSTRUCT_ALIGN	0
 #endif
@@ -166,7 +172,6 @@ extern const struct seq_operations cpuinfo_op;
 #define cache_line_size()	(boot_cpu_data.x86_cache_alignment)
 extern void cpu_detect(struct cpuinfo_x86 *c);
-extern void fpu_detect(struct cpuinfo_x86 *c);
 extern void early_cpu_init(void);
 extern void identify_boot_cpu(void);
@@ -313,128 +318,6 @@ struct orig_ist {
 	unsigned long		ist[7];
 };
#define MXCSR_DEFAULT 0x1f80
struct i387_fsave_struct {
u32 cwd; /* FPU Control Word */
u32 swd; /* FPU Status Word */
u32 twd; /* FPU Tag Word */
u32 fip; /* FPU IP Offset */
u32 fcs; /* FPU IP Selector */
u32 foo; /* FPU Operand Pointer Offset */
u32 fos; /* FPU Operand Pointer Selector */
/* 8*10 bytes for each FP-reg = 80 bytes: */
u32 st_space[20];
/* Software status information [not touched by FSAVE ]: */
u32 status;
};
struct i387_fxsave_struct {
u16 cwd; /* Control Word */
u16 swd; /* Status Word */
u16 twd; /* Tag Word */
u16 fop; /* Last Instruction Opcode */
union {
struct {
u64 rip; /* Instruction Pointer */
u64 rdp; /* Data Pointer */
};
struct {
u32 fip; /* FPU IP Offset */
u32 fcs; /* FPU IP Selector */
u32 foo; /* FPU Operand Offset */
u32 fos; /* FPU Operand Selector */
};
};
u32 mxcsr; /* MXCSR Register State */
u32 mxcsr_mask; /* MXCSR Mask */
/* 8*16 bytes for each FP-reg = 128 bytes: */
u32 st_space[32];
/* 16*16 bytes for each XMM-reg = 256 bytes: */
u32 xmm_space[64];
u32 padding[12];
union {
u32 padding1[12];
u32 sw_reserved[12];
};
} __attribute__((aligned(16)));
struct i387_soft_struct {
u32 cwd;
u32 swd;
u32 twd;
u32 fip;
u32 fcs;
u32 foo;
u32 fos;
/* 8*10 bytes for each FP-reg = 80 bytes: */
u32 st_space[20];
u8 ftop;
u8 changed;
u8 lookahead;
u8 no_update;
u8 rm;
u8 alimit;
struct math_emu_info *info;
u32 entry_eip;
};
struct ymmh_struct {
/* 16 * 16 bytes for each YMMH-reg = 256 bytes */
u32 ymmh_space[64];
};
/* We don't support LWP yet: */
struct lwp_struct {
u8 reserved[128];
};
struct bndreg {
u64 lower_bound;
u64 upper_bound;
} __packed;
struct bndcsr {
u64 bndcfgu;
u64 bndstatus;
} __packed;
struct xsave_hdr_struct {
u64 xstate_bv;
u64 xcomp_bv;
u64 reserved[6];
} __attribute__((packed));
struct xsave_struct {
struct i387_fxsave_struct i387;
struct xsave_hdr_struct xsave_hdr;
struct ymmh_struct ymmh;
struct lwp_struct lwp;
struct bndreg bndreg[4];
struct bndcsr bndcsr;
/* new processor state extensions will go here */
} __attribute__ ((packed, aligned (64)));
union thread_xstate {
struct i387_fsave_struct fsave;
struct i387_fxsave_struct fxsave;
struct i387_soft_struct soft;
struct xsave_struct xsave;
};
struct fpu {
unsigned int last_cpu;
unsigned int has_fpu;
union thread_xstate *state;
};
 #ifdef CONFIG_X86_64
 DECLARE_PER_CPU(struct orig_ist, orig_ist);
@@ -483,8 +366,6 @@ DECLARE_PER_CPU(struct irq_stack *, softirq_stack);
 #endif	/* X86_64 */
 extern unsigned int xstate_size;
-extern void free_thread_xstate(struct task_struct *);
-extern struct kmem_cache *task_xstate_cachep;
 struct perf_event;
@@ -508,6 +389,10 @@ struct thread_struct {
 	unsigned long		fs;
 #endif
 	unsigned long		gs;
+	/* Floating point and extended processor state */
+	struct fpu		fpu;
 	/* Save middle states of ptrace breakpoints */
 	struct perf_event	*ptrace_bps[HBP_NUM];
 	/* Debug status used for traps, single steps, etc... */
@@ -518,8 +403,6 @@ struct thread_struct {
 	unsigned long		cr2;
 	unsigned long		trap_nr;
 	unsigned long		error_code;
-	/* floating point and extended processor state */
-	struct fpu		fpu;
 #ifdef CONFIG_X86_32
 	/* Virtual 86 mode info */
 	struct vm86_struct __user	*vm86_info;
@@ -535,15 +418,6 @@ struct thread_struct {
 	unsigned long		iopl;
 	/* Max allowed port in the bitmap, in bytes: */
 	unsigned		io_bitmap_max;
-	/*
-	 * fpu_counter contains the number of consecutive context switches
-	 * that the FPU is used. If this is over a threshold, the lazy fpu
-	 * saving becomes unlazy to save the trap. This is an unsigned char
-	 * so that after 256 times the counter wraps and the behavior turns
-	 * lazy again; this to deal with bursty apps that only use FPU for
-	 * a short time
-	 */
-	unsigned char fpu_counter;
 };
 /*
@@ -928,18 +802,18 @@ extern int get_tsc_mode(unsigned long adr);
 extern int set_tsc_mode(unsigned int val);
 /* Register/unregister a process' MPX related resource */
-#define MPX_ENABLE_MANAGEMENT(tsk)	mpx_enable_management((tsk))
-#define MPX_DISABLE_MANAGEMENT(tsk)	mpx_disable_management((tsk))
+#define MPX_ENABLE_MANAGEMENT()	mpx_enable_management()
+#define MPX_DISABLE_MANAGEMENT()	mpx_disable_management()
 #ifdef CONFIG_X86_INTEL_MPX
-extern int mpx_enable_management(struct task_struct *tsk);
-extern int mpx_disable_management(struct task_struct *tsk);
+extern int mpx_enable_management(void);
+extern int mpx_disable_management(void);
 #else
-static inline int mpx_enable_management(struct task_struct *tsk)
+static inline int mpx_enable_management(void)
 {
 	return -EINVAL;
 }
-static inline int mpx_disable_management(struct task_struct *tsk)
+static inline int mpx_disable_management(void)
 {
 	return -EINVAL;
 }
...
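These macros are invoked from the prctl() path, which already operates on 'current' — hence the shortlog entry about not passing a task pointer around when unnecessary. From user space the interface is unchanged (sketch, using the PR_MPX_* prctl requests of this era):

	#include <sys/prctl.h>

	/* Ask the kernel to manage this process's MPX bounds tables: */
	if (prctl(PR_MPX_ENABLE_MANAGEMENT, 0, 0, 0, 0))
		perror("MPX management not available");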
-#include <asm/i387.h>
+#include <asm/fpu/api.h>
 /*
  * may_use_simd - whether it is allowable at this time to issue SIMD
...
@@ -39,7 +39,9 @@
 #include <asm/processor.h>
 #include <asm/percpu.h>
 #include <asm/desc.h>
 #include <linux/random.h>
+#include <linux/sched.h>
 /*
  * 24 byte read-only segment initializer for stack canary.  Linker
...
@@ -7,7 +7,7 @@
 #define _ASM_X86_SUSPEND_32_H
 #include <asm/desc.h>
-#include <asm/i387.h>
+#include <asm/fpu/api.h>
 /* image of the saved processor state */
 struct saved_context {
...
@@ -7,7 +7,7 @@
 #define _ASM_X86_SUSPEND_64_H
 #include <asm/desc.h>
-#include <asm/i387.h>
+#include <asm/fpu/api.h>
 /*
  * Image of the saved processor state, used by the low level ACPI suspend to
...
#undef TRACE_SYSTEM
#define TRACE_SYSTEM mpx
#if !defined(_TRACE_MPX_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_MPX_H
#include <linux/tracepoint.h>
#ifdef CONFIG_X86_INTEL_MPX
TRACE_EVENT(mpx_bounds_register_exception,
TP_PROTO(void *addr_referenced,
const struct bndreg *bndreg),
TP_ARGS(addr_referenced, bndreg),
TP_STRUCT__entry(
__field(void *, addr_referenced)
__field(u64, lower_bound)
__field(u64, upper_bound)
),
TP_fast_assign(
__entry->addr_referenced = addr_referenced;
__entry->lower_bound = bndreg->lower_bound;
__entry->upper_bound = bndreg->upper_bound;
),
/*
* Note that we are printing out the '~' of the upper
* bounds register here. It is actually stored in its
* one's complement form so that its 'init' state
* corresponds to all 0's. But, that looks like
* gibberish when printed out, so print out the 1's
* complement instead of the actual value here. Note
* though that you still need to specify filters for the
* actual value, not the displayed one.
*/
TP_printk("address referenced: 0x%p bounds: lower: 0x%llx ~upper: 0x%llx",
__entry->addr_referenced,
__entry->lower_bound,
~__entry->upper_bound
)
);
TRACE_EVENT(bounds_exception_mpx,
TP_PROTO(const struct bndcsr *bndcsr),
TP_ARGS(bndcsr),
TP_STRUCT__entry(
__field(u64, bndcfgu)
__field(u64, bndstatus)
),
TP_fast_assign(
/* need to get rid of the 'const' on bndcsr */
__entry->bndcfgu = (u64)bndcsr->bndcfgu;
__entry->bndstatus = (u64)bndcsr->bndstatus;
),
TP_printk("bndcfgu:0x%llx bndstatus:0x%llx",
__entry->bndcfgu,
__entry->bndstatus)
);
DECLARE_EVENT_CLASS(mpx_range_trace,
TP_PROTO(unsigned long start,
unsigned long end),
TP_ARGS(start, end),
TP_STRUCT__entry(
__field(unsigned long, start)
__field(unsigned long, end)
),
TP_fast_assign(
__entry->start = start;
__entry->end = end;
),
TP_printk("[0x%p:0x%p]",
(void *)__entry->start,
(void *)__entry->end
)
);
DEFINE_EVENT(mpx_range_trace, mpx_unmap_zap,
TP_PROTO(unsigned long start, unsigned long end),
TP_ARGS(start, end)
);
DEFINE_EVENT(mpx_range_trace, mpx_unmap_search,
TP_PROTO(unsigned long start, unsigned long end),
TP_ARGS(start, end)
);
TRACE_EVENT(mpx_new_bounds_table,
TP_PROTO(unsigned long table_vaddr),
TP_ARGS(table_vaddr),
TP_STRUCT__entry(
__field(unsigned long, table_vaddr)
),
TP_fast_assign(
__entry->table_vaddr = table_vaddr;
),
TP_printk("table vaddr:%p", (void *)__entry->table_vaddr)
);
#else
/*
* This gets used outside of MPX-specific code, so we need a stub.
*/
static inline void trace_bounds_exception_mpx(const struct bndcsr *bndcsr)
{
}
#endif /* CONFIG_X86_INTEL_MPX */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH asm/trace/
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE mpx
#endif /* _TRACE_MPX_H */
/* This part must be outside protection */
#include <trace/define_trace.h>
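To make the one's-complement note in mpx_bounds_register_exception above concrete (hypothetical values): for bounds [0x1000, 0x1fff] the hardware holds the lower bound as-is and the upper bound inverted, so an unused register reads as all zeroes:

	u64 lower = 0x1000;		/* stored as-is */
	u64 upper = ~0x1fffULL;		/* stored in one's complement form */
	/* trace output: "bounds: lower: 0x1000 ~upper: 0x1fff" */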
@@ -14,8 +14,8 @@ struct user_ymmh_regs {
 	__u32 ymmh_space[64];
 };
-struct user_xsave_hdr {
-	__u64 xstate_bv;
+struct user_xstate_header {
+	__u64 xfeatures;
 	__u64 reserved1[2];
 	__u64 reserved2[5];
 };
@@ -41,11 +41,11 @@ struct user_xsave_hdr {
 * particular process/thread.
 *
 * Also when the user modifies certain state FP/SSE/etc through the
- * ptrace interface, they must ensure that the xsave_hdr.xstate_bv
+ * ptrace interface, they must ensure that the header.xfeatures
 * bytes[512..519] of the memory layout are updated correspondingly.
 * i.e., for example when FP state is modified to a non-init state,
- * xsave_hdr.xstate_bv's bit 0 must be set to '1', when SSE is modified to
- * non-init state, xsave_hdr.xstate_bv's bit 1 must to be set to '1', etc.
+ * header.xfeatures's bit 0 must be set to '1', when SSE is modified to
+ * non-init state, header.xfeatures's bit 1 must be set to '1', etc.
 */
 #define USER_XSTATE_FX_SW_WORDS 6
 #define USER_XSTATE_XCR0_WORD	0
@@ -55,7 +55,7 @@ struct user_xstateregs {
 		__u64 fpx_space[58];
 		__u64 xstate_fx_sw[USER_XSTATE_FX_SW_WORDS];
 	} i387;
-	struct user_xsave_hdr xsave_hdr;
+	struct user_xstate_header header;
 	struct user_ymmh_regs ymmh;
 	/* further processor state extensions go here */
 };
...
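A sketch of the rule spelled out in the comment above, from the point of view of a hypothetical ptrace user writing FP state:

	struct user_xstateregs xregs;

	/* ... fill xregs.i387 with non-init FP state ... */
	xregs.header.xfeatures |= 0x1;	/* bit 0: FP state is valid */
	/* bit 1 would likewise be set when writing non-init SSE state */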
/* -*- linux-c -*- ------------------------------------------------------- *
*
* Copyright 2008 rPath, Inc. - All Rights Reserved
*
* This file is part of the Linux kernel, and is made available under
* the terms of the GNU General Public License version 2 or (at your
* option) any later version; incorporated herein by reference.
*
* ----------------------------------------------------------------------- */
/*
* asm-x86/xcr.h
*
* Definitions for the eXtended Control Register instructions
*/
#ifndef _ASM_X86_XCR_H
#define _ASM_X86_XCR_H
#define XCR_XFEATURE_ENABLED_MASK 0x00000000
#ifdef __KERNEL__
# ifndef __ASSEMBLY__
#include <linux/types.h>
static inline u64 xgetbv(u32 index)
{
u32 eax, edx;
asm volatile(".byte 0x0f,0x01,0xd0" /* xgetbv */
: "=a" (eax), "=d" (edx)
: "c" (index));
return eax + ((u64)edx << 32);
}
static inline void xsetbv(u32 index, u64 value)
{
u32 eax = value;
u32 edx = value >> 32;
asm volatile(".byte 0x0f,0x01,0xd1" /* xsetbv */
: : "a" (eax), "d" (edx), "c" (index));
}
# endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_X86_XCR_H */
@@ -36,7 +36,7 @@
 * no advantages to be gotten from x86-64 here anyways.
 */
-#include <asm/i387.h>
+#include <asm/fpu/api.h>
 #ifdef CONFIG_X86_32
 /* reduce register pressure */
...
@@ -26,7 +26,7 @@
 #define XO3(x, y)	"	pxor 8*("#x")(%4), %%mm"#y"	;\n"
 #define XO4(x, y)	"	pxor 8*("#x")(%5), %%mm"#y"	;\n"
-#include <asm/i387.h>
+#include <asm/fpu/api.h>
 static void
 xor_pII_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
...
@@ -18,7 +18,7 @@
 #ifdef CONFIG_AS_AVX
 #include <linux/compiler.h>
-#include <asm/i387.h>
+#include <asm/fpu/api.h>
 #define BLOCK4(i) \
 		BLOCK(32 * i, 0) \
...
#ifndef __ASM_X86_XSAVE_H
#define __ASM_X86_XSAVE_H
#include <linux/types.h>
#include <asm/processor.h>
#define XSTATE_CPUID 0x0000000d
#define XSTATE_FP 0x1
#define XSTATE_SSE 0x2
#define XSTATE_YMM 0x4
#define XSTATE_BNDREGS 0x8
#define XSTATE_BNDCSR 0x10
#define XSTATE_OPMASK 0x20
#define XSTATE_ZMM_Hi256 0x40
#define XSTATE_Hi16_ZMM 0x80
#define XSTATE_FPSSE (XSTATE_FP | XSTATE_SSE)
#define XSTATE_AVX512 (XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM)
/* Bit 63 of XCR0 is reserved for future expansion */
#define XSTATE_EXTEND_MASK (~(XSTATE_FPSSE | (1ULL << 63)))
#define FXSAVE_SIZE 512
#define XSAVE_HDR_SIZE 64
#define XSAVE_HDR_OFFSET FXSAVE_SIZE
#define XSAVE_YMM_SIZE 256
#define XSAVE_YMM_OFFSET (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET)
/* Supported features which support lazy state saving */
#define XSTATE_LAZY (XSTATE_FP | XSTATE_SSE | XSTATE_YMM \
| XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM)
/* Supported features which require eager state saving */
#define XSTATE_EAGER (XSTATE_BNDREGS | XSTATE_BNDCSR)
/* All currently supported features */
#define XCNTXT_MASK (XSTATE_LAZY | XSTATE_EAGER)
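Because these are plain bitmasks, feature queries reduce to simple bit tests; a minimal illustrative helper (not from the patch):

/* Illustrative only: does a feature mask include any AVX-512 state? */
static inline bool xfeatures_include_avx512(u64 xfeatures)
{
	return (xfeatures & XSTATE_AVX512) != 0;
}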
#ifdef CONFIG_X86_64
#define REX_PREFIX "0x48, "
#else
#define REX_PREFIX
#endif
extern unsigned int xstate_size;
extern u64 pcntxt_mask;
extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
extern struct xsave_struct *init_xstate_buf;
extern void xsave_init(void);
extern void update_regset_xstate_info(unsigned int size, u64 xstate_mask);
extern int init_fpu(struct task_struct *child);
/* These macros all use (%edi)/(%rdi) as the single memory argument. */
#define XSAVE ".byte " REX_PREFIX "0x0f,0xae,0x27"
#define XSAVEOPT ".byte " REX_PREFIX "0x0f,0xae,0x37"
#define XSAVES ".byte " REX_PREFIX "0x0f,0xc7,0x2f"
#define XRSTOR ".byte " REX_PREFIX "0x0f,0xae,0x2f"
#define XRSTORS ".byte " REX_PREFIX "0x0f,0xc7,0x1f"
#define xstate_fault ".section .fixup,\"ax\"\n" \
"3: movl $-1,%[err]\n" \
" jmp 2b\n" \
".previous\n" \
_ASM_EXTABLE(1b, 3b) \
: [err] "=r" (err)
/*
 * This function is called only during boot time, when the x86 caps are not
 * set up and the alternatives mechanism cannot be used yet.
*/
static inline int xsave_state_booting(struct xsave_struct *fx, u64 mask)
{
u32 lmask = mask;
u32 hmask = mask >> 32;
int err = 0;
WARN_ON(system_state != SYSTEM_BOOTING);
if (boot_cpu_has(X86_FEATURE_XSAVES))
asm volatile("1:"XSAVES"\n\t"
"2:\n\t"
xstate_fault
: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
: "memory");
else
asm volatile("1:"XSAVE"\n\t"
"2:\n\t"
xstate_fault
: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
: "memory");
return err;
}
/*
 * This function is called only during boot time, when the x86 caps are not
 * set up and the alternatives mechanism cannot be used yet.
*/
static inline int xrstor_state_booting(struct xsave_struct *fx, u64 mask)
{
u32 lmask = mask;
u32 hmask = mask >> 32;
int err = 0;
WARN_ON(system_state != SYSTEM_BOOTING);
if (boot_cpu_has(X86_FEATURE_XSAVES))
asm volatile("1:"XRSTORS"\n\t"
"2:\n\t"
xstate_fault
: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
: "memory");
else
asm volatile("1:"XRSTOR"\n\t"
"2:\n\t"
xstate_fault
: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
: "memory");
return err;
}
/*
* Save processor xstate to xsave area.
*/
static inline int xsave_state(struct xsave_struct *fx, u64 mask)
{
u32 lmask = mask;
u32 hmask = mask >> 32;
int err = 0;
/*
* If xsaves is enabled, xsaves replaces xsaveopt because
* it supports compact format and supervisor states in addition to
* modified optimization in xsaveopt.
*
* Otherwise, if xsaveopt is enabled, xsaveopt replaces xsave
* because xsaveopt supports modified optimization which is not
* supported by xsave.
*
 * If neither xsaves nor xsaveopt is enabled, use xsave.
*/
alternative_input_2(
"1:"XSAVE,
XSAVEOPT,
X86_FEATURE_XSAVEOPT,
XSAVES,
X86_FEATURE_XSAVES,
[fx] "D" (fx), "a" (lmask), "d" (hmask) :
"memory");
asm volatile("2:\n\t"
xstate_fault
: "0" (0)
: "memory");
return err;
}
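The two-way alternative above boils down, at patch time, to the following priority order (illustrative helper, not in the patch; the real code rewrites the single instruction in place rather than branching at runtime):

/* Illustrative only: which save instruction the alternatives end up using. */
static inline const char *xsave_insn_chosen(void)
{
	if (boot_cpu_has(X86_FEATURE_XSAVES))
		return "xsaves";	/* compacted format + supervisor states */
	if (boot_cpu_has(X86_FEATURE_XSAVEOPT))
		return "xsaveopt";	/* adds the modified optimization */
	return "xsave";
}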
/*
* Restore processor xstate from xsave area.
*/
static inline int xrstor_state(struct xsave_struct *fx, u64 mask)
{
int err = 0;
u32 lmask = mask;
u32 hmask = mask >> 32;
/*
 * Use xrstors to restore the context if it is enabled: xrstors supports
 * the compacted xsave-area format, which xrstor does not.
*/
alternative_input(
"1: " XRSTOR,
XRSTORS,
X86_FEATURE_XSAVES,
"D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
: "memory");
asm volatile("2:\n"
xstate_fault
: "0" (0)
: "memory");
return err;
}
/*
* Save xstate context for old process during context switch.
*/
static inline void fpu_xsave(struct fpu *fpu)
{
xsave_state(&fpu->state->xsave, -1);
}
/*
* Restore xstate context for new process during context switch.
*/
static inline int fpu_xrstor_checking(struct xsave_struct *fx)
{
return xrstor_state(fx, -1);
}
/*
* Save xstate to user space xsave area.
*
 * We don't use the modified optimization, because xrstor/xrstors might
 * track a different application.
 *
 * We don't use the compacted xsave-area format, to stay backward
 * compatible with old applications that don't understand it.
*/
static inline int xsave_user(struct xsave_struct __user *buf)
{
int err;
/*
* Clear the xsave header first, so that reserved fields are
* initialized to zero.
*/
err = __clear_user(&buf->xsave_hdr, sizeof(buf->xsave_hdr));
if (unlikely(err))
return -EFAULT;
__asm__ __volatile__(ASM_STAC "\n"
"1:"XSAVE"\n"
"2: " ASM_CLAC "\n"
xstate_fault
: "D" (buf), "a" (-1), "d" (-1), "0" (0)
: "memory");
return err;
}
/*
* Restore xstate from user space xsave area.
*/
static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
{
int err = 0;
struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
u32 lmask = mask;
u32 hmask = mask >> 32;
__asm__ __volatile__(ASM_STAC "\n"
"1:"XRSTOR"\n"
"2: " ASM_CLAC "\n"
xstate_fault
: "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
: "memory"); /* memory required? */
return err;
}
void *get_xsave_addr(struct xsave_struct *xsave, int xstate);
void setup_xstate_comp(void);
#endif
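An assumed usage sketch (not from this patch) of the get_xsave_addr() helper declared above:

/* Hypothetical helper: find the YMM component inside an xsave buffer. */
static void *example_find_ymm(struct xsave_struct *xsave)
{
	/* Returns NULL when XSTATE_YMM was not saved into this buffer. */
	return get_xsave_addr(xsave, XSTATE_YMM);
}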
...@@ -25,7 +25,7 @@ struct _fpx_sw_bytes {
 	__u32 extended_size;	/* total size of the layout referred by
 				 * fpstate pointer in the sigcontext.
 				 */
-	__u64 xstate_bv;
+	__u64 xfeatures;
 				/* feature bit mask (including fp/sse/extended
 				 * state) that is present in the memory
 				 * layout.
...@@ -209,8 +209,8 @@ struct sigcontext {
 #endif /* !__i386__ */

-struct _xsave_hdr {
-	__u64 xstate_bv;
+struct _header {
+	__u64 xfeatures;
 	__u64 reserved1[2];
 	__u64 reserved2[5];
 };
...@@ -228,7 +228,7 @@ struct _ymmh_state {
  */
 struct _xstate {
 	struct _fpstate fpstate;
-	struct _xsave_hdr xstate_hdr;
+	struct _header xstate_hdr;
 	struct _ymmh_state ymmh;
 	/* new processor state extensions go here */
 };
...
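A user-space sketch of reaching the renamed field from a signal frame (illustrative only; assumes an SA_SIGINFO handler, a kernel that saves extended state into the frame, and glibc's ucontext layout):

#include <signal.h>
#include <ucontext.h>

/* Hypothetical handler: check whether AVX state was saved in this frame. */
static void example_handler(int sig, siginfo_t *info, void *ctx)
{
	ucontext_t *uc = ctx;
	struct _xstate *xs = (struct _xstate *)uc->uc_mcontext.fpregs;

	if (xs->xstate_hdr.xfeatures & 0x4) {	/* bit 2: YMM component */
		/* AVX registers are present in this frame */
	}
}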
...@@ -44,7 +44,7 @@ obj-y += pci-iommu_table.o
 obj-y				+= resource.o

 obj-y				+= process.o
-obj-y				+= i387.o xsave.o
+obj-y				+= fpu/
 obj-y				+= ptrace.o
 obj-$(CONFIG_X86_32)		+= tls.o
 obj-$(CONFIG_IA32_EMULATION)	+= tls.o
...
...@@ -21,6 +21,10 @@
 #include <asm/io.h>
 #include <asm/fixmap.h>

+int __read_mostly alternatives_patched;
+
+EXPORT_SYMBOL_GPL(alternatives_patched);
+
 #define MAX_PATCH_LEN (255-1)

 static int __initdata_or_module debug_alternative;
...@@ -627,6 +631,7 @@ void __init alternative_instructions(void)
 	apply_paravirt(__parainstructions, __parainstructions_end);

 	restart_nmi();
+	alternatives_patched = 1;
 }

 /**
...
...@@ -12,57 +12,11 @@
 #include <asm/bugs.h>
 #include <asm/processor.h>
 #include <asm/processor-flags.h>
-#include <asm/i387.h>
+#include <asm/fpu/internal.h>
 #include <asm/msr.h>
 #include <asm/paravirt.h>
 #include <asm/alternative.h>

-static double __initdata x = 4195835.0;
-static double __initdata y = 3145727.0;
-
-/*
- * This used to check for exceptions..
- * However, it turns out that to support that,
- * the XMM trap handlers basically had to
- * be buggy. So let's have a correct XMM trap
- * handler, and forget about printing out
- * some status at boot.
- *
- * We should really only care about bugs here
- * anyway. Not features.
- */
-static void __init check_fpu(void)
-{
-	s32 fdiv_bug;
-
-	kernel_fpu_begin();
-
-	/*
-	 * trap_init() enabled FXSR and company _before_ testing for FP
-	 * problems here.
-	 *
-	 * Test for the divl bug: http://en.wikipedia.org/wiki/Fdiv_bug
-	 */
-	__asm__("fninit\n\t"
-		"fldl %1\n\t"
-		"fdivl %2\n\t"
-		"fmull %2\n\t"
-		"fldl %1\n\t"
-		"fsubp %%st,%%st(1)\n\t"
-		"fistpl %0\n\t"
-		"fwait\n\t"
-		"fninit"
-		: "=m" (*&fdiv_bug)
-		: "m" (*&x), "m" (*&y));
-
-	kernel_fpu_end();
-
-	if (fdiv_bug) {
-		set_cpu_bug(&boot_cpu_data, X86_BUG_FDIV);
-		pr_warn("Hmm, FPU with FDIV bug\n");
-	}
-}
-
 void __init check_bugs(void)
 {
 	identify_boot_cpu();
...@@ -85,10 +39,5 @@ void __init check_bugs(void)
 		'0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
 	alternative_instructions();

-	/*
-	 * kernel_fpu_begin/end() in check_fpu() relies on the patched
-	 * alternative instructions.
-	 */
-	if (cpu_has_fpu)
-		check_fpu();
+	fpu__init_check_bugs();
 }
...@@ -32,8 +32,7 @@
 #include <asm/setup.h>
 #include <asm/apic.h>
 #include <asm/desc.h>
-#include <asm/i387.h>
-#include <asm/fpu-internal.h>
+#include <asm/fpu/internal.h>
 #include <asm/mtrr.h>
 #include <linux/numa.h>
 #include <asm/asm.h>
...@@ -146,32 +145,21 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 } };
 EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);

-static int __init x86_xsave_setup(char *s)
+static int __init x86_mpx_setup(char *s)
 {
+	/* require an exact match without trailing characters */
 	if (strlen(s))
 		return 0;
-	setup_clear_cpu_cap(X86_FEATURE_XSAVE);
-	setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
-	setup_clear_cpu_cap(X86_FEATURE_XSAVES);
-	setup_clear_cpu_cap(X86_FEATURE_AVX);
-	setup_clear_cpu_cap(X86_FEATURE_AVX2);
-	return 1;
-}
-__setup("noxsave", x86_xsave_setup);
-
-static int __init x86_xsaveopt_setup(char *s)
-{
-	setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
-	return 1;
-}
-__setup("noxsaveopt", x86_xsaveopt_setup);
+	/* do not emit a message if the feature is not present */
+	if (!boot_cpu_has(X86_FEATURE_MPX))
+		return 1;

-static int __init x86_xsaves_setup(char *s)
-{
-	setup_clear_cpu_cap(X86_FEATURE_XSAVES);
-	return 1;
-}
-__setup("noxsaves", x86_xsaves_setup);
+	setup_clear_cpu_cap(X86_FEATURE_MPX);
+	pr_info("nompx: Intel Memory Protection Extensions (MPX) disabled\n");
+
+	return 1;
+}
+__setup("nompx", x86_mpx_setup);

 #ifdef CONFIG_X86_32
 static int cachesize_override = -1;
...@@ -184,14 +172,6 @@ static int __init cachesize_setup(char *str)
 }
 __setup("cachesize=", cachesize_setup);

-static int __init x86_fxsr_setup(char *s)
-{
-	setup_clear_cpu_cap(X86_FEATURE_FXSR);
-	setup_clear_cpu_cap(X86_FEATURE_XMM);
-	return 1;
-}
-__setup("nofxsr", x86_fxsr_setup);
-
 static int __init x86_sep_setup(char *s)
 {
 	setup_clear_cpu_cap(X86_FEATURE_SEP);
...@@ -762,7 +742,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 	cpu_detect(c);
 	get_cpu_vendor(c);
 	get_cpu_cap(c);
-	fpu_detect(c);
+	fpu__init_system(c);

 	if (this_cpu->c_early_init)
 		this_cpu->c_early_init(c);
...@@ -1186,8 +1166,6 @@ DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;
 DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
 EXPORT_PER_CPU_SYMBOL(__preempt_count);

-DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);
-
 /*
  * Special IST stacks which the CPU switches to when it calls
  * an IST-marked descriptor entry. Up to 7 stacks (hardware
...@@ -1278,7 +1256,6 @@ DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
 EXPORT_PER_CPU_SYMBOL(current_task);
 DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
 EXPORT_PER_CPU_SYMBOL(__preempt_count);
-DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);

 /*
  * On x86_32, vm86 modifies tss.sp0, so sp0 isn't a reliable way to find
...@@ -1442,7 +1419,7 @@ void cpu_init(void)
 	clear_all_debug_regs();
 	dbg_restore_debug_regs();

-	fpu_init();
+	fpu__init_cpu();

 	if (is_uv_system())
 		uv_cpu_init();
...@@ -1498,7 +1475,7 @@ void cpu_init(void)
 	clear_all_debug_regs();
 	dbg_restore_debug_regs();

-	fpu_init();
+	fpu__init_cpu();
 }
 #endif
...
#
# Build rules for the FPU support code:
#
obj-y += init.o bugs.o core.o regset.o signal.o xstate.o
/*
* x86 FPU bug checks:
*/
#include <asm/fpu/internal.h>
/*
* Boot time CPU/FPU FDIV bug detection code:
*/
static double __initdata x = 4195835.0;
static double __initdata y = 3145727.0;
/*
* This used to check for exceptions..
* However, it turns out that to support that,
* the XMM trap handlers basically had to
* be buggy. So let's have a correct XMM trap
* handler, and forget about printing out
* some status at boot.
*
* We should really only care about bugs here
* anyway. Not features.
*/
static void __init check_fpu(void)
{
u32 cr0_saved;
s32 fdiv_bug;
/* We might have CR0::TS set already, clear it: */
cr0_saved = read_cr0();
write_cr0(cr0_saved & ~X86_CR0_TS);
kernel_fpu_begin();
/*
* trap_init() enabled FXSR and company _before_ testing for FP
* problems here.
*
* Test for the divl bug: http://en.wikipedia.org/wiki/Fdiv_bug
*/
__asm__("fninit\n\t"
"fldl %1\n\t"
"fdivl %2\n\t"
"fmull %2\n\t"
"fldl %1\n\t"
"fsubp %%st,%%st(1)\n\t"
"fistpl %0\n\t"
"fwait\n\t"
"fninit"
: "=m" (*&fdiv_bug)
: "m" (*&x), "m" (*&y));
kernel_fpu_end();
write_cr0(cr0_saved);
if (fdiv_bug) {
set_cpu_bug(&boot_cpu_data, X86_BUG_FDIV);
pr_warn("Hmm, FPU with FDIV bug\n");
}
}
void __init fpu__init_check_bugs(void)
{
/*
* kernel_fpu_begin/end() in check_fpu() relies on the patched
* alternative instructions.
*/
if (cpu_has_fpu)
check_fpu();
}
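In plain C, the asm block above computes the residue of a divide round-trip; a worked model (illustrative only: a compiler may route this through SSE and never exercise the x87 divider the asm deliberately uses):

/* Illustrative-only C model of the asm sequence in check_fpu(): */
static s32 fdiv_residue(double x, double y)
{
	/*
	 * 0 on a correct FPU; the classic x = 4195835, y = 3145727 case
	 * leaves a residue of 256 on an FDIV-buggy Pentium, which is what
	 * makes the X86_BUG_FDIV branch above fire.
	 */
	return (s32)(x - (x / y) * y);
}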
/*
* x86 FPU boot time init code:
*/
#include <asm/fpu/internal.h>
#include <asm/tlbflush.h>
/*
* Initialize the TS bit in CR0 according to the style of context-switches
* we are using:
*/
static void fpu__init_cpu_ctx_switch(void)
{
if (!cpu_has_eager_fpu)
stts();
else
clts();
}
/*
* Initialize the registers found in all CPUs, CR0 and CR4:
*/
static void fpu__init_cpu_generic(void)
{
unsigned long cr0;
unsigned long cr4_mask = 0;
if (cpu_has_fxsr)
cr4_mask |= X86_CR4_OSFXSR;
if (cpu_has_xmm)
cr4_mask |= X86_CR4_OSXMMEXCPT;
if (cr4_mask)
cr4_set_bits(cr4_mask);
cr0 = read_cr0();
cr0 &= ~(X86_CR0_TS|X86_CR0_EM); /* clear TS and EM */
if (!cpu_has_fpu)
cr0 |= X86_CR0_EM;
write_cr0(cr0);
/* Flush out any pending x87 state: */
asm volatile ("fninit");
}
/*
* Enable all supported FPU features. Called when a CPU is brought online:
*/
void fpu__init_cpu(void)
{
fpu__init_cpu_generic();
fpu__init_cpu_xstate();
fpu__init_cpu_ctx_switch();
}
/*
* The earliest FPU detection code.
*
* Set the X86_FEATURE_FPU CPU-capability bit based on
* trying to execute an actual sequence of FPU instructions:
*/
static void fpu__init_system_early_generic(struct cpuinfo_x86 *c)
{
unsigned long cr0;
u16 fsw, fcw;
fsw = fcw = 0xffff;
cr0 = read_cr0();
cr0 &= ~(X86_CR0_TS | X86_CR0_EM);
write_cr0(cr0);
asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
: "+m" (fsw), "+m" (fcw));
if (fsw == 0 && (fcw & 0x103f) == 0x003f)
set_cpu_cap(c, X86_FEATURE_FPU);
else
clear_cpu_cap(c, X86_FEATURE_FPU);
#ifndef CONFIG_MATH_EMULATION
if (!cpu_has_fpu) {
pr_emerg("x86/fpu: Giving up, no FPU found and no math emulation present\n");
for (;;)
asm volatile("hlt");
}
#endif
}
/*
* Boot time FPU feature detection code:
*/
unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
static void __init fpu__init_system_mxcsr(void)
{
unsigned int mask = 0;
if (cpu_has_fxsr) {
struct fxregs_state fx_tmp __aligned(32) = { };
asm volatile("fxsave %0" : "+m" (fx_tmp));
mask = fx_tmp.mxcsr_mask;
/*
* If zero then use the default features mask,
* which has all features set, except the
* denormals-are-zero feature bit:
*/
if (mask == 0)
mask = 0x0000ffbf;
}
mxcsr_feature_mask &= mask;
}
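Worked example of the fallback: a reported mxcsr_mask of zero selects all sixteen defined MXCSR bits except denormals-are-zero (DAZ, bit 6), i.e. 0xffff & ~(1 << 6) == 0xffbf.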
/*
* Once per bootup FPU initialization sequences that will run on most x86 CPUs:
*/
static void __init fpu__init_system_generic(void)
{
/*
* Set up the legacy init FPU context. (xstate init might overwrite this
* with a more modern format, if the CPU supports it.)
*/
fpstate_init_fxstate(&init_fpstate.fxsave);
fpu__init_system_mxcsr();
}
/*
* Size of the FPU context state. All tasks in the system use the
* same context size, regardless of what portion they use.
* This is inherent to the XSAVE architecture which puts all state
 * components into a single, contiguous memory block:
*/
unsigned int xstate_size;
EXPORT_SYMBOL_GPL(xstate_size);
/*
* Set up the xstate_size based on the legacy FPU context size.
*
* We set this up first, and later it will be overwritten by
* fpu__init_system_xstate() if the CPU knows about xstates.
*/
static void __init fpu__init_system_xstate_size_legacy(void)
{
static int on_boot_cpu = 1;
WARN_ON_FPU(!on_boot_cpu);
on_boot_cpu = 0;
/*
 * Note that xstate_size might be overwritten later during
* fpu__init_system_xstate().
*/
if (!cpu_has_fpu) {
/*
* Disable xsave as we do not support it if i387
* emulation is enabled.
*/
setup_clear_cpu_cap(X86_FEATURE_XSAVE);
setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
xstate_size = sizeof(struct swregs_state);
} else {
if (cpu_has_fxsr)
xstate_size = sizeof(struct fxregs_state);
else
xstate_size = sizeof(struct fregs_state);
}
/*
* Quirk: we don't yet handle the XSAVES* instructions
* correctly, as we don't correctly convert between
* standard and compacted format when interfacing
* with user-space - so disable it for now.
*
* The difference is small: with recent CPUs the
* compacted format is only marginally smaller than
* the standard FPU state format.
*
* ( This is easy to backport while we are fixing
* XSAVES* support. )
*/
setup_clear_cpu_cap(X86_FEATURE_XSAVES);
}
/*
* FPU context switching strategies:
*
 * Contrary to popular belief, we don't do lazy FPU saves, due to the
* task migration complications it brings on SMP - we only do
* lazy FPU restores.
*
* 'lazy' is the traditional strategy, which is based on setting
* CR0::TS to 1 during context-switch (instead of doing a full
* restore of the FPU state), which causes the first FPU instruction
* after the context switch (whenever it is executed) to fault - at
* which point we lazily restore the FPU state into FPU registers.
*
* Tasks are of course under no obligation to execute FPU instructions,
* so it can easily happen that another context-switch occurs without
* a single FPU instruction being executed. If we eventually switch
* back to the original task (that still owns the FPU) then we have
* not only saved the restores along the way, but we also have the
* FPU ready to be used for the original task.
*
* 'eager' switching is used on modern CPUs, there we switch the FPU
* state during every context switch, regardless of whether the task
* has used FPU instructions in that time slice or not. This is done
* because modern FPU context saving instructions are able to optimize
* state saving and restoration in hardware: they can detect both
* unused and untouched FPU state and optimize accordingly.
*
* [ Note that even in 'lazy' mode we might optimize context switches
* to use 'eager' restores, if we detect that a task is using the FPU
* frequently. See the fpu->counter logic in fpu/internal.h for that. ]
*/
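A minimal sketch of the 'lazy' mechanism described above, under hypothetical helper names (the real context-switch and trap paths live elsewhere in this series):

static void restore_task_fpu(void);	/* hypothetical: reload this task's FPU state */

static void sketch_lazy_switch_out(void)
{
	stts();		/* set CR0::TS: the next FPU instruction raises #NM */
}

/* Hypothetical #NM (device-not-available) handler body: */
static void sketch_lazy_trap(void)
{
	clts();			/* clear TS so FPU instructions execute again */
	restore_task_fpu();	/* lazily restore the owner's registers */
}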
static enum { AUTO, ENABLE, DISABLE } eagerfpu = AUTO;
static int __init eager_fpu_setup(char *s)
{
if (!strcmp(s, "on"))
eagerfpu = ENABLE;
else if (!strcmp(s, "off"))
eagerfpu = DISABLE;
else if (!strcmp(s, "auto"))
eagerfpu = AUTO;
return 1;
}
__setup("eagerfpu=", eager_fpu_setup);
/*
* Pick the FPU context switching strategy:
*/
static void __init fpu__init_system_ctx_switch(void)
{
static bool on_boot_cpu = 1;
WARN_ON_FPU(!on_boot_cpu);
on_boot_cpu = 0;
WARN_ON_FPU(current->thread.fpu.fpstate_active);
current_thread_info()->status = 0;
/* Auto enable eagerfpu for xsaveopt */
if (cpu_has_xsaveopt && eagerfpu != DISABLE)
eagerfpu = ENABLE;
if (xfeatures_mask & XSTATE_EAGER) {
if (eagerfpu == DISABLE) {
pr_err("x86/fpu: eagerfpu switching disabled, disabling the following xstate features: 0x%llx.\n",
xfeatures_mask & XSTATE_EAGER);
xfeatures_mask &= ~XSTATE_EAGER;
} else {
eagerfpu = ENABLE;
}
}
if (eagerfpu == ENABLE)
setup_force_cpu_cap(X86_FEATURE_EAGER_FPU);
printk(KERN_INFO "x86/fpu: Using '%s' FPU context switches.\n", eagerfpu == ENABLE ? "eager" : "lazy");
}
/*
* Called on the boot CPU once per system bootup, to set up the initial
* FPU state that is later cloned into all processes:
*/
void __init fpu__init_system(struct cpuinfo_x86 *c)
{
fpu__init_system_early_generic(c);
/*
* The FPU has to be operational for some of the
* later FPU init activities:
*/
fpu__init_cpu();
/*
* But don't leave CR0::TS set yet, as some of the FPU setup
* methods depend on being able to execute FPU instructions
* that will fault on a set TS, such as the FXSAVE in
* fpu__init_system_mxcsr().
*/
clts();
fpu__init_system_generic();
fpu__init_system_xstate_size_legacy();
fpu__init_system_xstate();
fpu__init_system_ctx_switch();
}
/*
* Boot parameter to turn off FPU support and fall back to math-emu:
*/
static int __init no_387(char *s)
{
setup_clear_cpu_cap(X86_FEATURE_FPU);
return 1;
}
__setup("no387", no_387);
/*
* Disable all xstate CPU features:
*/
static int __init x86_noxsave_setup(char *s)
{
if (strlen(s))
return 0;
setup_clear_cpu_cap(X86_FEATURE_XSAVE);
setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
setup_clear_cpu_cap(X86_FEATURE_XSAVES);
setup_clear_cpu_cap(X86_FEATURE_AVX);
setup_clear_cpu_cap(X86_FEATURE_AVX2);
return 1;
}
__setup("noxsave", x86_noxsave_setup);
/*
* Disable the XSAVEOPT instruction specifically:
*/
static int __init x86_noxsaveopt_setup(char *s)
{
setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
return 1;
}
__setup("noxsaveopt", x86_noxsaveopt_setup);
/*
* Disable the XSAVES instruction:
*/
static int __init x86_noxsaves_setup(char *s)
{
setup_clear_cpu_cap(X86_FEATURE_XSAVES);
return 1;
}
__setup("noxsaves", x86_noxsaves_setup);
/*
* Disable FX save/restore and SSE support:
*/
static int __init x86_nofxsr_setup(char *s)
{
setup_clear_cpu_cap(X86_FEATURE_FXSR);
setup_clear_cpu_cap(X86_FEATURE_FXSR_OPT);
setup_clear_cpu_cap(X86_FEATURE_XMM);
return 1;
}
__setup("nofxsr", x86_nofxsr_setup);
...@@ -68,8 +68,7 @@
 #include <asm/mwait.h>
 #include <asm/apic.h>
 #include <asm/io_apic.h>
-#include <asm/i387.h>
-#include <asm/fpu-internal.h>
+#include <asm/fpu/internal.h>
 #include <asm/setup.h>
 #include <asm/uv/uv.h>
 #include <linux/mc146818rtc.h>
...