Commit 918d80a1 authored by Linus Torvalds

Merge branch 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 cpu handling changes from Ingo Molnar:
 "Bigger changes:

   - Intel CPU hardware-enablement: new vector instructions support
     (AVX-512), by Fenghua Yu.

   - Support the clflushopt instruction and use it in appropriate
     places.  clflushopt is similar to clflush but with more relaxed
     ordering, by Ross Zwisler.

   - MSR accessor cleanups, by Borislav Petkov.

   - 'forcepae' boot flag for those who have way too much time to spend
     on way too old Pentium-M systems and want to live way too
     dangerously, by Chris Bainbridge"

* 'x86-cpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, cpu: Add forcepae parameter for booting PAE kernels on PAE-disabled Pentium M
  Rename TAINT_UNSAFE_SMP to TAINT_CPU_OUT_OF_SPEC
  x86, intel: Make MSR_IA32_MISC_ENABLE bit constants systematic
  x86, Intel: Convert to the new bit access MSR accessors
  x86, AMD: Convert to the new bit access MSR accessors
  x86: Add another set of MSR accessor functions
  x86: Use clflushopt in drm_clflush_virt_range
  x86: Use clflushopt in drm_clflush_page
  x86: Use clflushopt in clflush_cache_range
  x86: Add support for the clflushopt instruction
  x86, AVX-512: Enable AVX-512 States Context Switch
  x86, AVX-512: AVX-512 Feature Detection
parents 26a5c0df 69f2366c
@@ -1011,6 +1011,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			parameter will force ia64_sal_cache_flush to call
 			ia64_pal_cache_flush instead of SAL_CACHE_FLUSH.
 
+	forcepae [X86-32]
+			Forcefully enable Physical Address Extension (PAE).
+			Many Pentium M systems disable PAE but may have a
+			functionally usable PAE implementation.
+			Warning: use of this parameter will taint the kernel
+			and may cause unknown problems.
+
 	ftrace=[tracer]
 			[FTRACE] will set and start the specified tracer
 			as early as possible in order to facilitate early
@@ -67,6 +67,13 @@ static int is_transmeta(void)
 	       cpu_vendor[2] == A32('M', 'x', '8', '6');
 }
 
+static int is_intel(void)
+{
+	return cpu_vendor[0] == A32('G', 'e', 'n', 'u') &&
+	       cpu_vendor[1] == A32('i', 'n', 'e', 'I') &&
+	       cpu_vendor[2] == A32('n', 't', 'e', 'l');
+}
+
 /* Returns a bitmask of which words we have error bits in */
 static int check_cpuflags(void)
 {
@@ -153,6 +160,19 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
 		asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
 		err = check_cpuflags();
+	} else if (err == 0x01 &&
+		   !(err_flags[0] & ~(1 << X86_FEATURE_PAE)) &&
+		   is_intel() && cpu.level == 6 &&
+		   (cpu.model == 9 || cpu.model == 13)) {
+		/* PAE is disabled on this Pentium M but can be forced */
+		if (cmdline_find_option_bool("forcepae")) {
+			puts("WARNING: Forcing PAE in CPU flags\n");
+			set_bit(X86_FEATURE_PAE, cpu.flags);
+			err = check_cpuflags();
+		}
+		else {
+			puts("WARNING: PAE disabled. Use parameter 'forcepae' to enable at your own risk!\n");
+		}
 	}
 
 	if (err_flags_ptr)
@@ -217,9 +217,14 @@
 #define X86_FEATURE_INVPCID	(9*32+10) /* Invalidate Processor Context ID */
 #define X86_FEATURE_RTM		(9*32+11) /* Restricted Transactional Memory */
 #define X86_FEATURE_MPX		(9*32+14) /* Memory Protection Extension */
+#define X86_FEATURE_AVX512F	(9*32+16) /* AVX-512 Foundation */
 #define X86_FEATURE_RDSEED	(9*32+18) /* The RDSEED instruction */
 #define X86_FEATURE_ADX		(9*32+19) /* The ADCX and ADOX instructions */
 #define X86_FEATURE_SMAP	(9*32+20) /* Supervisor Mode Access Prevention */
+#define X86_FEATURE_CLFLUSHOPT	(9*32+23) /* CLFLUSHOPT instruction */
+#define X86_FEATURE_AVX512PF	(9*32+26) /* AVX-512 Prefetch */
+#define X86_FEATURE_AVX512ER	(9*32+27) /* AVX-512 Exponential and Reciprocal */
+#define X86_FEATURE_AVX512CD	(9*32+28) /* AVX-512 Conflict Detection */
 
 /*
  * BUG word(s)
@@ -214,6 +214,8 @@ do { \
 
 struct msr *msrs_alloc(void);
 void msrs_free(struct msr *msrs);
+int msr_set_bit(u32 msr, u8 bit);
+int msr_clear_bit(u32 msr, u8 bit);
 
 #ifdef CONFIG_SMP
 int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
@@ -191,6 +191,14 @@ static inline void clflush(volatile void *__p)
 	asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
 }
 
+static inline void clflushopt(volatile void *__p)
+{
+	alternative_io(".byte " __stringify(NOP_DS_PREFIX) "; clflush %P0",
+		       ".byte 0x66; clflush %P0",
+		       X86_FEATURE_CLFLUSHOPT,
+		       "+m" (*(volatile char __force *)__p));
+}
+
 #define nop() asm volatile ("nop")
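The alternative works because clflushopt is encoded as a 0x66-prefixed clflush: CPUs without X86_FEATURE_CLFLUSHOPT execute a same-length, DS-prefixed plain clflush, and capable CPUs get the 0x66 form patched in. A minimal usage sketch, not taken from this merge (flush_buffer and its arguments are hypothetical), bracketed with mb() the same way clflush_cache_range() below does it:

/* Illustrative only: flush an arbitrary buffer with the new wrapper. */
static void flush_buffer(void *buf, size_t len)
{
	void *p;

	mb();	/* order earlier stores before the weakly ordered flushes */
	for (p = buf; p < buf + len; p += boot_cpu_data.x86_clflush_size)
		clflushopt(p);
	clflushopt(buf + len - 1);	/* cover a final partial cacheline */
	mb();	/* keep later accesses ordered after the flushes */
}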
@@ -6,11 +6,14 @@
 #define XSTATE_CPUID		0x0000000d
 
 #define XSTATE_FP		0x1
 #define XSTATE_SSE		0x2
 #define XSTATE_YMM		0x4
 #define XSTATE_BNDREGS		0x8
 #define XSTATE_BNDCSR		0x10
+#define XSTATE_OPMASK		0x20
+#define XSTATE_ZMM_Hi256	0x40
+#define XSTATE_Hi16_ZMM		0x80
 
 #define XSTATE_FPSSE	(XSTATE_FP | XSTATE_SSE)
@@ -23,7 +26,8 @@
 #define XSAVE_YMM_OFFSET    (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET)
 
 /* Supported features which support lazy state saving */
-#define XSTATE_LAZY	(XSTATE_FP | XSTATE_SSE | XSTATE_YMM)
+#define XSTATE_LAZY	(XSTATE_FP | XSTATE_SSE | XSTATE_YMM		      \
+			| XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM)
 
 /* Supported features which require eager state saving */
 #define XSTATE_EAGER	(XSTATE_BNDREGS | XSTATE_BNDCSR)
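The three new components together hold the AVX-512 register state: the opmask registers k0-k7, the upper 256 bits of ZMM0-ZMM15, and ZMM16-ZMM31. A small sketch of how the masks combine when checking an enabled-feature mask such as XCR0; the XSTATE_AVX512 macro and the helper are hypothetical, not part of this merge:

/* Hypothetical helper built only from the XSTATE_* masks defined above. */
#define XSTATE_AVX512	(XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM)

static bool xstate_has_avx512(u64 xfeatures)
{
	/* All three AVX-512 components must be enabled together. */
	return (xfeatures & XSTATE_AVX512) == XSTATE_AVX512;
}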
@@ -368,33 +368,58 @@
 #define THERM_LOG_THRESHOLD1		(1 << 9)
 
 /* MISC_ENABLE bits: architectural */
-#define MSR_IA32_MISC_ENABLE_FAST_STRING	(1ULL << 0)
-#define MSR_IA32_MISC_ENABLE_TCC		(1ULL << 1)
-#define MSR_IA32_MISC_ENABLE_EMON		(1ULL << 7)
-#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL	(1ULL << 11)
-#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL	(1ULL << 12)
-#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP	(1ULL << 16)
-#define MSR_IA32_MISC_ENABLE_MWAIT		(1ULL << 18)
-#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID	(1ULL << 22)
-#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE	(1ULL << 23)
-#define MSR_IA32_MISC_ENABLE_XD_DISABLE		(1ULL << 34)
+#define MSR_IA32_MISC_ENABLE_FAST_STRING_BIT		0
+#define MSR_IA32_MISC_ENABLE_FAST_STRING		(1ULL << MSR_IA32_MISC_ENABLE_FAST_STRING_BIT)
+#define MSR_IA32_MISC_ENABLE_TCC_BIT			1
+#define MSR_IA32_MISC_ENABLE_TCC			(1ULL << MSR_IA32_MISC_ENABLE_TCC_BIT)
+#define MSR_IA32_MISC_ENABLE_EMON_BIT			7
+#define MSR_IA32_MISC_ENABLE_EMON			(1ULL << MSR_IA32_MISC_ENABLE_EMON_BIT)
+#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL_BIT		11
+#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL		(1ULL << MSR_IA32_MISC_ENABLE_BTS_UNAVAIL_BIT)
+#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL_BIT		12
+#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL		(1ULL << MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL_BIT)
+#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP_BIT	16
+#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP		(1ULL << MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP_BIT)
+#define MSR_IA32_MISC_ENABLE_MWAIT_BIT			18
+#define MSR_IA32_MISC_ENABLE_MWAIT			(1ULL << MSR_IA32_MISC_ENABLE_MWAIT_BIT)
+#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT		22
+#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID		(1ULL << MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT)
+#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE_BIT		23
+#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE		(1ULL << MSR_IA32_MISC_ENABLE_XTPR_DISABLE_BIT)
+#define MSR_IA32_MISC_ENABLE_XD_DISABLE_BIT		34
+#define MSR_IA32_MISC_ENABLE_XD_DISABLE			(1ULL << MSR_IA32_MISC_ENABLE_XD_DISABLE_BIT)
 
 /* MISC_ENABLE bits: model-specific, meaning may vary from core to core */
-#define MSR_IA32_MISC_ENABLE_X87_COMPAT		(1ULL << 2)
-#define MSR_IA32_MISC_ENABLE_TM1		(1ULL << 3)
-#define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE	(1ULL << 4)
-#define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE	(1ULL << 6)
-#define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK	(1ULL << 8)
-#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE	(1ULL << 9)
-#define MSR_IA32_MISC_ENABLE_FERR		(1ULL << 10)
-#define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX	(1ULL << 10)
-#define MSR_IA32_MISC_ENABLE_TM2		(1ULL << 13)
-#define MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE	(1ULL << 19)
-#define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK	(1ULL << 20)
-#define MSR_IA32_MISC_ENABLE_L1D_CONTEXT	(1ULL << 24)
-#define MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE	(1ULL << 37)
-#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE	(1ULL << 38)
-#define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE	(1ULL << 39)
+#define MSR_IA32_MISC_ENABLE_X87_COMPAT_BIT		2
+#define MSR_IA32_MISC_ENABLE_X87_COMPAT			(1ULL << MSR_IA32_MISC_ENABLE_X87_COMPAT_BIT)
+#define MSR_IA32_MISC_ENABLE_TM1_BIT			3
+#define MSR_IA32_MISC_ENABLE_TM1			(1ULL << MSR_IA32_MISC_ENABLE_TM1_BIT)
+#define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE_BIT	4
+#define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE		(1ULL << MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE_BIT)
+#define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE_BIT	6
+#define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE		(1ULL << MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE_BIT)
+#define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK_BIT		8
+#define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK		(1ULL << MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK_BIT)
+#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT	9
+#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE		(1ULL << MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT)
+#define MSR_IA32_MISC_ENABLE_FERR_BIT			10
+#define MSR_IA32_MISC_ENABLE_FERR			(1ULL << MSR_IA32_MISC_ENABLE_FERR_BIT)
+#define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX_BIT		10
+#define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX		(1ULL << MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX_BIT)
+#define MSR_IA32_MISC_ENABLE_TM2_BIT			13
+#define MSR_IA32_MISC_ENABLE_TM2			(1ULL << MSR_IA32_MISC_ENABLE_TM2_BIT)
+#define MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE_BIT	19
+#define MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE		(1ULL << MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE_BIT)
+#define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK_BIT		20
+#define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK		(1ULL << MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK_BIT)
+#define MSR_IA32_MISC_ENABLE_L1D_CONTEXT_BIT		24
+#define MSR_IA32_MISC_ENABLE_L1D_CONTEXT		(1ULL << MSR_IA32_MISC_ENABLE_L1D_CONTEXT_BIT)
+#define MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE_BIT	37
+#define MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE		(1ULL << MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE_BIT)
+#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE_BIT		38
+#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE		(1ULL << MSR_IA32_MISC_ENABLE_TURBO_DISABLE_BIT)
+#define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE_BIT	39
+#define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE		(1ULL << MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE_BIT)
 
 #define MSR_IA32_TSC_DEADLINE		0x000006E0
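Each MISC_ENABLE flag now exists in two forms: the bare bit number (_BIT) feeds the msr_set_bit()/msr_clear_bit() helpers introduced elsewhere in this merge, while the shifted 1ULL mask is still what gets tested against a value read with rdmsrl(). A brief sketch of the intended pairing (illustrative, not taken from the diff):

/* Illustrative only: mask form for testing, _BIT form for flipping. */
static void limit_cpuid_off_example(void)
{
	u64 misc;

	rdmsrl(MSR_IA32_MISC_ENABLE, misc);
	if (misc & MSR_IA32_MISC_ENABLE_LIMIT_CPUID)
		msr_clear_bit(MSR_IA32_MISC_ENABLE,
			      MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT);
}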
@@ -218,7 +218,7 @@ static void amd_k7_smp_check(struct cpuinfo_x86 *c)
 	 */
 	WARN_ONCE(1, "WARNING: This combination of AMD"
 		" processors is not suitable for SMP.\n");
-	add_taint(TAINT_UNSAFE_SMP, LOCKDEP_NOW_UNRELIABLE);
+	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
 }
 
 static void init_amd_k7(struct cpuinfo_x86 *c)
@@ -233,9 +233,7 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
 	if (c->x86_model >= 6 && c->x86_model <= 10) {
 		if (!cpu_has(c, X86_FEATURE_XMM)) {
 			printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
-			rdmsr(MSR_K7_HWCR, l, h);
-			l &= ~0x00008000;
-			wrmsr(MSR_K7_HWCR, l, h);
+			msr_clear_bit(MSR_K7_HWCR, 15);
 			set_cpu_cap(c, X86_FEATURE_XMM);
 		}
 	}
@@ -509,14 +507,8 @@ static void early_init_amd(struct cpuinfo_x86 *c)
 #endif
 
 	/* F16h erratum 793, CVE-2013-6885 */
-	if (c->x86 == 0x16 && c->x86_model <= 0xf) {
-		u64 val;
-
-		rdmsrl(MSR_AMD64_LS_CFG, val);
-		if (!(val & BIT(15)))
-			wrmsrl(MSR_AMD64_LS_CFG, val | BIT(15));
-	}
+	if (c->x86 == 0x16 && c->x86_model <= 0xf)
+		msr_set_bit(MSR_AMD64_LS_CFG, 15);
 }
 
 static const int amd_erratum_383[];
@@ -536,11 +528,8 @@ static void init_amd(struct cpuinfo_x86 *c)
 	 * Errata 63 for SH-B3 steppings
 	 * Errata 122 for all steppings (F+ have it disabled by default)
 	 */
-	if (c->x86 == 0xf) {
-		rdmsrl(MSR_K7_HWCR, value);
-		value |= 1 << 6;
-		wrmsrl(MSR_K7_HWCR, value);
-	}
+	if (c->x86 == 0xf)
+		msr_set_bit(MSR_K7_HWCR, 6);
 #endif
 
 	early_init_amd(c);
@@ -623,14 +612,11 @@ static void init_amd(struct cpuinfo_x86 *c)
 	    (c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
 	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {
 
-		if (!rdmsrl_safe(0xc0011005, &value)) {
-			value |= 1ULL << 54;
-			wrmsrl_safe(0xc0011005, value);
+		if (msr_set_bit(0xc0011005, 54) > 0) {
 			rdmsrl(0xc0011005, value);
-			if (value & (1ULL << 54)) {
+			if (value & BIT_64(54)) {
 				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
-				printk(KERN_INFO FW_INFO "CPU: Re-enabling "
-				       "disabled Topology Extensions Support\n");
+				pr_info(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
 			}
 		}
 	}
@@ -709,19 +695,12 @@ static void init_amd(struct cpuinfo_x86 *c)
 		 * Disable GART TLB Walk Errors on Fam10h. We do this here
 		 * because this is always needed when GART is enabled, even in a
 		 * kernel which has no MCE support built in.
-		 * BIOS should disable GartTlbWlk Errors themself. If
-		 * it doesn't do it here as suggested by the BKDG.
+		 * BIOS should disable GartTlbWlk Errors already. If
+		 * it doesn't, do it here as suggested by the BKDG.
 		 *
 		 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
 		 */
-		u64 mask;
-		int err;
-
-		err = rdmsrl_safe(MSR_AMD64_MCx_MASK(4), &mask);
-		if (err == 0) {
-			mask |= (1 << 10);
-			wrmsrl_safe(MSR_AMD64_MCx_MASK(4), mask);
-		}
+		msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);
 
 		/*
 		 * On family 10h BIOS may not have properly enabled WC+ support,
@@ -733,10 +712,7 @@ static void init_amd(struct cpuinfo_x86 *c)
 	 * NOTE: we want to use the _safe accessors so as not to #GP kvm
 	 * guests on older kvm hosts.
 	 */
-	rdmsrl_safe(MSR_AMD64_BU_CFG2, &value);
-	value &= ~(1ULL << 24);
-	wrmsrl_safe(MSR_AMD64_BU_CFG2, value);
+	msr_clear_bit(MSR_AMD64_BU_CFG2, 24);
 
 	if (cpu_has_amd_erratum(c, amd_erratum_383))
 		set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
@@ -31,11 +31,8 @@ static void early_init_intel(struct cpuinfo_x86 *c)
 	/* Unmask CPUID levels if masked: */
 	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
-		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
-
-		if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) {
-			misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
-			wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
+		if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
+				  MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0) {
 			c->cpuid_level = cpuid_eax(0);
 			get_cpu_cap(c);
 		}
@@ -129,16 +126,10 @@ static void early_init_intel(struct cpuinfo_x86 *c)
 	 * Ingo Molnar reported a Pentium D (model 6) and a Xeon
 	 * (model 2) with the same problem.
 	 */
-	if (c->x86 == 15) {
-		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
-
-		if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) {
-			printk(KERN_INFO "kmemcheck: Disabling fast string operations\n");
-
-			misc_enable &= ~MSR_IA32_MISC_ENABLE_FAST_STRING;
-			wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
-		}
-	}
+	if (c->x86 == 15)
+		if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
+				  MSR_IA32_MISC_ENABLE_FAST_STRING_BIT) > 0)
+			pr_info("kmemcheck: Disabling fast string operations\n");
 #endif
 
 	/*
@@ -195,10 +186,16 @@ static void intel_smp_check(struct cpuinfo_x86 *c)
 	}
 }
 
-static void intel_workarounds(struct cpuinfo_x86 *c)
+static int forcepae;
+static int __init forcepae_setup(char *__unused)
 {
-	unsigned long lo, hi;
+	forcepae = 1;
+	return 1;
+}
+__setup("forcepae", forcepae_setup);
 
+static void intel_workarounds(struct cpuinfo_x86 *c)
+{
 #ifdef CONFIG_X86_F00F_BUG
 	/*
 	 * All current models of Pentium and Pentium with MMX technology CPUs
@@ -224,17 +221,27 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
 	if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
 		clear_cpu_cap(c, X86_FEATURE_SEP);
 
+	/*
+	 * PAE CPUID issue: many Pentium M report no PAE but may have a
+	 * functionally usable PAE implementation.
+	 * Forcefully enable PAE if kernel parameter "forcepae" is present.
+	 */
+	if (forcepae) {
+		printk(KERN_WARNING "PAE forced!\n");
+		set_cpu_cap(c, X86_FEATURE_PAE);
+		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
+	}
+
 	/*
 	 * P4 Xeon errata 037 workaround.
 	 * Hardware prefetcher may cause stale data to be loaded into the cache.
 	 */
 	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
-		rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
-		if ((lo & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE) == 0) {
-			printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
-			printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
-			lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE;
-			wrmsr(MSR_IA32_MISC_ENABLE, lo, hi);
+		if (msr_set_bit(MSR_IA32_MISC_ENABLE,
+				MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT)
+		    > 0) {
+			pr_info("CPU: C0 stepping P4 Xeon detected.\n");
+			pr_info("CPU: Disabling hardware prefetching (Errata 037)\n");
 		}
 	}
@@ -8,7 +8,7 @@ struct msr *msrs_alloc(void)
 	msrs = alloc_percpu(struct msr);
 	if (!msrs) {
-		pr_warning("%s: error allocating msrs\n", __func__);
+		pr_warn("%s: error allocating msrs\n", __func__);
 		return NULL;
 	}
 
@@ -21,3 +21,90 @@ void msrs_free(struct msr *msrs)
 	free_percpu(msrs);
 }
 EXPORT_SYMBOL(msrs_free);
+
+/**
+ * Read an MSR with error handling
+ *
+ * @msr: MSR to read
+ * @m: value to read into
+ *
+ * It returns read data only on success, otherwise it doesn't change the output
+ * argument @m.
+ */
+int msr_read(u32 msr, struct msr *m)
+{
+	int err;
+	u64 val;
+
+	err = rdmsrl_safe(msr, &val);
+	if (!err)
+		m->q = val;
+
+	return err;
+}
+
+/**
+ * Write an MSR with error handling
+ *
+ * @msr: MSR to write
+ * @m: value to write
+ */
+int msr_write(u32 msr, struct msr *m)
+{
+	return wrmsrl_safe(msr, m->q);
+}
+
+static inline int __flip_bit(u32 msr, u8 bit, bool set)
+{
+	struct msr m, m1;
+	int err = -EINVAL;
+
+	if (bit > 63)
+		return err;
+
+	err = msr_read(msr, &m);
+	if (err)
+		return err;
+
+	m1 = m;
+	if (set)
+		m1.q |=  BIT_64(bit);
+	else
+		m1.q &= ~BIT_64(bit);
+
+	if (m1.q == m.q)
+		return 0;
+
+	err = msr_write(msr, &m1);
+	if (err)
+		return err;
+
+	return 1;
+}
+
+/**
+ * Set @bit in a MSR @msr.
+ *
+ * Retval:
+ * < 0: An error was encountered.
+ * = 0: Bit was already set.
+ * > 0: Hardware accepted the MSR write.
+ */
+int msr_set_bit(u32 msr, u8 bit)
+{
+	return __flip_bit(msr, bit, true);
+}
+
+/**
+ * Clear @bit in a MSR @msr.
+ *
+ * Retval:
+ * < 0: An error was encountered.
+ * = 0: Bit was already cleared.
+ * > 0: Hardware accepted the MSR write.
+ */
+int msr_clear_bit(u32 msr, u8 bit)
+{
+	return __flip_bit(msr, bit, false);
+}
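The tri-state return value is what lets callers such as the TOPOEXT re-enable in amd.c above distinguish "nothing needed to change" from "the hardware actually took the write". A hedged usage sketch (the function name and log messages are placeholders, not from this merge):

/* Illustrative caller: act on each of the three possible outcomes. */
static void prefetch_disable_example(void)
{
	int ret = msr_set_bit(MSR_IA32_MISC_ENABLE,
			      MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT);

	if (ret < 0)
		pr_warn("MSR access failed: %d\n", ret);
	else if (ret == 0)
		pr_info("bit was already set, nothing written\n");
	else
		pr_info("hardware accepted the new MSR value\n");
}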
@@ -126,8 +126,8 @@ within(unsigned long addr, unsigned long start, unsigned long end)
  * @vaddr:	virtual start address
  * @size:	number of bytes to flush
  *
- * clflush is an unordered instruction which needs fencing with mfence
- * to avoid ordering issues.
+ * clflushopt is an unordered instruction which needs fencing with mfence or
+ * sfence to avoid ordering issues.
  */
 void clflush_cache_range(void *vaddr, unsigned int size)
 {
@@ -136,11 +136,11 @@ void clflush_cache_range(void *vaddr, unsigned int size)
 	mb();
 
 	for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
-		clflush(vaddr);
+		clflushopt(vaddr);
 	/*
 	 * Flush any possible final partial cacheline:
 	 */
-	clflush(vend);
+	clflushopt(vend);
 
 	mb();
 }
@@ -32,6 +32,12 @@
 #include <drm/drmP.h>
 
 #if defined(CONFIG_X86)
+
+/*
+ * clflushopt is an unordered instruction which needs fencing with mfence or
+ * sfence to avoid ordering issues. For drm_clflush_page this fencing happens
+ * in the caller.
+ */
 static void
 drm_clflush_page(struct page *page)
 {
@@ -44,7 +50,7 @@ drm_clflush_page(struct page *page)
 	page_virtual = kmap_atomic(page);
 	for (i = 0; i < PAGE_SIZE; i += size)
-		clflush(page_virtual + i);
+		clflushopt(page_virtual + i);
 	kunmap_atomic(page_virtual);
 }
@@ -133,7 +139,7 @@ drm_clflush_virt_range(char *addr, unsigned long length)
 		mb();
 		for (; addr < end; addr += boot_cpu_data.x86_clflush_size)
 			clflush(addr);
-		clflush(end - 1);
+		clflushopt(end - 1);
 		mb();
 		return;
 	}
@@ -458,7 +458,7 @@ extern enum system_states {
 #define TAINT_PROPRIETARY_MODULE	0
 #define TAINT_FORCED_MODULE		1
-#define TAINT_UNSAFE_SMP		2
+#define TAINT_CPU_OUT_OF_SPEC		2
 #define TAINT_FORCED_RMMOD		3
 #define TAINT_MACHINE_CHECK		4
 #define TAINT_BAD_PAGE			5
@@ -1015,7 +1015,7 @@ static size_t module_flags_taint(struct module *mod, char *buf)
 		buf[l++] = 'C';
 	/*
 	 * TAINT_FORCED_RMMOD: could be added.
-	 * TAINT_UNSAFE_SMP, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't
+	 * TAINT_CPU_OUT_OF_SPEC, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't
 	 * apply to modules.
 	 */
 	return l;
@@ -199,7 +199,7 @@ struct tnt {
 static const struct tnt tnts[] = {
 	{ TAINT_PROPRIETARY_MODULE,	'P', 'G' },
 	{ TAINT_FORCED_MODULE,		'F', ' ' },
-	{ TAINT_UNSAFE_SMP,		'S', ' ' },
+	{ TAINT_CPU_OUT_OF_SPEC,	'S', ' ' },
 	{ TAINT_FORCED_RMMOD,		'R', ' ' },
 	{ TAINT_MACHINE_CHECK,		'M', ' ' },
 	{ TAINT_BAD_PAGE,		'B', ' ' },