Commit 67c707e4 authored by Linus Torvalds

Merge branch 'x86-cleanups-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 cleanups from Ingo Molnar:
 "The main changes in this cycle were:

   - code patching and cpu_has cleanups (Borislav Petkov)

   - paravirt cleanups (Juergen Gross)

   - TSC cleanup (Thomas Gleixner)

   - ptrace cleanup (Chen Gang)"

* 'x86-cleanups-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  arch/x86/kernel/ptrace.c: Remove unused arg_offs_table
  x86/mm: Align macro defines
  x86/cpu: Provide a config option to disable static_cpu_has
  x86/cpufeature: Remove unused and seldomly used cpu_has_xx macros
  x86/cpufeature: Cleanup get_cpu_cap()
  x86/cpufeature: Move some of the scattered feature bits to x86_capability
  x86/paravirt: Remove paravirt ops pmd_update[_defer] and pte_update_defer
  x86/paravirt: Remove unused pv_apic_ops structure
  x86/tsc: Remove unused tsc_pre_init() hook
  x86: Remove unused function cpu_has_ht_siblings()
  x86/paravirt: Kill some unused patching functions
parents 463eb8ac 0105c8d8
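
The bulk of the diff below is the mechanical part of the cpufeature cleanup: open-coding the old cpu_has_xx convenience macros as boot_cpu_has(X86_FEATURE_XX) tests. A minimal sketch of that pattern, assuming a made-up my_driver_init() that is not part of this merge, looks like this:

```c
#include <linux/init.h>
#include <asm/cpufeature.h>

/*
 * Illustrative sketch only: my_driver_init() is hypothetical and not part of
 * this merge; it just shows the conversion applied throughout the diff.
 */
static int __init my_driver_init(void)
{
	/* Old style was a per-feature wrapper macro: if (!cpu_has_ssse3) ... */

	/* New style tests the boot CPU's capability words directly: */
	if (!boot_cpu_has(X86_FEATURE_SSSE3))
		return -ENODEV;

	return 0;
}
```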
@@ -349,6 +349,17 @@ config X86_FEATURE_NAMES
 	  If in doubt, say Y.
 
+config X86_FAST_FEATURE_TESTS
+	bool "Fast CPU feature tests" if EMBEDDED
+	default y
+	---help---
+	  Some fast-paths in the kernel depend on the capabilities of the CPU.
+	  Say Y here for the kernel to patch in the appropriate code at runtime
+	  based on the capabilities of the CPU. The infrastructure for patching
+	  code at runtime takes up some additional space; space-constrained
+	  embedded systems may wish to say N here to produce smaller, slightly
+	  slower code.
+
 config X86_X2APIC
 	bool "Support x2apic"
 	depends on X86_LOCAL_APIC && X86_64 && (IRQ_REMAP || HYPERVISOR_GUEST)
...
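
The new X86_FAST_FEATURE_TESTS option gates the self-patching static_cpu_has*() machinery (see the #if condition added in cpufeature.h further down). As a hedged sketch of the disabled case, an assumption about the fallback rather than a verbatim quote of the patch, the fast tests simply degrade to boot_cpu_has():

```c
/*
 * Illustrative fallback only (assumption, not quoted from this merge):
 * without X86_FAST_FEATURE_TESTS the runtime-patched tests become ordinary
 * reads of the boot CPU's capability words.
 */
#ifndef CONFIG_X86_FAST_FEATURE_TESTS
# define static_cpu_has(bit)		boot_cpu_has(bit)
# define static_cpu_has_safe(bit)	boot_cpu_has(bit)
#endif
```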
@@ -125,7 +125,7 @@ static struct crypto_alg alg = {
 static int __init chacha20_simd_mod_init(void)
 {
-	if (!cpu_has_ssse3)
+	if (!boot_cpu_has(X86_FEATURE_SSSE3))
 		return -ENODEV;
 #ifdef CONFIG_AS_AVX2
...
@@ -257,7 +257,7 @@ static int __init crc32c_intel_mod_init(void)
 	if (!x86_match_cpu(crc32c_cpu_id))
 		return -ENODEV;
 #ifdef CONFIG_X86_64
-	if (cpu_has_pclmulqdq) {
+	if (boot_cpu_has(X86_FEATURE_PCLMULQDQ)) {
 		alg.update = crc32c_pcl_intel_update;
 		alg.finup = crc32c_pcl_intel_finup;
 		alg.digest = crc32c_pcl_intel_digest;
...
@@ -109,6 +109,6 @@ static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
 #endif
 
-#define system_has_cmpxchg_double()	cpu_has_cx8
+#define system_has_cmpxchg_double()	boot_cpu_has(X86_FEATURE_CX8)
 
 #endif /* _ASM_X86_CMPXCHG_32_H */
@@ -18,6 +18,6 @@ static inline void set_64bit(volatile u64 *ptr, u64 val)
 	cmpxchg_local((ptr), (o), (n));	\
 })
 
-#define system_has_cmpxchg_double()	cpu_has_cx16
+#define system_has_cmpxchg_double()	boot_cpu_has(X86_FEATURE_CX16)
 
 #endif /* _ASM_X86_CMPXCHG_64_H */
@@ -12,7 +12,7 @@
 #include <asm/disabled-features.h>
 #endif
 
-#define NCAPINTS	14	/* N 32-bit words worth of info */
+#define NCAPINTS	16	/* N 32-bit words worth of info */
 #define NBUGINTS	1	/* N 32-bit bug flags */
 
 /*
@@ -181,22 +181,17 @@
 /*
  * Auxiliary flags: Linux defined - For features scattered in various
- * CPUID levels like 0x6, 0xA etc, word 7
+ * CPUID levels like 0x6, 0xA etc, word 7.
+ *
+ * Reuse free bits when adding new feature flags!
  */
-#define X86_FEATURE_IDA		( 7*32+ 0) /* Intel Dynamic Acceleration */
-#define X86_FEATURE_ARAT	( 7*32+ 1) /* Always Running APIC Timer */
 #define X86_FEATURE_CPB		( 7*32+ 2) /* AMD Core Performance Boost */
 #define X86_FEATURE_EPB		( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
-#define X86_FEATURE_PLN		( 7*32+ 5) /* Intel Power Limit Notification */
-#define X86_FEATURE_PTS		( 7*32+ 6) /* Intel Package Thermal Status */
-#define X86_FEATURE_DTHERM	( 7*32+ 7) /* Digital Thermal Sensor */
 #define X86_FEATURE_HW_PSTATE	( 7*32+ 8) /* AMD HW-PState */
 #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
-#define X86_FEATURE_HWP		( 7*32+10) /* "hwp" Intel HWP */
-#define X86_FEATURE_HWP_NOTIFY	( 7*32+11) /* Intel HWP_NOTIFY */
-#define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+12) /* Intel HWP_ACT_WINDOW */
-#define X86_FEATURE_HWP_EPP	( 7*32+13) /* Intel HWP_EPP */
-#define X86_FEATURE_HWP_PKG_REQ	( 7*32+14) /* Intel HWP_PKG_REQ */
 #define X86_FEATURE_INTEL_PT	( 7*32+15) /* Intel Processor Trace */
 
 /* Virtualization flags: Linux defined, word 8 */
@@ -205,16 +200,7 @@
 #define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
 #define X86_FEATURE_EPT		( 8*32+ 3) /* Intel Extended Page Table */
 #define X86_FEATURE_VPID	( 8*32+ 4) /* Intel Virtual Processor ID */
-#define X86_FEATURE_NPT		( 8*32+ 5) /* AMD Nested Page Table support */
-#define X86_FEATURE_LBRV	( 8*32+ 6) /* AMD LBR Virtualization support */
-#define X86_FEATURE_SVML	( 8*32+ 7) /* "svm_lock" AMD SVM locking MSR */
-#define X86_FEATURE_NRIPS	( 8*32+ 8) /* "nrip_save" AMD SVM next_rip save */
-#define X86_FEATURE_TSCRATEMSR	( 8*32+ 9) /* "tsc_scale" AMD TSC scaling support */
-#define X86_FEATURE_VMCBCLEAN	( 8*32+10) /* "vmcb_clean" AMD VMCB clean bits support */
-#define X86_FEATURE_FLUSHBYASID	( 8*32+11) /* AMD flush-by-ASID support */
-#define X86_FEATURE_DECODEASSISTS ( 8*32+12) /* AMD Decode Assists support */
-#define X86_FEATURE_PAUSEFILTER	( 8*32+13) /* AMD filtered pause intercept */
-#define X86_FEATURE_PFTHRESHOLD	( 8*32+14) /* AMD pause filter threshold */
 #define X86_FEATURE_VMMCALL	( 8*32+15) /* Prefer vmmcall to vmcall */
 #define X86_FEATURE_XENPV	( 8*32+16) /* "" Xen paravirtual guest */
@@ -259,6 +245,30 @@
 /* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */
 #define X86_FEATURE_CLZERO	(13*32+0) /* CLZERO instruction */
 
+/* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */
+#define X86_FEATURE_DTHERM	(14*32+ 0) /* Digital Thermal Sensor */
+#define X86_FEATURE_IDA		(14*32+ 1) /* Intel Dynamic Acceleration */
+#define X86_FEATURE_ARAT	(14*32+ 2) /* Always Running APIC Timer */
+#define X86_FEATURE_PLN		(14*32+ 4) /* Intel Power Limit Notification */
+#define X86_FEATURE_PTS		(14*32+ 6) /* Intel Package Thermal Status */
+#define X86_FEATURE_HWP		(14*32+ 7) /* Intel Hardware P-states */
+#define X86_FEATURE_HWP_NOTIFY	(14*32+ 8) /* HWP Notification */
+#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */
+#define X86_FEATURE_HWP_EPP	(14*32+10) /* HWP Energy Perf. Preference */
+#define X86_FEATURE_HWP_PKG_REQ	(14*32+11) /* HWP Package Level Request */
+
+/* AMD SVM Feature Identification, CPUID level 0x8000000a (edx), word 15 */
+#define X86_FEATURE_NPT		(15*32+ 0) /* Nested Page Table support */
+#define X86_FEATURE_LBRV	(15*32+ 1) /* LBR Virtualization support */
+#define X86_FEATURE_SVML	(15*32+ 2) /* "svm_lock" SVM locking MSR */
+#define X86_FEATURE_NRIPS	(15*32+ 3) /* "nrip_save" SVM next_rip save */
+#define X86_FEATURE_TSCRATEMSR	(15*32+ 4) /* "tsc_scale" TSC scaling support */
+#define X86_FEATURE_VMCBCLEAN	(15*32+ 5) /* "vmcb_clean" VMCB clean bits support */
+#define X86_FEATURE_FLUSHBYASID	(15*32+ 6) /* flush-by-ASID support */
+#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */
+#define X86_FEATURE_PAUSEFILTER	(15*32+10) /* filtered pause intercept */
+#define X86_FEATURE_PFTHRESHOLD	(15*32+12) /* pause filter threshold */
+
 /*
  * BUG word(s)
  */
@@ -279,6 +289,26 @@
 #include <asm/asm.h>
 #include <linux/bitops.h>
 
+enum cpuid_leafs
+{
+	CPUID_1_EDX		= 0,
+	CPUID_8000_0001_EDX,
+	CPUID_8086_0001_EDX,
+	CPUID_LNX_1,
+	CPUID_1_ECX,
+	CPUID_C000_0001_EDX,
+	CPUID_8000_0001_ECX,
+	CPUID_LNX_2,
+	CPUID_LNX_3,
+	CPUID_7_0_EBX,
+	CPUID_D_1_EAX,
+	CPUID_F_0_EDX,
+	CPUID_F_1_EDX,
+	CPUID_8000_0008_EBX,
+	CPUID_6_EAX,
+	CPUID_8000_000A_EDX,
+};
+
 #ifdef CONFIG_X86_FEATURE_NAMES
 extern const char * const x86_cap_flags[NCAPINTS*32];
 extern const char * const x86_power_flags[32];
@@ -356,60 +386,31 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
 } while (0)
 
 #define cpu_has_fpu		boot_cpu_has(X86_FEATURE_FPU)
-#define cpu_has_de		boot_cpu_has(X86_FEATURE_DE)
 #define cpu_has_pse		boot_cpu_has(X86_FEATURE_PSE)
 #define cpu_has_tsc		boot_cpu_has(X86_FEATURE_TSC)
 #define cpu_has_pge		boot_cpu_has(X86_FEATURE_PGE)
 #define cpu_has_apic		boot_cpu_has(X86_FEATURE_APIC)
-#define cpu_has_sep		boot_cpu_has(X86_FEATURE_SEP)
-#define cpu_has_mtrr		boot_cpu_has(X86_FEATURE_MTRR)
-#define cpu_has_mmx		boot_cpu_has(X86_FEATURE_MMX)
 #define cpu_has_fxsr		boot_cpu_has(X86_FEATURE_FXSR)
 #define cpu_has_xmm		boot_cpu_has(X86_FEATURE_XMM)
 #define cpu_has_xmm2		boot_cpu_has(X86_FEATURE_XMM2)
-#define cpu_has_xmm3		boot_cpu_has(X86_FEATURE_XMM3)
-#define cpu_has_ssse3		boot_cpu_has(X86_FEATURE_SSSE3)
 #define cpu_has_aes		boot_cpu_has(X86_FEATURE_AES)
 #define cpu_has_avx		boot_cpu_has(X86_FEATURE_AVX)
 #define cpu_has_avx2		boot_cpu_has(X86_FEATURE_AVX2)
-#define cpu_has_ht		boot_cpu_has(X86_FEATURE_HT)
-#define cpu_has_nx		boot_cpu_has(X86_FEATURE_NX)
-#define cpu_has_xstore		boot_cpu_has(X86_FEATURE_XSTORE)
-#define cpu_has_xstore_enabled	boot_cpu_has(X86_FEATURE_XSTORE_EN)
-#define cpu_has_xcrypt		boot_cpu_has(X86_FEATURE_XCRYPT)
-#define cpu_has_xcrypt_enabled	boot_cpu_has(X86_FEATURE_XCRYPT_EN)
-#define cpu_has_ace2		boot_cpu_has(X86_FEATURE_ACE2)
-#define cpu_has_ace2_enabled	boot_cpu_has(X86_FEATURE_ACE2_EN)
-#define cpu_has_phe		boot_cpu_has(X86_FEATURE_PHE)
-#define cpu_has_phe_enabled	boot_cpu_has(X86_FEATURE_PHE_EN)
-#define cpu_has_pmm		boot_cpu_has(X86_FEATURE_PMM)
-#define cpu_has_pmm_enabled	boot_cpu_has(X86_FEATURE_PMM_EN)
-#define cpu_has_ds		boot_cpu_has(X86_FEATURE_DS)
-#define cpu_has_pebs		boot_cpu_has(X86_FEATURE_PEBS)
 #define cpu_has_clflush		boot_cpu_has(X86_FEATURE_CLFLUSH)
-#define cpu_has_bts		boot_cpu_has(X86_FEATURE_BTS)
 #define cpu_has_gbpages		boot_cpu_has(X86_FEATURE_GBPAGES)
 #define cpu_has_arch_perfmon	boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
 #define cpu_has_pat		boot_cpu_has(X86_FEATURE_PAT)
-#define cpu_has_xmm4_1		boot_cpu_has(X86_FEATURE_XMM4_1)
-#define cpu_has_xmm4_2		boot_cpu_has(X86_FEATURE_XMM4_2)
 #define cpu_has_x2apic		boot_cpu_has(X86_FEATURE_X2APIC)
 #define cpu_has_xsave		boot_cpu_has(X86_FEATURE_XSAVE)
-#define cpu_has_xsaveopt	boot_cpu_has(X86_FEATURE_XSAVEOPT)
 #define cpu_has_xsaves		boot_cpu_has(X86_FEATURE_XSAVES)
 #define cpu_has_osxsave		boot_cpu_has(X86_FEATURE_OSXSAVE)
 #define cpu_has_hypervisor	boot_cpu_has(X86_FEATURE_HYPERVISOR)
-#define cpu_has_pclmulqdq	boot_cpu_has(X86_FEATURE_PCLMULQDQ)
-#define cpu_has_perfctr_core	boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
-#define cpu_has_perfctr_nb	boot_cpu_has(X86_FEATURE_PERFCTR_NB)
-#define cpu_has_perfctr_l2	boot_cpu_has(X86_FEATURE_PERFCTR_L2)
-#define cpu_has_cx8		boot_cpu_has(X86_FEATURE_CX8)
-#define cpu_has_cx16		boot_cpu_has(X86_FEATURE_CX16)
-#define cpu_has_eager_fpu	boot_cpu_has(X86_FEATURE_EAGER_FPU)
-#define cpu_has_topoext		boot_cpu_has(X86_FEATURE_TOPOEXT)
-#define cpu_has_bpext		boot_cpu_has(X86_FEATURE_BPEXT)
+/*
+ * Do not add any more of those clumsy macros - use static_cpu_has_safe() for
+ * fast paths and boot_cpu_has() otherwise!
+ */
 
-#if __GNUC__ >= 4
+#if __GNUC__ >= 4 && defined(CONFIG_X86_FAST_FEATURE_TESTS)
 extern void warn_pre_alternatives(void);
 extern bool __static_cpu_has_safe(u16 bit);
...
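
The replacement comment above spells out the new convention: static_cpu_has_safe() for hot paths that get patched via alternatives, boot_cpu_has() everywhere else. A small usage sketch, with hypothetical helpers that are not part of this merge:

```c
#include <linux/init.h>
#include <asm/cpufeature.h>

/* Hot path: the test is patched in at alternatives time, so the common
 * case costs no memory load per call. */
static bool use_avx2_fast_path(void)
{
	return static_cpu_has_safe(X86_FEATURE_AVX2);
}

/* Init/slow path: a plain test of the boot CPU's capability words. */
static int __init my_feature_probe(void)
{
	return boot_cpu_has(X86_FEATURE_AES) ? 0 : -ENODEV;
}
```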
@@ -291,15 +291,6 @@ static inline void slow_down_io(void)
 #endif
 }
 
-#ifdef CONFIG_SMP
-static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
-				    unsigned long start_esp)
-{
-	PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
-		    phys_apicid, start_eip, start_esp);
-}
-#endif
-
 static inline void paravirt_activate_mm(struct mm_struct *prev,
 					struct mm_struct *next)
 {
@@ -381,23 +372,6 @@ static inline void pte_update(struct mm_struct *mm, unsigned long addr,
 {
 	PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
 }
-
-static inline void pmd_update(struct mm_struct *mm, unsigned long addr,
-			      pmd_t *pmdp)
-{
-	PVOP_VCALL3(pv_mmu_ops.pmd_update, mm, addr, pmdp);
-}
-
-static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
-				    pte_t *ptep)
-{
-	PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
-}
-
-static inline void pmd_update_defer(struct mm_struct *mm, unsigned long addr,
-				    pmd_t *pmdp)
-{
-	PVOP_VCALL3(pv_mmu_ops.pmd_update_defer, mm, addr, pmdp);
-}
 
 static inline pte_t __pte(pteval_t val)
 {
...
@@ -203,14 +203,6 @@ struct pv_irq_ops {
 #endif
 };
 
-struct pv_apic_ops {
-#ifdef CONFIG_X86_LOCAL_APIC
-	void (*startup_ipi_hook)(int phys_apicid,
-				 unsigned long start_eip,
-				 unsigned long start_esp);
-#endif
-};
-
 struct pv_mmu_ops {
 	unsigned long (*read_cr2)(void);
 	void (*write_cr2)(unsigned long);
@@ -262,12 +254,6 @@ struct pv_mmu_ops {
 			   pmd_t *pmdp, pmd_t pmdval);
 	void (*pte_update)(struct mm_struct *mm, unsigned long addr,
 			   pte_t *ptep);
-	void (*pte_update_defer)(struct mm_struct *mm,
-				 unsigned long addr, pte_t *ptep);
-	void (*pmd_update)(struct mm_struct *mm, unsigned long addr,
-			   pmd_t *pmdp);
-	void (*pmd_update_defer)(struct mm_struct *mm,
-				 unsigned long addr, pmd_t *pmdp);
 
 	pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
 					pte_t *ptep);
@@ -342,7 +328,6 @@ struct paravirt_patch_template {
 	struct pv_time_ops pv_time_ops;
 	struct pv_cpu_ops pv_cpu_ops;
 	struct pv_irq_ops pv_irq_ops;
-	struct pv_apic_ops pv_apic_ops;
 	struct pv_mmu_ops pv_mmu_ops;
 	struct pv_lock_ops pv_lock_ops;
 };
@@ -352,7 +337,6 @@ extern struct pv_init_ops pv_init_ops;
 extern struct pv_time_ops pv_time_ops;
 extern struct pv_cpu_ops pv_cpu_ops;
 extern struct pv_irq_ops pv_irq_ops;
-extern struct pv_apic_ops pv_apic_ops;
 extern struct pv_mmu_ops pv_mmu_ops;
 extern struct pv_lock_ops pv_lock_ops;
@@ -390,10 +374,8 @@ extern struct pv_lock_ops pv_lock_ops;
 	__visible extern const char start_##ops##_##name[], end_##ops##_##name[]; \
 	asm(NATIVE_LABEL("start_", ops, name) code NATIVE_LABEL("end_", ops, name))
 
-unsigned paravirt_patch_nop(void);
 unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
 unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
-unsigned paravirt_patch_ignore(unsigned len);
 unsigned paravirt_patch_call(void *insnbuf,
 			     const void *target, u16 tgt_clobbers,
 			     unsigned long addr, u16 site_clobbers,
...
@@ -69,9 +69,6 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
 #define pmd_clear(pmd)			native_pmd_clear(pmd)
 
 #define pte_update(mm, addr, ptep)		do { } while (0)
-#define pte_update_defer(mm, addr, ptep)	do { } while (0)
-#define pmd_update(mm, addr, ptep)		do { } while (0)
-#define pmd_update_defer(mm, addr, ptep)	do { } while (0)
 
 #define pgd_val(x)	native_pgd_val(x)
 #define __pgd(x)	native_make_pgd(x)
@@ -731,14 +728,9 @@ static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
  * updates should either be sets, clears, or set_pte_atomic for P->P
  * transitions, which means this hook should only be called for user PTEs.
  * This hook implies a P->P protection or access change has taken place, which
- * requires a subsequent TLB flush. The notification can optionally be delayed
- * until the TLB flush event by using the pte_update_defer form of the
- * interface, but care must be taken to assure that the flush happens while
- * still holding the same page table lock so that the shadow and primary pages
- * do not become out of sync on SMP.
+ * requires a subsequent TLB flush.
  */
 #define pte_update(mm, addr, ptep)		do { } while (0)
-#define pte_update_defer(mm, addr, ptep)	do { } while (0)
 #endif
 
 /*
@@ -830,9 +822,7 @@ static inline int pmd_write(pmd_t pmd)
 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
 				       pmd_t *pmdp)
 {
-	pmd_t pmd = native_pmdp_get_and_clear(pmdp);
-	pmd_update(mm, addr, pmdp);
-	return pmd;
+	return native_pmdp_get_and_clear(pmdp);
 }
 
 #define __HAVE_ARCH_PMDP_SET_WRPROTECT
@@ -840,7 +830,6 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
 				      unsigned long addr, pmd_t *pmdp)
 {
 	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
-	pmd_update(mm, addr, pmdp);
 }
 
 /*
...
@@ -21,15 +21,6 @@
 extern int smp_num_siblings;
 extern unsigned int num_processors;
 
-static inline bool cpu_has_ht_siblings(void)
-{
-	bool has_siblings = false;
-#ifdef CONFIG_SMP
-	has_siblings = cpu_has_ht && smp_num_siblings > 1;
-#endif
-	return has_siblings;
-}
-
 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
 /* cpus sharing the last level cache: */
@@ -74,9 +65,6 @@ struct smp_ops {
 extern void set_cpu_sibling_map(int cpu);
 
 #ifdef CONFIG_SMP
-#ifndef CONFIG_PARAVIRT
-#define startup_ipi_hook(phys_apicid, start_eip, start_esp) do { } while (0)
-#endif
 extern struct smp_ops smp_ops;
 
 static inline void smp_send_stop(void)
...
@@ -82,13 +82,11 @@ struct x86_init_paging {
  * struct x86_init_timers - platform specific timer setup
  * @setup_perpcu_clockev:	set up the per cpu clock event device for the
  *				boot cpu
- * @tsc_pre_init:		platform function called before TSC init
  * @timer_init:			initialize the platform timer (default PIT/HPET)
  * @wallclock_init:		init the wallclock device
  */
 struct x86_init_timers {
 	void (*setup_percpu_clockev)(void);
-	void (*tsc_pre_init)(void);
 	void (*timer_init)(void);
 	void (*wallclock_init)(void);
 };
...
@@ -553,7 +553,7 @@ do { \
 	if (cpu_has_xmm) { \
 		xor_speed(&xor_block_pIII_sse); \
 		xor_speed(&xor_block_sse_pf64); \
-	} else if (cpu_has_mmx) { \
+	} else if (boot_cpu_has(X86_FEATURE_MMX)) { \
 		xor_speed(&xor_block_pII_mmx); \
 		xor_speed(&xor_block_p5_mmx); \
 	} else { \
...
@@ -304,7 +304,7 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
 	int cpu = smp_processor_id();
 
 	/* get information required for multi-node processors */
-	if (cpu_has_topoext) {
+	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
 		u32 eax, ebx, ecx, edx;
 
 		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
@@ -922,7 +922,7 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
 void set_dr_addr_mask(unsigned long mask, int dr)
 {
-	if (!cpu_has_bpext)
+	if (!boot_cpu_has(X86_FEATURE_BPEXT))
 		return;
 
 	switch (dr) {
...
@@ -43,7 +43,7 @@ static void init_c3(struct cpuinfo_x86 *c)
 		/* store Centaur Extended Feature Flags as
 		 * word 5 of the CPU capability bit array
 		 */
-		c->x86_capability[5] = cpuid_edx(0xC0000001);
+		c->x86_capability[CPUID_C000_0001_EDX] = cpuid_edx(0xC0000001);
 	}
 #ifdef CONFIG_X86_32
 	/* Cyrix III family needs CX8 & PGE explicitly enabled. */
...
@@ -599,50 +599,47 @@ void cpu_detect(struct cpuinfo_x86 *c)
 void get_cpu_cap(struct cpuinfo_x86 *c)
 {
-	u32 tfms, xlvl;
-	u32 ebx;
+	u32 eax, ebx, ecx, edx;
 
 	/* Intel-defined flags: level 0x00000001 */
 	if (c->cpuid_level >= 0x00000001) {
-		u32 capability, excap;
-		cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
-		c->x86_capability[0] = capability;
-		c->x86_capability[4] = excap;
+		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
+		c->x86_capability[CPUID_1_ECX] = ecx;
+		c->x86_capability[CPUID_1_EDX] = edx;
 	}
 
 	/* Additional Intel-defined flags: level 0x00000007 */
 	if (c->cpuid_level >= 0x00000007) {
-		u32 eax, ebx, ecx, edx;
 		cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
-		c->x86_capability[9] = ebx;
+		c->x86_capability[CPUID_7_0_EBX] = ebx;
+		c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);
 	}
 
 	/* Extended state features: level 0x0000000d */
 	if (c->cpuid_level >= 0x0000000d) {
-		u32 eax, ebx, ecx, edx;
 		cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx);
-		c->x86_capability[10] = eax;
+		c->x86_capability[CPUID_D_1_EAX] = eax;
 	}
 
 	/* Additional Intel-defined flags: level 0x0000000F */
 	if (c->cpuid_level >= 0x0000000F) {
-		u32 eax, ebx, ecx, edx;
 		/* QoS sub-leaf, EAX=0Fh, ECX=0 */
 		cpuid_count(0x0000000F, 0, &eax, &ebx, &ecx, &edx);
-		c->x86_capability[11] = edx;
+		c->x86_capability[CPUID_F_0_EDX] = edx;
 		if (cpu_has(c, X86_FEATURE_CQM_LLC)) {
 			/* will be overridden if occupancy monitoring exists */
 			c->x86_cache_max_rmid = ebx;
 			/* QoS sub-leaf, EAX=0Fh, ECX=1 */
 			cpuid_count(0x0000000F, 1, &eax, &ebx, &ecx, &edx);
-			c->x86_capability[12] = edx;
+			c->x86_capability[CPUID_F_1_EDX] = edx;
 			if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC)) {
 				c->x86_cache_max_rmid = ecx;
 				c->x86_cache_occ_scale = ebx;
@@ -654,22 +651,24 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
 	}
 
 	/* AMD-defined flags: level 0x80000001 */
-	xlvl = cpuid_eax(0x80000000);
-	c->extended_cpuid_level = xlvl;
-	if ((xlvl & 0xffff0000) == 0x80000000) {
-		if (xlvl >= 0x80000001) {
-			c->x86_capability[1] = cpuid_edx(0x80000001);
-			c->x86_capability[6] = cpuid_ecx(0x80000001);
+	eax = cpuid_eax(0x80000000);
+	c->extended_cpuid_level = eax;
+	if ((eax & 0xffff0000) == 0x80000000) {
+		if (eax >= 0x80000001) {
+			cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
+			c->x86_capability[CPUID_8000_0001_ECX] = ecx;
+			c->x86_capability[CPUID_8000_0001_EDX] = edx;
 		}
 	}
 
 	if (c->extended_cpuid_level >= 0x80000008) {
-		u32 eax = cpuid_eax(0x80000008);
+		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
 		c->x86_virt_bits = (eax >> 8) & 0xff;
 		c->x86_phys_bits = eax & 0xff;
-		c->x86_capability[13] = cpuid_ebx(0x80000008);
+		c->x86_capability[CPUID_8000_0008_EBX] = ebx;
 	}
 #ifdef CONFIG_X86_32
 	else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
@@ -679,6 +678,9 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
 	if (c->extended_cpuid_level >= 0x80000007)
 		c->x86_power = cpuid_edx(0x80000007);
 
+	if (c->extended_cpuid_level >= 0x8000000a)
+		c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);
+
 	init_scattered_cpuid_features(c);
 }
@@ -1443,7 +1445,9 @@ void cpu_init(void)
 	printk(KERN_INFO "Initializing CPU#%d\n", cpu);
 
-	if (cpu_feature_enabled(X86_FEATURE_VME) || cpu_has_tsc || cpu_has_de)
+	if (cpu_feature_enabled(X86_FEATURE_VME) ||
+	    cpu_has_tsc ||
+	    boot_cpu_has(X86_FEATURE_DE))
 		cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
 
 	load_current_idt();
...
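
With this change get_cpu_cap() fills x86_capability[] through the named enum cpuid_leafs indices, and the two new words line up with the new defines: word 14 (CPUID_6_EAX) carries the thermal/power bits and word 15 (CPUID_8000_000A_EDX) the AMD SVM bits. A hedged illustration of how a feature define decomposes against those words; the helper below is hypothetical and is just the open-coded equivalent of cpu_has(c, X86_FEATURE_HWP):

```c
#include <linux/bitops.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>

/*
 * Illustration only: X86_FEATURE_HWP is now (14*32 + 7) = 455, so
 * 455 / 32 = 14 selects CPUID_6_EAX (filled from cpuid_eax(0x00000006)
 * in get_cpu_cap()) and 455 % 32 = 7 selects the HWP bit within it.
 */
static bool cpu_supports_hwp(struct cpuinfo_x86 *c)
{
	u32 word = c->x86_capability[X86_FEATURE_HWP / 32];	/* CPUID_6_EAX */

	return word & BIT(X86_FEATURE_HWP % 32);
}
```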
@@ -445,7 +445,8 @@ static void init_intel(struct cpuinfo_x86 *c)
 	if (cpu_has_xmm2)
 		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
-	if (cpu_has_ds) {
+
+	if (boot_cpu_has(X86_FEATURE_DS)) {
 		unsigned int l1;
 		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
 		if (!(l1 & (1<<11)))
...
@@ -591,7 +591,7 @@ cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf)
 	unsigned		edx;
 
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
-		if (cpu_has_topoext)
+		if (boot_cpu_has(X86_FEATURE_TOPOEXT))
 			cpuid_count(0x8000001d, index, &eax.full,
 				    &ebx.full, &ecx.full, &edx);
 		else
@@ -637,7 +637,7 @@ static int find_num_cache_leaves(struct cpuinfo_x86 *c)
 void init_amd_cacheinfo(struct cpuinfo_x86 *c)
 {
-	if (cpu_has_topoext) {
+	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
 		num_cache_leaves = find_num_cache_leaves(c);
 	} else if (c->extended_cpuid_level >= 0x80000006) {
 		if (cpuid_edx(0x80000006) & 0xf000)
@@ -809,7 +809,7 @@ static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
 	struct cacheinfo *this_leaf;
 	int i, sibling;
 
-	if (cpu_has_topoext) {
+	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
 		unsigned int apicid, nshared, first, last;
 
 		this_leaf = this_cpu_ci->info_list + index;
...
@@ -349,7 +349,7 @@ static void get_fixed_ranges(mtrr_type *frs)
 void mtrr_save_fixed_ranges(void *info)
 {
-	if (cpu_has_mtrr)
+	if (boot_cpu_has(X86_FEATURE_MTRR))
 		get_fixed_ranges(mtrr_state.fixed_ranges);
 }
...
@@ -682,7 +682,7 @@ void __init mtrr_bp_init(void)
 	phys_addr = 32;
 
-	if (cpu_has_mtrr) {
+	if (boot_cpu_has(X86_FEATURE_MTRR)) {
 		mtrr_if = &generic_mtrr_ops;
 		size_or_mask = SIZE_OR_MASK_BITS(36);
 		size_and_mask = 0x00f00000;
...
@@ -160,7 +160,7 @@ static inline int amd_pmu_addr_offset(int index, bool eventsel)
 	if (offset)
 		return offset;
 
-	if (!cpu_has_perfctr_core)
+	if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
 		offset = index;
 	else
 		offset = index << 1;
@@ -652,7 +652,7 @@ static __initconst const struct x86_pmu amd_pmu = {
 static int __init amd_core_pmu_init(void)
 {
-	if (!cpu_has_perfctr_core)
+	if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
 		return 0;
 
 	switch (boot_cpu_data.x86) {
...
@@ -523,10 +523,10 @@ static int __init amd_uncore_init(void)
 	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
 		goto fail_nodev;
 
-	if (!cpu_has_topoext)
+	if (!boot_cpu_has(X86_FEATURE_TOPOEXT))
 		goto fail_nodev;
 
-	if (cpu_has_perfctr_nb) {
+	if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) {
 		amd_uncore_nb = alloc_percpu(struct amd_uncore *);
 		if (!amd_uncore_nb) {
 			ret = -ENOMEM;
@@ -540,7 +540,7 @@ static int __init amd_uncore_init(void)
 		ret = 0;
 	}
 
-	if (cpu_has_perfctr_l2) {
+	if (boot_cpu_has(X86_FEATURE_PERFCTR_L2)) {
 		amd_uncore_l2 = alloc_percpu(struct amd_uncore *);
 		if (!amd_uncore_l2) {
 			ret = -ENOMEM;
@@ -583,10 +583,11 @@ static int __init amd_uncore_init(void)
 	/* amd_uncore_nb/l2 should have been freed by cleanup_cpu_online */
 	amd_uncore_nb = amd_uncore_l2 = NULL;
-	if (cpu_has_perfctr_l2)
+
+	if (boot_cpu_has(X86_FEATURE_PERFCTR_L2))
 		perf_pmu_unregister(&amd_l2_pmu);
 fail_l2:
-	if (cpu_has_perfctr_nb)
+	if (boot_cpu_has(X86_FEATURE_PERFCTR_NB))
 		perf_pmu_unregister(&amd_nb_pmu);
 	if (amd_uncore_l2)
 		free_percpu(amd_uncore_l2);
...
@@ -31,32 +31,12 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c)
 	const struct cpuid_bit *cb;
 
 	static const struct cpuid_bit cpuid_bits[] = {
-		{ X86_FEATURE_DTHERM,		CR_EAX, 0, 0x00000006, 0 },
-		{ X86_FEATURE_IDA,		CR_EAX, 1, 0x00000006, 0 },
-		{ X86_FEATURE_ARAT,		CR_EAX, 2, 0x00000006, 0 },
-		{ X86_FEATURE_PLN,		CR_EAX, 4, 0x00000006, 0 },
-		{ X86_FEATURE_PTS,		CR_EAX, 6, 0x00000006, 0 },
-		{ X86_FEATURE_HWP,		CR_EAX, 7, 0x00000006, 0 },
-		{ X86_FEATURE_HWP_NOTIFY,	CR_EAX, 8, 0x00000006, 0 },
-		{ X86_FEATURE_HWP_ACT_WINDOW,	CR_EAX, 9, 0x00000006, 0 },
-		{ X86_FEATURE_HWP_EPP,		CR_EAX,10, 0x00000006, 0 },
-		{ X86_FEATURE_HWP_PKG_REQ,	CR_EAX,11, 0x00000006, 0 },
 		{ X86_FEATURE_INTEL_PT,		CR_EBX,25, 0x00000007, 0 },
 		{ X86_FEATURE_APERFMPERF,	CR_ECX, 0, 0x00000006, 0 },
 		{ X86_FEATURE_EPB,		CR_ECX, 3, 0x00000006, 0 },
 		{ X86_FEATURE_HW_PSTATE,	CR_EDX, 7, 0x80000007, 0 },
 		{ X86_FEATURE_CPB,		CR_EDX, 9, 0x80000007, 0 },
 		{ X86_FEATURE_PROC_FEEDBACK,	CR_EDX,11, 0x80000007, 0 },
-		{ X86_FEATURE_NPT,		CR_EDX, 0, 0x8000000a, 0 },
-		{ X86_FEATURE_LBRV,		CR_EDX, 1, 0x8000000a, 0 },
-		{ X86_FEATURE_SVML,		CR_EDX, 2, 0x8000000a, 0 },
-		{ X86_FEATURE_NRIPS,		CR_EDX, 3, 0x8000000a, 0 },
-		{ X86_FEATURE_TSCRATEMSR,	CR_EDX, 4, 0x8000000a, 0 },
-		{ X86_FEATURE_VMCBCLEAN,	CR_EDX, 5, 0x8000000a, 0 },
-		{ X86_FEATURE_FLUSHBYASID,	CR_EDX, 6, 0x8000000a, 0 },
-		{ X86_FEATURE_DECODEASSISTS,	CR_EDX, 7, 0x8000000a, 0 },
-		{ X86_FEATURE_PAUSEFILTER,	CR_EDX,10, 0x8000000a, 0 },
-		{ X86_FEATURE_PFTHRESHOLD,	CR_EDX,12, 0x8000000a, 0 },
 		{ 0, 0, 0, 0, 0 }
 	};
...
@@ -12,7 +12,7 @@ static void early_init_transmeta(struct cpuinfo_x86 *c)
 		xlvl = cpuid_eax(0x80860000);
 		if ((xlvl & 0xffff0000) == 0x80860000) {
 			if (xlvl >= 0x80860001)
-				c->x86_capability[2] = cpuid_edx(0x80860001);
+				c->x86_capability[CPUID_8086_0001_EDX] = cpuid_edx(0x80860001);
 		}
 	}
@@ -82,7 +82,7 @@ static void init_transmeta(struct cpuinfo_x86 *c)
 	/* Unhide possibly hidden capability flags */
 	rdmsr(0x80860004, cap_mask, uk);
 	wrmsr(0x80860004, ~0, uk);
-	c->x86_capability[0] = cpuid_edx(0x00000001);
+	c->x86_capability[CPUID_1_EDX] = cpuid_edx(0x00000001);
 	wrmsr(0x80860004, cap_mask, uk);
 
 	/* All Transmeta CPUs have a constant TSC */
...
@@ -12,7 +12,7 @@
  */
 static void fpu__init_cpu_ctx_switch(void)
 {
-	if (!cpu_has_eager_fpu)
+	if (!boot_cpu_has(X86_FEATURE_EAGER_FPU))
 		stts();
 	else
 		clts();
@@ -296,7 +296,7 @@ static void __init fpu__init_system_ctx_switch(void)
 	current_thread_info()->status = 0;
 
 	/* Auto enable eagerfpu for xsaveopt */
-	if (cpu_has_xsaveopt && eagerfpu != DISABLE)
+	if (boot_cpu_has(X86_FEATURE_XSAVEOPT) && eagerfpu != DISABLE)
 		eagerfpu = ENABLE;
 
 	if (xfeatures_mask & XFEATURE_MASK_EAGER) {
...
@@ -300,6 +300,10 @@ static int arch_build_bp_info(struct perf_event *bp)
 			return -EINVAL;
 		if (bp->attr.bp_addr & (bp->attr.bp_len - 1))
 			return -EINVAL;
+
+		if (!boot_cpu_has(X86_FEATURE_BPEXT))
+			return -EOPNOTSUPP;
+
 		/*
 		 * It's impossible to use a range breakpoint to fake out
 		 * user vs kernel detection because bp_len - 1 can't
@@ -307,8 +311,6 @@ static int arch_build_bp_info(struct perf_event *bp)
 		 * breakpoints, then we'll have to check for kprobe-blacklisted
 		 * addresses anywhere in the range.
 		 */
-		if (!cpu_has_bpext)
-			return -EOPNOTSUPP;
 		info->mask = bp->attr.bp_len - 1;
 		info->len = X86_BREAKPOINT_LEN_1;
 	}
...
@@ -74,16 +74,6 @@ void __init default_banner(void)
 /* Undefined instruction for dealing with missing ops pointers. */
 static const unsigned char ud2a[] = { 0x0f, 0x0b };
 
-unsigned paravirt_patch_nop(void)
-{
-	return 0;
-}
-
-unsigned paravirt_patch_ignore(unsigned len)
-{
-	return len;
-}
-
 struct branch {
 	unsigned char opcode;
 	u32 delta;
@@ -133,7 +123,6 @@ static void *get_call_destination(u8 type)
 		.pv_time_ops = pv_time_ops,
 		.pv_cpu_ops = pv_cpu_ops,
 		.pv_irq_ops = pv_irq_ops,
-		.pv_apic_ops = pv_apic_ops,
 		.pv_mmu_ops = pv_mmu_ops,
 #ifdef CONFIG_PARAVIRT_SPINLOCKS
 		.pv_lock_ops = pv_lock_ops,
@@ -152,8 +141,7 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
 		/* If there's no function, patch it with a ud2a (BUG) */
 		ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
 	else if (opfunc == _paravirt_nop)
-		/* If the operation is a nop, then nop the callsite */
-		ret = paravirt_patch_nop();
+		ret = 0;
 
 	/* identity functions just return their single argument */
 	else if (opfunc == _paravirt_ident_32)
@@ -391,12 +379,6 @@ NOKPROBE_SYMBOL(native_get_debugreg);
 NOKPROBE_SYMBOL(native_set_debugreg);
 NOKPROBE_SYMBOL(native_load_idt);
 
-struct pv_apic_ops pv_apic_ops = {
-#ifdef CONFIG_X86_LOCAL_APIC
-	.startup_ipi_hook = paravirt_nop,
-#endif
-};
-
 #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
 /* 32-bit pagetable entries */
 #define PTE_IDENT	__PV_IS_CALLEE_SAVE(_paravirt_ident_32)
@@ -432,9 +414,6 @@ struct pv_mmu_ops pv_mmu_ops = {
 	.set_pmd = native_set_pmd,
 	.set_pmd_at = native_set_pmd_at,
 	.pte_update = paravirt_nop,
-	.pte_update_defer = paravirt_nop,
-	.pmd_update = paravirt_nop,
-	.pmd_update_defer = paravirt_nop,
 
 	.ptep_modify_prot_start = __ptep_modify_prot_start,
 	.ptep_modify_prot_commit = __ptep_modify_prot_commit,
@@ -480,6 +459,5 @@ struct pv_mmu_ops pv_mmu_ops = {
 EXPORT_SYMBOL_GPL(pv_time_ops);
 EXPORT_SYMBOL    (pv_cpu_ops);
 EXPORT_SYMBOL    (pv_mmu_ops);
-EXPORT_SYMBOL_GPL(pv_apic_ops);
 EXPORT_SYMBOL_GPL(pv_info);
 EXPORT_SYMBOL    (pv_irq_ops);
@@ -124,21 +124,6 @@ const char *regs_query_register_name(unsigned int offset)
 	return NULL;
 }
 
-static const int arg_offs_table[] = {
-#ifdef CONFIG_X86_32
-	[0] = offsetof(struct pt_regs, ax),
-	[1] = offsetof(struct pt_regs, dx),
-	[2] = offsetof(struct pt_regs, cx)
-#else /* CONFIG_X86_64 */
-	[0] = offsetof(struct pt_regs, di),
-	[1] = offsetof(struct pt_regs, si),
-	[2] = offsetof(struct pt_regs, dx),
-	[3] = offsetof(struct pt_regs, cx),
-	[4] = offsetof(struct pt_regs, r8),
-	[5] = offsetof(struct pt_regs, r9)
-#endif
-};
-
 /*
  * does not yet catch signals sent when the child dies.
  * in exit.c or in signal.c.
...
@@ -304,7 +304,7 @@ do { \
 static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 {
-	if (cpu_has_topoext) {
+	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
 		int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
 
 		if (c->phys_proc_id == o->phys_proc_id &&
@@ -629,13 +629,6 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
 	else
 		num_starts = 0;
 
-	/*
-	 * Paravirt / VMI wants a startup IPI hook here to set up the
-	 * target processor state.
-	 */
-	startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
-			 stack_start);
-
 	/*
 	 * Run STARTUP IPI loop.
 	 */
...
@@ -1185,8 +1185,6 @@ void __init tsc_init(void)
 	u64 lpj;
 	int cpu;
 
-	x86_init.timers.tsc_pre_init();
-
 	if (!cpu_has_tsc) {
 		setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
 		return;
...
@@ -357,8 +357,10 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
 	tss = &per_cpu(cpu_tss, get_cpu());
 	/* make room for real-mode segments */
 	tsk->thread.sp0 += 16;
-	if (cpu_has_sep)
+
+	if (static_cpu_has_safe(X86_FEATURE_SEP))
 		tsk->thread.sysenter_cs = 0;
+
 	load_sp0(tss, &tsk->thread);
 	put_cpu();
...
@@ -68,7 +68,6 @@ struct x86_init_ops x86_init __initdata = {
 	.timers = {
 		.setup_percpu_clockev	= setup_boot_APIC_clock,
-		.tsc_pre_init		= x86_init_noop,
 		.timer_init		= hpet_time_init,
 		.wallclock_init		= x86_init_noop,
 	},
...
@@ -1473,7 +1473,6 @@ __init void lguest_init(void)
 	pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode;
 	pv_mmu_ops.lazy_mode.flush = paravirt_flush_lazy_mmu;
 	pv_mmu_ops.pte_update = lguest_pte_update;
-	pv_mmu_ops.pte_update_defer = lguest_pte_update;
 
 #ifdef CONFIG_X86_LOCAL_APIC
 	/* APIC read/write intercepts */
...
@@ -414,7 +414,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
 	if (changed && dirty) {
 		*ptep = entry;
-		pte_update_defer(vma->vm_mm, address, ptep);
+		pte_update(vma->vm_mm, address, ptep);
 	}
 
 	return changed;
@@ -431,7 +431,6 @@ int pmdp_set_access_flags(struct vm_area_struct *vma,
 	if (changed && dirty) {
 		*pmdp = entry;
-		pmd_update_defer(vma->vm_mm, address, pmdp);
 		/*
 		 * We had a write-protection fault here and changed the pmd
 		 * to to more permissive. No need to flush the TLB for that,
@@ -469,9 +468,6 @@ int pmdp_test_and_clear_young(struct vm_area_struct *vma,
 		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
 					 (unsigned long *)pmdp);
 
-	if (ret)
-		pmd_update(vma->vm_mm, addr, pmdp);
-
 	return ret;
 }
 #endif
@@ -518,7 +514,6 @@ void pmdp_splitting_flush(struct vm_area_struct *vma,
 	set = !test_and_set_bit(_PAGE_BIT_SPLITTING,
 				(unsigned long *)pmdp);
 	if (set) {
-		pmd_update(vma->vm_mm, address, pmdp);
 		/* need tlb flush only to serialize against gup-fast */
 		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 	}
...
@@ -31,7 +31,7 @@ early_param("noexec", noexec_setup);
 void x86_configure_nx(void)
 {
-	if (cpu_has_nx && !disable_nx)
+	if (boot_cpu_has(X86_FEATURE_NX) && !disable_nx)
 		__supported_pte_mask |= _PAGE_NX;
 	else
 		__supported_pte_mask &= ~_PAGE_NX;
@@ -39,7 +39,7 @@ void x86_configure_nx(void)
 void __init x86_report_nx(void)
 {
-	if (!cpu_has_nx) {
+	if (!boot_cpu_has(X86_FEATURE_NX)) {
 		printk(KERN_NOTICE "Notice: NX (Execute Disable) protection "
 		       "missing in CPU!\n");
 	} else {
...
@@ -1262,12 +1262,6 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
 	.end_context_switch = xen_end_context_switch,
 };
 
-static const struct pv_apic_ops xen_apic_ops __initconst = {
-#ifdef CONFIG_X86_LOCAL_APIC
-	.startup_ipi_hook = paravirt_nop,
-#endif
-};
-
 static void xen_reboot(int reason)
 {
 	struct sched_shutdown r = { .reason = reason };
@@ -1535,7 +1529,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
 	if (xen_initial_domain())
 		pv_info.features |= PV_SUPPORTED_RTC;
 	pv_init_ops = xen_init_ops;
-	pv_apic_ops = xen_apic_ops;
 	if (!xen_pvh_domain()) {
 		pv_cpu_ops = xen_cpu_ops;
...
@@ -2436,7 +2436,6 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
 	.flush_tlb_others = xen_flush_tlb_others,
 
 	.pte_update = paravirt_nop,
-	.pte_update_defer = paravirt_nop,
 
 	.pgd_alloc = xen_pgd_alloc,
 	.pgd_free = xen_pgd_free,
...
@@ -140,7 +140,7 @@ static int via_rng_init(struct hwrng *rng)
 	 * RNG configuration like it used to be the case in this
 	 * register */
 	if ((c->x86 == 6) && (c->x86_model >= 0x0f)) {
-		if (!cpu_has_xstore_enabled) {
+		if (!boot_cpu_has(X86_FEATURE_XSTORE_EN)) {
 			pr_err(PFX "can't enable hardware RNG "
 				"if XSTORE is not enabled\n");
 			return -ENODEV;
@@ -200,8 +200,9 @@ static int __init mod_init(void)
 {
 	int err;
 
-	if (!cpu_has_xstore)
+	if (!boot_cpu_has(X86_FEATURE_XSTORE))
 		return -ENODEV;
+
 	pr_info("VIA RNG detected\n");
 	err = hwrng_register(&via_rng);
 	if (err) {
...
@@ -515,7 +515,7 @@ static int __init padlock_init(void)
 	if (!x86_match_cpu(padlock_cpu_id))
 		return -ENODEV;
 
-	if (!cpu_has_xcrypt_enabled) {
+	if (!boot_cpu_has(X86_FEATURE_XCRYPT_EN)) {
 		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
 		return -ENODEV;
 	}
...
@@ -540,7 +540,7 @@ static int __init padlock_init(void)
 	struct shash_alg *sha1;
 	struct shash_alg *sha256;
 
-	if (!x86_match_cpu(padlock_sha_ids) || !cpu_has_phe_enabled)
+	if (!x86_match_cpu(padlock_sha_ids) || !boot_cpu_has(X86_FEATURE_PHE_EN))
 		return -ENODEV;
 
 	/* Register the newly added algorithm module if on *
...
@@ -753,7 +753,7 @@ static inline void set_irq_posting_cap(void)
 	 * should have X86_FEATURE_CX16 support, this has been confirmed
 	 * with Intel hardware guys.
 	 */
-	if ( cpu_has_cx16 )
+	if (boot_cpu_has(X86_FEATURE_CX16))
 		intel_irq_remap_ops.capability |= 1 << IRQ_POSTING_CAP;
 
 	for_each_iommu(iommu, drhd)
...
@@ -923,7 +923,7 @@ static int check_async_write(struct inode *inode, unsigned long bio_flags)
 	if (bio_flags & EXTENT_BIO_TREE_LOG)
 		return 0;
 #ifdef CONFIG_X86
-	if (cpu_has_xmm4_2)
+	if (static_cpu_has_safe(X86_FEATURE_XMM4_2))
 		return 0;
 #endif
 	return 1;
...