Commit fcc19657 authored by Linus Torvalds

Merge tag 'x86-cleanups-2024-03-11' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 cleanups from Ingo Molnar:
 "Misc cleanups, including a large series from Thomas Gleixner to cure
  sparse warnings"

* tag 'x86-cleanups-2024-03-11' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/nmi: Drop unused declaration of proc_nmi_enabled()
  x86/callthunks: Use EXPORT_PER_CPU_SYMBOL_GPL() for per CPU variables
  x86/cpu: Provide a declaration for itlb_multihit_kvm_mitigation
  x86/cpu: Use EXPORT_PER_CPU_SYMBOL_GPL() for x86_spec_ctrl_current
  x86/uaccess: Add missing __force to casts in __access_ok() and valid_user_address()
  x86/percpu: Cure per CPU madness on UP
  smp: Consolidate smp_prepare_boot_cpu()
  x86/msr: Add missing __percpu annotations
  x86/msr: Prepare for including <linux/percpu.h> into <asm/msr.h>
  perf/x86/amd/uncore: Fix __percpu annotation
  x86/nmi: Remove an unnecessary IS_ENABLED(CONFIG_SMP)
  x86/apm_32: Remove dead function apm_get_battery_status()
  x86/insn-eval: Fix function param name in get_eff_addr_sib()
parents d69ad12c 774a86f1
@@ -467,11 +467,6 @@ smp_prepare_cpus(unsigned int max_cpus)
 	smp_num_cpus = smp_num_probed;
 }
 
-void
-smp_prepare_boot_cpu(void)
-{
-}
-
 int
 __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
...
@@ -39,11 +39,6 @@ struct plat_smp_ops __weak plat_smp_ops;
 /* XXX: per cpu ? Only needed once in early secondary boot */
 struct task_struct *secondary_idle_tsk;
 
-/* Called from start_kernel */
-void __init smp_prepare_boot_cpu(void)
-{
-}
-
 static int __init arc_get_cpu_map(const char *name, struct cpumask *cpumask)
 {
 	unsigned long dt_root = of_get_flat_dt_root();
...
@@ -152,10 +152,6 @@ void arch_irq_work_raise(void)
 }
 #endif
 
-void __init smp_prepare_boot_cpu(void)
-{
-}
-
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
 }
...
@@ -114,10 +114,6 @@ void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)
 	local_irq_restore(flags);
 }
 
-void __init smp_prepare_boot_cpu(void)
-{
-}
-
 /*
  * interrupts should already be disabled from the VM
  * SP should already be correct; need to set THREADINFO_REG
...
@@ -57,10 +57,6 @@ static void boot_secondary(unsigned int cpu, struct task_struct *idle)
 	spin_unlock(&boot_lock);
 }
 
-void __init smp_prepare_boot_cpu(void)
-{
-}
-
 void __init smp_init_cpus(void)
 {
 	struct device_node *cpu;
...
@@ -42,10 +42,6 @@
 static DECLARE_COMPLETION(cpu_running);
 
-void __init smp_prepare_boot_cpu(void)
-{
-}
-
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
 	int cpuid;
...
@@ -1206,10 +1206,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 {
 }
 
-void smp_prepare_boot_cpu(void)
-{
-}
-
 void __init smp_setup_processor_id(void)
 {
 	if (tlb_type == spitfire)
...
@@ -71,7 +71,7 @@ union amd_uncore_info {
 };
 
 struct amd_uncore {
-	union amd_uncore_info * __percpu info;
+	union amd_uncore_info __percpu *info;
 	struct amd_uncore_pmu *pmus;
 	unsigned int num_pmus;
 	bool init_done;
...
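Where the __percpu annotation sits relative to the '*' is what sparse keys on: placed before the '*', it marks the pointee as per-CPU storage, which is what amd_uncore::info actually is; placed after, it would mark the pointer variable itself as a per-CPU object. A minimal sketch of the two readings (illustrative only, not part of the diff):

        union amd_uncore_info __percpu *info;    /* pointer into per-CPU storage;
                                                  * per_cpu_ptr(info, cpu) typechecks */
        union amd_uncore_info * __percpu info2;  /* sparse: a per-CPU object that
                                                  * itself holds a plain pointer */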
@@ -17,6 +17,7 @@
 #include <linux/kvm_host.h>
 
 #include <asm/cpufeature.h>
+#include <asm/debugreg.h>
 #include <asm/hardirq.h>
 #include <asm/intel-family.h>
 #include <asm/intel_pt.h>
...
@@ -5,6 +5,7 @@
 #include <linux/sched/clock.h>
 
 #include <asm/cpu_entry_area.h>
+#include <asm/debugreg.h>
 #include <asm/perf_event.h>
 #include <asm/tlbflush.h>
 #include <asm/insn.h>
...
@@ -5,7 +5,9 @@
 #include <linux/bug.h>
 #include <linux/percpu.h>
 #include <uapi/asm/debugreg.h>
+
 #include <asm/cpufeature.h>
+#include <asm/msr.h>
 
 DECLARE_PER_CPU(unsigned long, cpu_dr7);
@@ -159,4 +161,26 @@ static inline unsigned long amd_get_dr_addr_mask(unsigned int dr)
 }
 #endif
 
+static inline unsigned long get_debugctlmsr(void)
+{
+	unsigned long debugctlmsr = 0;
+
+#ifndef CONFIG_X86_DEBUGCTLMSR
+	if (boot_cpu_data.x86 < 6)
+		return 0;
+#endif
+	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
+
+	return debugctlmsr;
+}
+
+static inline void update_debugctlmsr(unsigned long debugctlmsr)
+{
+#ifndef CONFIG_X86_DEBUGCTLMSR
+	if (boot_cpu_data.x86 < 6)
+		return;
+#endif
+	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
+}
+
 #endif /* _ASM_X86_DEBUGREG_H */
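Moving get_debugctlmsr()/update_debugctlmsr() here keeps them next to the other debug-register helpers, as part of preparing <asm/msr.h> to include <linux/percpu.h> without an include cycle through <asm/processor.h>. As a usage illustration in the style of set_task_blockstep(), a read-modify-write of the branch-trap bit ('on' is a hypothetical flag; DEBUGCTLMSR_BTF is the real bit from <asm/msr-index.h>):

        unsigned long debugctl = get_debugctlmsr();

        if (on)
                debugctl |= DEBUGCTLMSR_BTF;    /* trap after every branch */
        else
                debugctl &= ~DEBUGCTLMSR_BTF;
        update_debugctlmsr(debugctl);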
@@ -6,7 +6,7 @@
 
 #ifdef CONFIG_X86_64
 
-#include <asm/msr-index.h>
+#include <asm/msr.h>
 
 /*
  * Read/write a task's FSBASE or GSBASE. This returns the value that
...
@@ -12,10 +12,12 @@
 #include <uapi/asm/msr.h>
 #include <asm/shared/msr.h>
 
+#include <linux/percpu.h>
+
 struct msr_info {
 	u32 msr_no;
 	struct msr reg;
-	struct msr *msrs;
+	struct msr __percpu *msrs;
 	int err;
 };
@@ -323,8 +325,8 @@ static inline int wrmsrl_safe(u32 msr, u64 val)
 	return wrmsr_safe(msr, (u32)val, (u32)(val >> 32));
 }
 
-struct msr *msrs_alloc(void);
-void msrs_free(struct msr *msrs);
+struct msr __percpu *msrs_alloc(void);
+void msrs_free(struct msr __percpu *msrs);
 int msr_set_bit(u32 msr, u8 bit);
 int msr_clear_bit(u32 msr, u8 bit);
@@ -333,8 +335,8 @@ int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
 int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
 int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
 int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
-void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
-void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
+void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs);
+void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs);
 int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
 int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
 int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
@@ -363,14 +365,14 @@ static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
 	return 0;
 }
 
 static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
-				 struct msr *msrs)
+				 struct msr __percpu *msrs)
 {
-	rdmsr_on_cpu(0, msr_no, &(msrs[0].l), &(msrs[0].h));
+	rdmsr_on_cpu(0, msr_no, raw_cpu_ptr(&msrs->l), raw_cpu_ptr(&msrs->h));
 }
 
 static inline void wrmsr_on_cpus(const struct cpumask *m, u32 msr_no,
-				 struct msr *msrs)
+				 struct msr __percpu *msrs)
 {
-	wrmsr_on_cpu(0, msr_no, msrs[0].l, msrs[0].h);
+	wrmsr_on_cpu(0, msr_no, raw_cpu_read(msrs->l), raw_cpu_read(msrs->h));
 }
 
 static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
 				    u32 *l, u32 *h)
...
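With msrs_alloc()/msrs_free() and the rdmsr_on_cpus()/wrmsr_on_cpus() prototypes now carrying __percpu, a typical allocate/read/inspect/free sequence typechecks end to end under sparse. A hedged usage sketch (the MSR chosen is arbitrary):

        struct msr __percpu *msrs = msrs_alloc();       /* wraps alloc_percpu(struct msr) */

        if (msrs) {
                rdmsr_on_cpus(cpu_online_mask, MSR_IA32_SPEC_CTRL, msrs);
                pr_info("CPU1 copy: 0x%llx\n", per_cpu_ptr(msrs, 1)->q);
                msrs_free(msrs);
        }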
@@ -14,9 +14,6 @@ extern void release_perfctr_nmi(unsigned int);
 extern int reserve_evntsel_nmi(unsigned int);
 extern void release_evntsel_nmi(unsigned int);
 
-struct ctl_table;
-extern int proc_nmi_enabled(struct ctl_table *, int ,
-			    void __user *, size_t *, loff_t *);
 extern int unknown_nmi_panic;
 
 #endif /* CONFIG_X86_LOCAL_APIC */
...
@@ -20,7 +20,6 @@ struct vm86;
 #include <asm/page.h>
 #include <asm/pgtable_types.h>
 #include <asm/percpu.h>
-#include <asm/msr.h>
 #include <asm/desc_defs.h>
 #include <asm/nops.h>
 #include <asm/special_insns.h>
@@ -185,13 +184,8 @@ extern struct cpuinfo_x86 new_cpu_data;
 extern __u32 cpu_caps_cleared[NCAPINTS + NBUGINTS];
 extern __u32 cpu_caps_set[NCAPINTS + NBUGINTS];
 
-#ifdef CONFIG_SMP
 DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
 #define cpu_data(cpu)		per_cpu(cpu_info, cpu)
-#else
-#define cpu_info		boot_cpu_data
-#define cpu_data(cpu)		boot_cpu_data
-#endif
 
 extern const struct seq_operations cpuinfo_op;
@@ -575,28 +569,6 @@ extern void cpu_init(void);
 extern void cpu_init_exception_handling(void);
 extern void cr4_init(void);
 
-static inline unsigned long get_debugctlmsr(void)
-{
-	unsigned long debugctlmsr = 0;
-
-#ifndef CONFIG_X86_DEBUGCTLMSR
-	if (boot_cpu_data.x86 < 6)
-		return 0;
-#endif
-	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
-
-	return debugctlmsr;
-}
-
-static inline void update_debugctlmsr(unsigned long debugctlmsr)
-{
-#ifndef CONFIG_X86_DEBUGCTLMSR
-	if (boot_cpu_data.x86 < 6)
-		return;
-#endif
-	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
-}
-
 extern void set_task_blockstep(struct task_struct *task, bool on);
 
 /* Boot loader type from the setup header: */
...
@@ -56,11 +56,6 @@ static inline void stop_other_cpus(void)
 	smp_ops.stop_other_cpus(1);
 }
 
-static inline void smp_prepare_boot_cpu(void)
-{
-	smp_ops.smp_prepare_boot_cpu();
-}
-
 static inline void smp_prepare_cpus(unsigned int max_cpus)
 {
 	smp_ops.smp_prepare_cpus(max_cpus);
...
@@ -96,4 +96,6 @@ static inline void speculative_store_bypass_ht_init(void) { }
 extern void speculation_ctrl_update(unsigned long tif);
 extern void speculation_ctrl_update_current(void);
 
+extern bool itlb_multihit_kvm_mitigation;
+
 #endif
@@ -2,11 +2,11 @@
 #ifndef _ASM_X86_SPECIAL_INSNS_H
 #define _ASM_X86_SPECIAL_INSNS_H
 
 #ifdef __KERNEL__
 #include <asm/nops.h>
 #include <asm/processor-flags.h>
+
+#include <linux/errno.h>
 #include <linux/irqflags.h>
 #include <linux/jump_label.h>
...
@@ -5,8 +5,9 @@
 #ifndef _ASM_X86_TSC_H
 #define _ASM_X86_TSC_H
 
-#include <asm/processor.h>
 #include <asm/cpufeature.h>
+#include <asm/processor.h>
+#include <asm/msr.h>
 
 /*
  * Standard way to access the cycle counter.
...
@@ -54,7 +54,7 @@ static inline unsigned long __untagged_addr_remote(struct mm_struct *mm,
  * half and a user half. When cast to a signed type, user pointers
  * are positive and kernel pointers are negative.
  */
-#define valid_user_address(x) ((long)(x) >= 0)
+#define valid_user_address(x) ((__force long)(x) >= 0)
 
 /*
  * User pointers can have tag bits on x86-64. This scheme tolerates
@@ -87,8 +87,9 @@ static inline bool __access_ok(const void __user *ptr, unsigned long size)
 	if (__builtin_constant_p(size <= PAGE_SIZE) && size <= PAGE_SIZE) {
 		return valid_user_address(ptr);
 	} else {
-		unsigned long sum = size + (unsigned long)ptr;
-		return valid_user_address(sum) && sum >= (unsigned long)ptr;
+		unsigned long sum = size + (__force unsigned long)ptr;
+
+		return valid_user_address(sum) && sum >= (__force unsigned long)ptr;
 	}
 }
 #define __access_ok	__access_ok
...
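The __force casts only tell sparse that the __user address space is being stripped deliberately; the generated code is unchanged. Why the sign test works can be shown with a small user-space model, assuming the canonical x86-64 layout in which every user address has bit 63 clear (a sketch, not kernel code):

        #include <stdbool.h>
        #include <stdint.h>

        /* Bit 63 clear <=> user half <=> non-negative when viewed as signed. */
        static bool valid_user_address(uintptr_t x)
        {
                return (int64_t)x >= 0;
        }

        static bool access_ok(uintptr_t ptr, unsigned long size)
        {
                uintptr_t sum = ptr + size;

                /* "sum >= ptr" rejects ranges that wrap past the top of the
                 * address space and would otherwise look "user-positive" again. */
                return valid_user_address(sum) && sum >= ptr;
        }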
@@ -1055,35 +1055,6 @@ static int apm_get_power_status(u_short *status, u_short *bat, u_short *life)
 	return APM_SUCCESS;
 }
 
-#if 0
-static int apm_get_battery_status(u_short which, u_short *status,
-				  u_short *bat, u_short *life, u_short *nbat)
-{
-	u32 eax;
-	u32 ebx;
-	u32 ecx;
-	u32 edx;
-	u32 esi;
-
-	if (apm_info.connection_version < 0x0102) {
-		/* pretend we only have one battery. */
-		if (which != 1)
-			return APM_BAD_DEVICE;
-		*nbat = 1;
-		return apm_get_power_status(status, bat, life);
-	}
-
-	if (apm_bios_call(APM_FUNC_GET_STATUS, (0x8000 | (which)), 0, &eax,
-			  &ebx, &ecx, &edx, &esi))
-		return (eax >> 8) & 0xff;
-	*status = ebx;
-	*bat = ecx;
-	*life = edx;
-	*nbat = esi;
-	return APM_SUCCESS;
-}
-#endif
-
 /**
  * apm_engage_power_management - enable PM on a device
  * @device: identity of device
...
@@ -42,8 +42,8 @@ DEFINE_PER_CPU(u64, __x86_call_count);
 DEFINE_PER_CPU(u64, __x86_ret_count);
 DEFINE_PER_CPU(u64, __x86_stuffs_count);
 DEFINE_PER_CPU(u64, __x86_ctxsw_count);
-EXPORT_SYMBOL_GPL(__x86_ctxsw_count);
-EXPORT_SYMBOL_GPL(__x86_call_count);
+EXPORT_PER_CPU_SYMBOL_GPL(__x86_ctxsw_count);
+EXPORT_PER_CPU_SYMBOL_GPL(__x86_call_count);
 #endif
 
 extern s32 __call_sites[], __call_sites_end[];
...
@@ -56,7 +56,7 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
 
 /* The current value of the SPEC_CTRL MSR with task-specific bits set */
 DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
-EXPORT_SYMBOL_GPL(x86_spec_ctrl_current);
+EXPORT_PER_CPU_SYMBOL_GPL(x86_spec_ctrl_current);
 
 u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
 EXPORT_SYMBOL_GPL(x86_pred_cmd);
...
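For a DEFINE_PER_CPU() variable, the per-CPU export macro is the matching counterpart: it exports the same symbol as EXPORT_SYMBOL_GPL() in current kernels, but states the per-CPU nature explicitly and keeps definition and export consistent. Pairing sketch (the module side is hypothetical):

        /* core kernel */
        DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
        EXPORT_PER_CPU_SYMBOL_GPL(x86_spec_ctrl_current);

        /* a GPL module may then read the local CPU's copy: */
        u64 cur = this_cpu_read(x86_spec_ctrl_current);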
@@ -71,6 +71,9 @@
 
 #include "cpu.h"
 
+DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
+EXPORT_PER_CPU_SYMBOL(cpu_info);
+
 u32 elf_hwcap2 __read_mostly;
 
 /* Number of siblings per CPU package */
...
@@ -7,6 +7,8 @@
  * Author:
  *	Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
  */
+#include <linux/bug.h>
+#include <linux/limits.h>
 
 #include <asm/cpufeature.h>
 #include <asm/intel_pconfig.h>
...
@@ -6,6 +6,7 @@
  * Authors: Fenghua Yu <fenghua.yu@intel.com>,
  *          H. Peter Anvin <hpa@linux.intel.com>
  */
+#include <linux/printk.h>
 
 #include <asm/processor.h>
 #include <asm/archrandom.h>
...
@@ -2,6 +2,8 @@
 /*
  * x86 FPU bug checks:
  */
+#include <linux/printk.h>
+
 #include <asm/cpufeature.h>
 #include <asm/fpu/api.h>
...
@@ -503,7 +503,7 @@ DEFINE_IDTENTRY_RAW(exc_nmi)
 	if (IS_ENABLED(CONFIG_NMI_CHECK_CPU))
 		raw_atomic_long_inc(&nsp->idt_calls);
 
-	if (IS_ENABLED(CONFIG_SMP) && arch_cpu_is_offline(smp_processor_id())) {
+	if (arch_cpu_is_offline(smp_processor_id())) {
 		if (microcode_nmi_handler_enabled())
 			microcode_offline_nmi_handler();
 		return;
...
@@ -1206,6 +1206,16 @@ void __init i386_reserve_resources(void)
 
 #endif /* CONFIG_X86_32 */
 
+#ifndef CONFIG_SMP
+void __init smp_prepare_boot_cpu(void)
+{
+	struct cpuinfo_x86 *c = &cpu_data(0);
+
+	*c = boot_cpu_data;
+	c->initialized = true;
+}
+#endif
+
 static struct notifier_block kernel_offset_notifier = {
 	.notifier_call = dump_kernel_offset
 };
...
@@ -101,10 +101,6 @@ EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_die_map);
 EXPORT_PER_CPU_SYMBOL(cpu_die_map);
 
-/* Per CPU bogomips and other parameters */
-DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
-EXPORT_PER_CPU_SYMBOL(cpu_info);
-
 /* CPUs which are the primary SMT threads */
 struct cpumask __cpu_primary_thread_mask __read_mostly;
@@ -1078,6 +1074,11 @@ void __init smp_prepare_cpus_common(void)
 	set_cpu_sibling_map(0);
 }
 
+void __init smp_prepare_boot_cpu(void)
+{
+	smp_ops.smp_prepare_boot_cpu();
+}
+
 #ifdef CONFIG_X86_64
 /* Establish whether parallel bringup can be supported. */
 bool __init arch_cpuhp_init_parallel_bringup(void)
...
@@ -6,7 +6,9 @@
 #include <linux/sched/task_stack.h>
 #include <linux/mm.h>
 #include <linux/ptrace.h>
+
 #include <asm/desc.h>
+#include <asm/debugreg.h>
 #include <asm/mmu_context.h>
 
 unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs)
...
@@ -53,12 +53,11 @@
 #include <asm/cmpxchg.h>
 #include <asm/io.h>
 #include <asm/set_memory.h>
+#include <asm/spec-ctrl.h>
 #include <asm/vmx.h>
 
 #include "trace.h"
 
-extern bool itlb_multihit_kvm_mitigation;
-
 static bool nx_hugepage_mitigation_hard_disabled;
 
 int __read_mostly nx_huge_pages = -1;
...
@@ -1129,15 +1129,15 @@ static int get_eff_addr_modrm_16(struct insn *insn, struct pt_regs *regs,
  * get_eff_addr_sib() - Obtain referenced effective address via SIB
  * @insn:	Instruction. Must be valid.
  * @regs:	Register values as seen when entering kernel mode
- * @regoff:	Obtained operand offset, in pt_regs, associated with segment
+ * @base_offset: Obtained operand offset, in pt_regs, associated with segment
  * @eff_addr:	Obtained effective address
  *
  * Obtain the effective address referenced by the SIB byte of @insn. After
  * identifying the registers involved in the indexed, register-indirect memory
  * reference, its value is obtained from the operands in @regs. The computed
  * address is stored @eff_addr. Also, the register operand that indicates the
- * associated segment is stored in @regoff, this parameter can later be used to
- * determine such segment.
+ * associated segment is stored in @base_offset; this parameter can later be
+ * used to determine such segment.
  *
  * Returns:
...
@@ -9,10 +9,9 @@ static void __rdmsr_on_cpu(void *info)
 {
 	struct msr_info *rv = info;
 	struct msr *reg;
-	int this_cpu = raw_smp_processor_id();
 
 	if (rv->msrs)
-		reg = per_cpu_ptr(rv->msrs, this_cpu);
+		reg = this_cpu_ptr(rv->msrs);
 	else
 		reg = &rv->reg;
@@ -23,10 +22,9 @@ static void __wrmsr_on_cpu(void *info)
 {
 	struct msr_info *rv = info;
 	struct msr *reg;
-	int this_cpu = raw_smp_processor_id();
 
 	if (rv->msrs)
-		reg = per_cpu_ptr(rv->msrs, this_cpu);
+		reg = this_cpu_ptr(rv->msrs);
 	else
 		reg = &rv->reg;
@@ -97,7 +95,7 @@ int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
 EXPORT_SYMBOL(wrmsrl_on_cpu);
 
 static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
-			    struct msr *msrs,
+			    struct msr __percpu *msrs,
 			    void (*msr_func) (void *info))
 {
 	struct msr_info rv;
@@ -124,7 +122,7 @@ static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
  * @msrs:       array of MSR values
  *
  */
-void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
+void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs)
 {
 	__rwmsr_on_cpus(mask, msr_no, msrs, __rdmsr_on_cpu);
 }
@@ -138,7 +136,7 @@ EXPORT_SYMBOL(rdmsr_on_cpus);
  * @msrs:       array of MSR values
 *
 */
-void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
+void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs)
 {
 	__rwmsr_on_cpus(mask, msr_no, msrs, __wrmsr_on_cpu);
 }
...
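this_cpu_ptr() is the right spelling here because __rdmsr_on_cpu() and __wrmsr_on_cpu() run as smp_call_function callbacks pinned to the target CPU, so the CPU cannot change under them. Equivalence sketch:

        /* inside a callback that runs on the target CPU: */
        reg = per_cpu_ptr(rv->msrs, raw_smp_processor_id());    /* old spelling */
        reg = this_cpu_ptr(rv->msrs);                           /* same address */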
@@ -6,9 +6,9 @@
 #define CREATE_TRACE_POINTS
 #include <asm/msr-trace.h>
 
-struct msr *msrs_alloc(void)
+struct msr __percpu *msrs_alloc(void)
 {
-	struct msr *msrs = NULL;
+	struct msr __percpu *msrs = NULL;
 
 	msrs = alloc_percpu(struct msr);
 	if (!msrs) {
@@ -20,7 +20,7 @@ struct msr *msrs_alloc(void)
 }
 EXPORT_SYMBOL(msrs_alloc);
 
-void msrs_free(struct msr *msrs)
+void msrs_free(struct msr __percpu *msrs)
 {
 	free_percpu(msrs);
 }
...
@@ -105,6 +105,12 @@ static inline void on_each_cpu_cond(smp_cond_func_t cond_func,
 	on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
 }
 
+/*
+ * Architecture specific boot CPU setup. Defined as empty weak function in
+ * init/main.c. Architectures can override it.
+ */
+void smp_prepare_boot_cpu(void);
+
 #ifdef CONFIG_SMP
 
 #include <linux/preempt.h>
@@ -171,12 +177,6 @@ void generic_smp_call_function_single_interrupt(void);
 #define generic_smp_call_function_interrupt \
 	generic_smp_call_function_single_interrupt
 
-/*
- * Mark the boot cpu "online" so that it can call console drivers in
- * printk() and can access its per-cpu storage.
- */
-void smp_prepare_boot_cpu(void);
-
 extern unsigned int setup_max_cpus;
 extern void __init setup_nr_cpu_ids(void);
 extern void __init smp_init(void);
@@ -203,7 +203,6 @@ static inline void up_smp_call_function(smp_call_func_t func, void *info)
 	(up_smp_call_function(func, info))
 
 static inline void smp_send_reschedule(int cpu) { }
-#define smp_prepare_boot_cpu()			do {} while (0)
 #define smp_call_function_many(mask, func, info, wait) \
 	(up_smp_call_function(func, info))
 static inline void call_function_init(void) { }
...
@@ -776,6 +776,10 @@ void __init __weak smp_setup_processor_id(void)
 {
 }
 
+void __init __weak smp_prepare_boot_cpu(void)
+{
+}
+
 # if THREAD_SIZE >= PAGE_SIZE
 void __init __weak thread_stack_cache_init(void)
 {
...
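The generic definition is __weak, so any architecture that provides its own smp_prepare_boot_cpu() overrides this stub at link time; on x86 the strong definition in smpboot.c forwards to smp_ops, as the hunks above show. Pattern sketch:

        /* init/main.c: weak default, used only if no arch overrides it */
        void __init __weak smp_prepare_boot_cpu(void) { }

        /* arch code: a strong definition replaces the weak one */
        void __init smp_prepare_boot_cpu(void)
        {
                smp_ops.smp_prepare_boot_cpu();
        }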