Commit 2c773dd3 authored by Jan Beulich, committed by Thomas Gleixner

x86: Convert a few more per-CPU items to read-mostly ones

Both this_cpu_off and cpu_info aren't getting modified post boot, yet
are being accessed on enough code paths that grouping them with other
frequently read items seems desirable. For cpu_info this at the same
time implies removing the cache line alignment (which afaict became
pointless when it got converted to per-CPU data years ago).
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Link: http://lkml.kernel.org/r/54589BD20200007800044A84@mail.emea.novell.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent fb7183ef
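
For context on what the new annotations do: the _READ_MOSTLY per-CPU variants place their variables into a dedicated "read mostly" per-CPU sub-section, so data that is rarely written after boot is grouped together instead of sharing cache lines with frequently written per-CPU data. A condensed sketch of the relevant macros, paraphrased from include/linux/percpu-defs.h (not a verbatim copy; surrounding guards omitted):

/* Paraphrased from include/linux/percpu-defs.h; not verbatim. */
#define DECLARE_PER_CPU_READ_MOSTLY(type, name)				\
	DECLARE_PER_CPU_SECTION(type, name, "..read_mostly")

#define DEFINE_PER_CPU_READ_MOSTLY(type, name)				\
	DEFINE_PER_CPU_SECTION(type, name, "..read_mostly")

/* For comparison, the variants this patch replaces: */
#define DEFINE_PER_CPU(type, name)					\
	DEFINE_PER_CPU_SECTION(type, name, "")

#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)			\
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
	____cacheline_aligned_in_smp

After this patch, this_cpu_off and cpu_info therefore land next to other read-mostly per-CPU items such as cpu_llc_shared_map, visible in the smpboot.c hunk below.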
arch/x86/include/asm/percpu.h
@@ -522,7 +522,7 @@ static inline int x86_this_cpu_variable_test_bit(int nr,
 
 #include <asm-generic/percpu.h>
 
 /* We can use this directly for local CPU (faster). */
-DECLARE_PER_CPU(unsigned long, this_cpu_off);
+DECLARE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off);
 
 #endif /* !__ASSEMBLY__ */
arch/x86/include/asm/processor.h
@@ -127,7 +127,7 @@ struct cpuinfo_x86 {
 	/* Index into per_cpu list: */
 	u16			cpu_index;
 	u32			microcode;
-} __attribute__((__aligned__(SMP_CACHE_BYTES)));
+};
 
 #define X86_VENDOR_INTEL	0
 #define X86_VENDOR_CYRIX	1
@@ -151,7 +151,7 @@ extern __u32 cpu_caps_cleared[NCAPINTS];
 extern __u32			cpu_caps_set[NCAPINTS];
 
 #ifdef CONFIG_SMP
-DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
+DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
 #define cpu_data(cpu)		per_cpu(cpu_info, cpu)
 #else
 #define cpu_info		boot_cpu_data
arch/x86/kernel/setup_percpu.c
@@ -30,7 +30,7 @@ EXPORT_PER_CPU_SYMBOL(cpu_number);
 #define BOOT_PERCPU_OFFSET 0
 #endif
 
-DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
+DEFINE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
 
 unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
arch/x86/kernel/smpboot.c
@@ -99,7 +99,7 @@ EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
 
 /* Per CPU bogomips and other parameters */
-DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
+DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
 EXPORT_PER_CPU_SYMBOL(cpu_info);
 
 atomic_t init_deasserted;
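
As a reminder of why these two symbols sit on hot read paths, here is a purely illustrative read-side use that is not part of this patch; the helper name is made up, while cpu_data() and the x86 family field come from the processor.h declarations shown above:

/* Hypothetical example, not part of this commit. */
static unsigned int example_cpu_family(int cpu)
{
	/* cpu_data(cpu) expands to per_cpu(cpu_info, cpu); a pure read. */
	return cpu_data(cpu).x86;	/* CPU family, fixed after boot */
}

/*
 * this_cpu_off is read implicitly on x86 whenever this_cpu_ptr() and
 * related accessors resolve the local CPU's per-CPU base, which is why
 * it is equally read-hot despite never being written after boot.
 */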