Commit 056612fd authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'x86-cleanups-2023-02-20' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull miscellaneous x86 cleanups from Thomas Gleixner:

 - Correct the common copy and pasted mishandling of kstrtobool() in the
   strict_sas_size() setup function

 - Make recalibrate_cpu_khz() a GPL-only export

 - Check TSC feature before doing anything else which avoids pointless
   code execution if TSC is not available

 - Remove or fixup stale and misleading comments

 - Remove unused or pointlessly duplicated variables

 - Spelling and typo fixes

* tag 'x86-cleanups-2023-02-20' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/hotplug: Remove incorrect comment about mwait_play_dead()
  x86/tsc: Do feature check as the very first thing
  x86/tsc: Make recalibrate_cpu_khz() export GPL only
  x86/cacheinfo: Remove unused trace variable
  x86/Kconfig: Fix spellos & punctuation
  x86/signal: Fix the value returned by strict_sas_size()
  x86/cpu: Remove misleading comment
  x86/setup: Move duplicate boot_cpu_data definition out of the ifdeffery
  x86/boot/e820: Fix typo in e820.c comment
parents 3f0b0903 fcb3a81d
...@@ -1502,7 +1502,7 @@ config X86_5LEVEL ...@@ -1502,7 +1502,7 @@ config X86_5LEVEL
depends on X86_64 depends on X86_64
help help
5-level paging enables access to larger address space: 5-level paging enables access to larger address space:
upto 128 PiB of virtual address space and 4 PiB of up to 128 PiB of virtual address space and 4 PiB of
physical address space. physical address space.
It will be supported by future Intel CPUs. It will be supported by future Intel CPUs.
...@@ -2609,8 +2609,8 @@ config CALL_THUNKS_DEBUG ...@@ -2609,8 +2609,8 @@ config CALL_THUNKS_DEBUG
a noisy dmesg about callthunks generation and call patching for a noisy dmesg about callthunks generation and call patching for
trouble shooting. The debug prints need to be enabled on the trouble shooting. The debug prints need to be enabled on the
kernel command line with 'debug-callthunks'. kernel command line with 'debug-callthunks'.
Only enable this, when you are debugging call thunks as this Only enable this when you are debugging call thunks as this
creates a noticable runtime overhead. If unsure say N. creates a noticeable runtime overhead. If unsure say N.
config CPU_IBPB_ENTRY config CPU_IBPB_ENTRY
bool "Enable IBPB on kernel entry" bool "Enable IBPB on kernel entry"
......
...@@ -734,7 +734,7 @@ void init_hygon_cacheinfo(struct cpuinfo_x86 *c) ...@@ -734,7 +734,7 @@ void init_hygon_cacheinfo(struct cpuinfo_x86 *c)
void init_intel_cacheinfo(struct cpuinfo_x86 *c) void init_intel_cacheinfo(struct cpuinfo_x86 *c)
{ {
/* Cache sizes */ /* Cache sizes */
unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; unsigned int l1i = 0, l1d = 0, l2 = 0, l3 = 0;
unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */ unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */ unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb; unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
...@@ -835,9 +835,6 @@ void init_intel_cacheinfo(struct cpuinfo_x86 *c) ...@@ -835,9 +835,6 @@ void init_intel_cacheinfo(struct cpuinfo_x86 *c)
case LVL_3: case LVL_3:
l3 += cache_table[k].size; l3 += cache_table[k].size;
break; break;
case LVL_TRACE:
trace += cache_table[k].size;
break;
} }
break; break;
......
...@@ -567,7 +567,7 @@ static __init int setup_disable_pku(char *arg) ...@@ -567,7 +567,7 @@ static __init int setup_disable_pku(char *arg)
return 1; return 1;
} }
__setup("nopku", setup_disable_pku); __setup("nopku", setup_disable_pku);
#endif /* CONFIG_X86_64 */ #endif
#ifdef CONFIG_X86_KERNEL_IBT #ifdef CONFIG_X86_KERNEL_IBT
......
...@@ -53,7 +53,7 @@ ...@@ -53,7 +53,7 @@
* *
* Once the E820 map has been converted to the standard Linux memory layout * Once the E820 map has been converted to the standard Linux memory layout
* information its role stops - modifying it has no effect and does not get * information its role stops - modifying it has no effect and does not get
* re-propagated. So itsmain role is a temporary bootstrap storage of firmware * re-propagated. So its main role is a temporary bootstrap storage of firmware
* specific memory layout data during early bootup. * specific memory layout data during early bootup.
*/ */
static struct e820_table e820_table_init __initdata; static struct e820_table e820_table_init __initdata;
...@@ -395,7 +395,7 @@ int __init e820__update_table(struct e820_table *table) ...@@ -395,7 +395,7 @@ int __init e820__update_table(struct e820_table *table)
/* Continue building up new map based on this information: */ /* Continue building up new map based on this information: */
if (current_type != last_type || e820_nomerge(current_type)) { if (current_type != last_type || e820_nomerge(current_type)) {
if (last_type != 0) { if (last_type) {
new_entries[new_nr_entries].size = change_point[chg_idx]->addr - last_addr; new_entries[new_nr_entries].size = change_point[chg_idx]->addr - last_addr;
/* Move forward only if the new size was non-zero: */ /* Move forward only if the new size was non-zero: */
if (new_entries[new_nr_entries].size != 0) if (new_entries[new_nr_entries].size != 0)
...@@ -403,7 +403,7 @@ int __init e820__update_table(struct e820_table *table) ...@@ -403,7 +403,7 @@ int __init e820__update_table(struct e820_table *table)
if (++new_nr_entries >= max_nr_entries) if (++new_nr_entries >= max_nr_entries)
break; break;
} }
if (current_type != 0) { if (current_type) {
new_entries[new_nr_entries].addr = change_point[chg_idx]->addr; new_entries[new_nr_entries].addr = change_point[chg_idx]->addr;
new_entries[new_nr_entries].type = current_type; new_entries[new_nr_entries].type = current_type;
last_addr = change_point[chg_idx]->addr; last_addr = change_point[chg_idx]->addr;
......
...@@ -114,11 +114,6 @@ static struct resource bss_resource = { ...@@ -114,11 +114,6 @@ static struct resource bss_resource = {
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
/* CPU data as detected by the assembly code in head_32.S */ /* CPU data as detected by the assembly code in head_32.S */
struct cpuinfo_x86 new_cpu_data; struct cpuinfo_x86 new_cpu_data;
/* Common CPU data for all CPUs */
struct cpuinfo_x86 boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);
unsigned int def_to_bigsmp; unsigned int def_to_bigsmp;
struct apm_info apm_info; struct apm_info apm_info;
...@@ -132,11 +127,10 @@ EXPORT_SYMBOL(ist_info); ...@@ -132,11 +127,10 @@ EXPORT_SYMBOL(ist_info);
struct ist_info ist_info; struct ist_info ist_info;
#endif #endif
#else
struct cpuinfo_x86 boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);
#endif #endif
struct cpuinfo_x86 boot_cpu_data __read_mostly;
EXPORT_SYMBOL(boot_cpu_data);
#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64) #if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
__visible unsigned long mmu_cr4_features __ro_after_init; __visible unsigned long mmu_cr4_features __ro_after_init;
......
...@@ -360,7 +360,7 @@ static bool strict_sigaltstack_size __ro_after_init = false; ...@@ -360,7 +360,7 @@ static bool strict_sigaltstack_size __ro_after_init = false;
static int __init strict_sas_size(char *arg) static int __init strict_sas_size(char *arg)
{ {
return kstrtobool(arg, &strict_sigaltstack_size); return kstrtobool(arg, &strict_sigaltstack_size) == 0;
} }
__setup("strict_sas_size", strict_sas_size); __setup("strict_sas_size", strict_sas_size);
......
...@@ -1833,7 +1833,7 @@ void native_play_dead(void) ...@@ -1833,7 +1833,7 @@ void native_play_dead(void)
play_dead_common(); play_dead_common();
tboot_shutdown(TB_SHUTDOWN_WFS); tboot_shutdown(TB_SHUTDOWN_WFS);
mwait_play_dead(); /* Only returns on failure */ mwait_play_dead();
if (cpuidle_play_dead()) if (cpuidle_play_dead())
hlt_play_dead(); hlt_play_dead();
} }
......
...@@ -911,8 +911,7 @@ void recalibrate_cpu_khz(void) ...@@ -911,8 +911,7 @@ void recalibrate_cpu_khz(void)
cpu_khz_old, cpu_khz); cpu_khz_old, cpu_khz);
#endif #endif
} }
EXPORT_SYMBOL_GPL(recalibrate_cpu_khz);
EXPORT_SYMBOL(recalibrate_cpu_khz);
static unsigned long long cyc2ns_suspend; static unsigned long long cyc2ns_suspend;
...@@ -1509,6 +1508,11 @@ void __init tsc_early_init(void) ...@@ -1509,6 +1508,11 @@ void __init tsc_early_init(void)
void __init tsc_init(void) void __init tsc_init(void)
{ {
if (!cpu_feature_enabled(X86_FEATURE_TSC)) {
setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
return;
}
/* /*
* native_calibrate_cpu_early can only calibrate using methods that are * native_calibrate_cpu_early can only calibrate using methods that are
* available early in boot. * available early in boot.
...@@ -1516,11 +1520,6 @@ void __init tsc_init(void) ...@@ -1516,11 +1520,6 @@ void __init tsc_init(void)
if (x86_platform.calibrate_cpu == native_calibrate_cpu_early) if (x86_platform.calibrate_cpu == native_calibrate_cpu_early)
x86_platform.calibrate_cpu = native_calibrate_cpu; x86_platform.calibrate_cpu = native_calibrate_cpu;
if (!boot_cpu_has(X86_FEATURE_TSC)) {
setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
return;
}
if (!tsc_khz) { if (!tsc_khz) {
/* We failed to determine frequencies earlier, try again */ /* We failed to determine frequencies earlier, try again */
if (!determine_cpu_tsc_frequencies(false)) { if (!determine_cpu_tsc_frequencies(false)) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment