Commit 15f63e30 authored by Michael Ellerman's avatar Michael Ellerman

Merge branch 'topic/cpu-smt' into next

Merge SMT changes we are sharing with the tip tree.
parents 7f965394 d1099e22
...@@ -555,6 +555,7 @@ Description: Control Symmetric Multi Threading (SMT) ...@@ -555,6 +555,7 @@ Description: Control Symmetric Multi Threading (SMT)
================ ========================================= ================ =========================================
"on" SMT is enabled "on" SMT is enabled
"off" SMT is disabled "off" SMT is disabled
"<N>" SMT is enabled with N threads per core.
"forceoff" SMT is force disabled. Cannot be changed. "forceoff" SMT is force disabled. Cannot be changed.
"notsupported" SMT is not supported by the CPU "notsupported" SMT is not supported by the CPU
"notimplemented" SMT runtime toggling is not "notimplemented" SMT runtime toggling is not
......
...@@ -3853,10 +3853,10 @@ ...@@ -3853,10 +3853,10 @@
nosmp [SMP] Tells an SMP kernel to act as a UP kernel, nosmp [SMP] Tells an SMP kernel to act as a UP kernel,
and disable the IO APIC. legacy for "maxcpus=0". and disable the IO APIC. legacy for "maxcpus=0".
nosmt [KNL,MIPS,S390] Disable symmetric multithreading (SMT). nosmt [KNL,MIPS,PPC,S390] Disable symmetric multithreading (SMT).
Equivalent to smt=1. Equivalent to smt=1.
[KNL,X86] Disable symmetric multithreading (SMT). [KNL,X86,PPC] Disable symmetric multithreading (SMT).
nosmt=force: Force disable SMT, cannot be undone nosmt=force: Force disable SMT, cannot be undone
via the sysfs control file. via the sysfs control file.
......
...@@ -34,6 +34,9 @@ config ARCH_HAS_SUBPAGE_FAULTS ...@@ -34,6 +34,9 @@ config ARCH_HAS_SUBPAGE_FAULTS
config HOTPLUG_SMT config HOTPLUG_SMT
bool bool
config SMT_NUM_THREADS_DYNAMIC
bool
# Selected by HOTPLUG_CORE_SYNC_DEAD or HOTPLUG_CORE_SYNC_FULL # Selected by HOTPLUG_CORE_SYNC_DEAD or HOTPLUG_CORE_SYNC_FULL
config HOTPLUG_CORE_SYNC config HOTPLUG_CORE_SYNC
bool bool
......
...@@ -272,6 +272,8 @@ config PPC ...@@ -272,6 +272,8 @@ config PPC
select HAVE_SYSCALL_TRACEPOINTS select HAVE_SYSCALL_TRACEPOINTS
select HAVE_VIRT_CPU_ACCOUNTING select HAVE_VIRT_CPU_ACCOUNTING
select HAVE_VIRT_CPU_ACCOUNTING_GEN select HAVE_VIRT_CPU_ACCOUNTING_GEN
select HOTPLUG_SMT if HOTPLUG_CPU
select SMT_NUM_THREADS_DYNAMIC
select HUGETLB_PAGE_SIZE_VARIABLE if PPC_BOOK3S_64 && HUGETLB_PAGE select HUGETLB_PAGE_SIZE_VARIABLE if PPC_BOOK3S_64 && HUGETLB_PAGE
select IOMMU_HELPER if PPC64 select IOMMU_HELPER if PPC64
select IRQ_DOMAIN select IRQ_DOMAIN
......
...@@ -143,5 +143,20 @@ static inline int cpu_to_coregroup_id(int cpu) ...@@ -143,5 +143,20 @@ static inline int cpu_to_coregroup_id(int cpu)
#endif #endif
#endif #endif
#ifdef CONFIG_HOTPLUG_SMT
#include <linux/cpu_smt.h>
#include <asm/cputhreads.h>
/*
 * The primary thread is the first thread of a core; the generic SMT code
 * only ever offlines secondary threads, so primaries are always allowed.
 */
static inline bool topology_is_primary_thread(unsigned int cpu)
{
return cpu == cpu_first_thread_sibling(cpu);
}
/*
 * With SMT_NUM_THREADS_DYNAMIC a partial SMT state is possible: only the
 * first cpu_smt_num_threads threads of each core may be brought online.
 */
static inline bool topology_smt_thread_allowed(unsigned int cpu)
{
return cpu_thread_in_core(cpu) < cpu_smt_num_threads;
}
#endif
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_TOPOLOGY_H */ #endif /* _ASM_POWERPC_TOPOLOGY_H */
...@@ -1088,7 +1088,7 @@ static int __init init_big_cores(void) ...@@ -1088,7 +1088,7 @@ static int __init init_big_cores(void)
void __init smp_prepare_cpus(unsigned int max_cpus) void __init smp_prepare_cpus(unsigned int max_cpus)
{ {
unsigned int cpu; unsigned int cpu, num_threads;
DBG("smp_prepare_cpus\n"); DBG("smp_prepare_cpus\n");
...@@ -1155,6 +1155,12 @@ void __init smp_prepare_cpus(unsigned int max_cpus) ...@@ -1155,6 +1155,12 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
if (smp_ops && smp_ops->probe) if (smp_ops && smp_ops->probe)
smp_ops->probe(); smp_ops->probe();
// Initialise the generic SMT topology support
num_threads = 1;
if (smt_enabled_at_boot)
num_threads = smt_enabled_at_boot;
cpu_smt_set_num_threads(num_threads, threads_per_core);
} }
void smp_prepare_boot_cpu(void) void smp_prepare_boot_cpu(void)
......
...@@ -398,6 +398,14 @@ static int dlpar_online_cpu(struct device_node *dn) ...@@ -398,6 +398,14 @@ static int dlpar_online_cpu(struct device_node *dn)
for_each_present_cpu(cpu) { for_each_present_cpu(cpu) {
if (get_hard_smp_processor_id(cpu) != thread) if (get_hard_smp_processor_id(cpu) != thread)
continue; continue;
if (!topology_is_primary_thread(cpu)) {
if (cpu_smt_control != CPU_SMT_ENABLED)
break;
if (!topology_smt_thread_allowed(cpu))
break;
}
cpu_maps_update_done(); cpu_maps_update_done();
find_and_update_cpu_nid(cpu); find_and_update_cpu_nid(cpu);
rc = device_online(get_cpu_device(cpu)); rc = device_online(get_cpu_device(cpu));
...@@ -845,15 +853,9 @@ static struct notifier_block pseries_smp_nb = { ...@@ -845,15 +853,9 @@ static struct notifier_block pseries_smp_nb = {
.notifier_call = pseries_smp_notifier, .notifier_call = pseries_smp_notifier,
}; };
static int __init pseries_cpu_hotplug_init(void) void __init pseries_cpu_hotplug_init(void)
{ {
int qcss_tok; int qcss_tok;
unsigned int node;
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
ppc_md.cpu_probe = dlpar_cpu_probe;
ppc_md.cpu_release = dlpar_cpu_release;
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
rtas_stop_self_token = rtas_function_token(RTAS_FN_STOP_SELF); rtas_stop_self_token = rtas_function_token(RTAS_FN_STOP_SELF);
qcss_tok = rtas_function_token(RTAS_FN_QUERY_CPU_STOPPED_STATE); qcss_tok = rtas_function_token(RTAS_FN_QUERY_CPU_STOPPED_STATE);
...@@ -862,12 +864,22 @@ static int __init pseries_cpu_hotplug_init(void) ...@@ -862,12 +864,22 @@ static int __init pseries_cpu_hotplug_init(void)
qcss_tok == RTAS_UNKNOWN_SERVICE) { qcss_tok == RTAS_UNKNOWN_SERVICE) {
printk(KERN_INFO "CPU Hotplug not supported by firmware " printk(KERN_INFO "CPU Hotplug not supported by firmware "
"- disabling.\n"); "- disabling.\n");
return 0; return;
} }
smp_ops->cpu_offline_self = pseries_cpu_offline_self; smp_ops->cpu_offline_self = pseries_cpu_offline_self;
smp_ops->cpu_disable = pseries_cpu_disable; smp_ops->cpu_disable = pseries_cpu_disable;
smp_ops->cpu_die = pseries_cpu_die; smp_ops->cpu_die = pseries_cpu_die;
}
static int __init pseries_dlpar_init(void)
{
unsigned int node;
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
ppc_md.cpu_probe = dlpar_cpu_probe;
ppc_md.cpu_release = dlpar_cpu_release;
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
/* Processors can be added/removed only on LPAR */ /* Processors can be added/removed only on LPAR */
if (firmware_has_feature(FW_FEATURE_LPAR)) { if (firmware_has_feature(FW_FEATURE_LPAR)) {
...@@ -886,4 +898,4 @@ static int __init pseries_cpu_hotplug_init(void) ...@@ -886,4 +898,4 @@ static int __init pseries_cpu_hotplug_init(void)
return 0; return 0;
} }
machine_arch_initcall(pseries, pseries_cpu_hotplug_init); machine_arch_initcall(pseries, pseries_dlpar_init);
...@@ -75,11 +75,13 @@ static inline int dlpar_hp_pmem(struct pseries_hp_errorlog *hp_elog) ...@@ -75,11 +75,13 @@ static inline int dlpar_hp_pmem(struct pseries_hp_errorlog *hp_elog)
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
int dlpar_cpu(struct pseries_hp_errorlog *hp_elog); int dlpar_cpu(struct pseries_hp_errorlog *hp_elog);
void pseries_cpu_hotplug_init(void);
#else #else
static inline int dlpar_cpu(struct pseries_hp_errorlog *hp_elog) static inline int dlpar_cpu(struct pseries_hp_errorlog *hp_elog)
{ {
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
static inline void pseries_cpu_hotplug_init(void) { }
#endif #endif
/* PCI root bridge prepare function override for pseries */ /* PCI root bridge prepare function override for pseries */
......
...@@ -816,6 +816,8 @@ static void __init pSeries_setup_arch(void) ...@@ -816,6 +816,8 @@ static void __init pSeries_setup_arch(void)
/* Discover PIC type and setup ppc_md accordingly */ /* Discover PIC type and setup ppc_md accordingly */
smp_init_pseries(); smp_init_pseries();
// Setup CPU hotplug callbacks
pseries_cpu_hotplug_init();
if (radix_enabled() && !mmu_has_feature(MMU_FTR_GTSE)) if (radix_enabled() && !mmu_has_feature(MMU_FTR_GTSE))
if (!firmware_has_feature(FW_FEATURE_RPT_INVALIDATE)) if (!firmware_has_feature(FW_FEATURE_RPT_INVALIDATE))
......
...@@ -136,10 +136,11 @@ static inline int topology_max_smt_threads(void) ...@@ -136,10 +136,11 @@ static inline int topology_max_smt_threads(void)
return __max_smt_threads; return __max_smt_threads;
} }
#include <linux/cpu_smt.h>
int topology_update_package_map(unsigned int apicid, unsigned int cpu); int topology_update_package_map(unsigned int apicid, unsigned int cpu);
int topology_update_die_map(unsigned int dieid, unsigned int cpu); int topology_update_die_map(unsigned int dieid, unsigned int cpu);
int topology_phys_to_logical_pkg(unsigned int pkg); int topology_phys_to_logical_pkg(unsigned int pkg);
bool topology_smt_supported(void);
extern struct cpumask __cpu_primary_thread_mask; extern struct cpumask __cpu_primary_thread_mask;
#define cpu_primary_thread_mask ((const struct cpumask *)&__cpu_primary_thread_mask) #define cpu_primary_thread_mask ((const struct cpumask *)&__cpu_primary_thread_mask)
...@@ -162,7 +163,6 @@ static inline int topology_phys_to_logical_pkg(unsigned int pkg) { return 0; } ...@@ -162,7 +163,6 @@ static inline int topology_phys_to_logical_pkg(unsigned int pkg) { return 0; }
static inline int topology_max_die_per_package(void) { return 1; } static inline int topology_max_die_per_package(void) { return 1; }
static inline int topology_max_smt_threads(void) { return 1; } static inline int topology_max_smt_threads(void) { return 1; }
static inline bool topology_is_primary_thread(unsigned int cpu) { return true; } static inline bool topology_is_primary_thread(unsigned int cpu) { return true; }
static inline bool topology_smt_supported(void) { return false; }
#endif /* !CONFIG_SMP */ #endif /* !CONFIG_SMP */
static inline void arch_fix_phys_package_id(int num, u32 slot) static inline void arch_fix_phys_package_id(int num, u32 slot)
......
...@@ -2317,7 +2317,7 @@ void __init arch_cpu_finalize_init(void) ...@@ -2317,7 +2317,7 @@ void __init arch_cpu_finalize_init(void)
* identify_boot_cpu() initialized SMT support information, let the * identify_boot_cpu() initialized SMT support information, let the
* core code know. * core code know.
*/ */
cpu_smt_check_topology(); cpu_smt_set_num_threads(smp_num_siblings, smp_num_siblings);
if (!IS_ENABLED(CONFIG_SMP)) { if (!IS_ENABLED(CONFIG_SMP)) {
pr_info("CPU: "); pr_info("CPU: ");
......
...@@ -326,14 +326,6 @@ static void notrace start_secondary(void *unused) ...@@ -326,14 +326,6 @@ static void notrace start_secondary(void *unused)
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
} }
/**
* topology_smt_supported - Check whether SMT is supported by the CPUs
*/
bool topology_smt_supported(void)
{
return smp_num_siblings > 1;
}
/** /**
* topology_phys_to_logical_pkg - Map a physical package id to a logical * topology_phys_to_logical_pkg - Map a physical package id to a logical
* @phys_pkg: The physical package id to map * @phys_pkg: The physical package id to map
......
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
#include <linux/compiler.h> #include <linux/compiler.h>
#include <linux/cpumask.h> #include <linux/cpumask.h>
#include <linux/cpuhotplug.h> #include <linux/cpuhotplug.h>
#include <linux/cpu_smt.h>
struct device; struct device;
struct device_node; struct device_node;
...@@ -204,30 +205,6 @@ void cpuhp_report_idle_dead(void); ...@@ -204,30 +205,6 @@ void cpuhp_report_idle_dead(void);
static inline void cpuhp_report_idle_dead(void) { } static inline void cpuhp_report_idle_dead(void) { }
#endif /* #ifdef CONFIG_HOTPLUG_CPU */ #endif /* #ifdef CONFIG_HOTPLUG_CPU */
enum cpuhp_smt_control {
CPU_SMT_ENABLED,
CPU_SMT_DISABLED,
CPU_SMT_FORCE_DISABLED,
CPU_SMT_NOT_SUPPORTED,
CPU_SMT_NOT_IMPLEMENTED,
};
#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
extern enum cpuhp_smt_control cpu_smt_control;
extern void cpu_smt_disable(bool force);
extern void cpu_smt_check_topology(void);
extern bool cpu_smt_possible(void);
extern int cpuhp_smt_enable(void);
extern int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval);
#else
# define cpu_smt_control (CPU_SMT_NOT_IMPLEMENTED)
static inline void cpu_smt_disable(bool force) { }
static inline void cpu_smt_check_topology(void) { }
static inline bool cpu_smt_possible(void) { return false; }
static inline int cpuhp_smt_enable(void) { return 0; }
static inline int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) { return 0; }
#endif
extern bool cpu_mitigations_off(void); extern bool cpu_mitigations_off(void);
extern bool cpu_mitigations_auto_nosmt(void); extern bool cpu_mitigations_auto_nosmt(void);
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CPU_SMT_H_
#define _LINUX_CPU_SMT_H_
/*
 * Runtime SMT control state, split out of <linux/cpu.h> so that arch
 * topology headers can include it without pulling in all of cpu.h.
 */
enum cpuhp_smt_control {
CPU_SMT_ENABLED,
CPU_SMT_DISABLED,
CPU_SMT_FORCE_DISABLED,		/* disabled on the command line, cannot be re-enabled */
CPU_SMT_NOT_SUPPORTED,		/* CPU has no SMT siblings */
CPU_SMT_NOT_IMPLEMENTED,	/* arch does not implement SMT control */
};
#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
extern enum cpuhp_smt_control cpu_smt_control;
/* Number of threads per core currently allowed online (1..max_threads). */
extern unsigned int cpu_smt_num_threads;
extern void cpu_smt_disable(bool force);
/* Called from arch code once topology is known; see kernel/cpu.c. */
extern void cpu_smt_set_num_threads(unsigned int num_threads,
unsigned int max_threads);
extern bool cpu_smt_possible(void);
extern int cpuhp_smt_enable(void);
extern int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval);
#else
/* !HOTPLUG_SMT: SMT control is a no-op, a single thread per core is assumed. */
# define cpu_smt_control (CPU_SMT_NOT_IMPLEMENTED)
# define cpu_smt_num_threads 1
static inline void cpu_smt_disable(bool force) { }
static inline void cpu_smt_set_num_threads(unsigned int num_threads,
unsigned int max_threads) { }
static inline bool cpu_smt_possible(void) { return false; }
static inline int cpuhp_smt_enable(void) { return 0; }
static inline int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) { return 0; }
#endif
#endif /* _LINUX_CPU_SMT_H_ */
...@@ -592,7 +592,10 @@ static void lockdep_release_cpus_lock(void) ...@@ -592,7 +592,10 @@ static void lockdep_release_cpus_lock(void)
void __weak arch_smt_update(void) { } void __weak arch_smt_update(void) { }
#ifdef CONFIG_HOTPLUG_SMT #ifdef CONFIG_HOTPLUG_SMT
enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED; enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
static unsigned int cpu_smt_max_threads __ro_after_init;
unsigned int cpu_smt_num_threads __read_mostly = UINT_MAX;
void __init cpu_smt_disable(bool force) void __init cpu_smt_disable(bool force)
{ {
...@@ -606,16 +609,33 @@ void __init cpu_smt_disable(bool force) ...@@ -606,16 +609,33 @@ void __init cpu_smt_disable(bool force)
pr_info("SMT: disabled\n"); pr_info("SMT: disabled\n");
cpu_smt_control = CPU_SMT_DISABLED; cpu_smt_control = CPU_SMT_DISABLED;
} }
cpu_smt_num_threads = 1;
} }
/* /*
* The decision whether SMT is supported can only be done after the full * The decision whether SMT is supported can only be done after the full
* CPU identification. Called from architecture code. * CPU identification. Called from architecture code.
*/ */
void __init cpu_smt_check_topology(void) void __init cpu_smt_set_num_threads(unsigned int num_threads,
unsigned int max_threads)
{ {
if (!topology_smt_supported()) WARN_ON(!num_threads || (num_threads > max_threads));
if (max_threads == 1)
cpu_smt_control = CPU_SMT_NOT_SUPPORTED; cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
cpu_smt_max_threads = max_threads;
/*
* If SMT has been disabled via the kernel command line or SMT is
* not supported, set cpu_smt_num_threads to 1 for consistency.
* If enabled, take the architecture requested number of threads
* to bring up into account.
*/
if (cpu_smt_control != CPU_SMT_ENABLED)
cpu_smt_num_threads = 1;
else if (num_threads < cpu_smt_num_threads)
cpu_smt_num_threads = num_threads;
} }
static int __init smt_cmdline_disable(char *str) static int __init smt_cmdline_disable(char *str)
...@@ -625,9 +645,23 @@ static int __init smt_cmdline_disable(char *str) ...@@ -625,9 +645,23 @@ static int __init smt_cmdline_disable(char *str)
} }
early_param("nosmt", smt_cmdline_disable); early_param("nosmt", smt_cmdline_disable);
/*
 * For architectures supporting partial SMT states, check if the thread is
 * allowed. Otherwise this has already been checked through
 * cpu_smt_max_threads when setting the SMT level.
 */
static inline bool cpu_smt_thread_allowed(unsigned int cpu)
{
#ifdef CONFIG_SMT_NUM_THREADS_DYNAMIC
return topology_smt_thread_allowed(cpu);
#else
return true;
#endif
}
static inline bool cpu_smt_allowed(unsigned int cpu) static inline bool cpu_smt_allowed(unsigned int cpu)
{ {
if (cpu_smt_control == CPU_SMT_ENABLED) if (cpu_smt_control == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu))
return true; return true;
if (topology_is_primary_thread(cpu)) if (topology_is_primary_thread(cpu))
...@@ -650,22 +684,8 @@ bool cpu_smt_possible(void) ...@@ -650,22 +684,8 @@ bool cpu_smt_possible(void)
} }
EXPORT_SYMBOL_GPL(cpu_smt_possible); EXPORT_SYMBOL_GPL(cpu_smt_possible);
static inline bool cpuhp_smt_aware(void)
{
return topology_smt_supported();
}
static inline const struct cpumask *cpuhp_get_primary_thread_mask(void)
{
return cpu_primary_thread_mask;
}
#else #else
static inline bool cpu_smt_allowed(unsigned int cpu) { return true; } static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
static inline bool cpuhp_smt_aware(void) { return false; }
static inline const struct cpumask *cpuhp_get_primary_thread_mask(void)
{
return cpu_present_mask;
}
#endif #endif
static inline enum cpuhp_state static inline enum cpuhp_state
...@@ -1793,6 +1813,16 @@ static int __init parallel_bringup_parse_param(char *arg) ...@@ -1793,6 +1813,16 @@ static int __init parallel_bringup_parse_param(char *arg)
} }
early_param("cpuhp.parallel", parallel_bringup_parse_param); early_param("cpuhp.parallel", parallel_bringup_parse_param);
/*
 * SMT awareness for parallel bringup: based on the arch-reported maximum
 * thread count rather than an arch hook, so it works on any HOTPLUG_SMT arch.
 */
static inline bool cpuhp_smt_aware(void)
{
return cpu_smt_max_threads > 1;
}
/* Mask of primary threads, brought up first during parallel bringup. */
static inline const struct cpumask *cpuhp_get_primary_thread_mask(void)
{
return cpu_primary_thread_mask;
}
/* /*
* On architectures which have enabled parallel bringup this invokes all BP * On architectures which have enabled parallel bringup this invokes all BP
* prepare states for each of the to be onlined APs first. The last state * prepare states for each of the to be onlined APs first. The last state
...@@ -2626,6 +2656,12 @@ int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) ...@@ -2626,6 +2656,12 @@ int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
for_each_online_cpu(cpu) { for_each_online_cpu(cpu) {
if (topology_is_primary_thread(cpu)) if (topology_is_primary_thread(cpu))
continue; continue;
/*
* Disable can be called with CPU_SMT_ENABLED when changing
* from a higher to lower number of SMT threads per core.
*/
if (ctrlval == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu))
continue;
ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE); ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
if (ret) if (ret)
break; break;
...@@ -2660,6 +2696,8 @@ int cpuhp_smt_enable(void) ...@@ -2660,6 +2696,8 @@ int cpuhp_smt_enable(void)
/* Skip online CPUs and CPUs on offline nodes */ /* Skip online CPUs and CPUs on offline nodes */
if (cpu_online(cpu) || !node_online(cpu_to_node(cpu))) if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
continue; continue;
if (!cpu_smt_thread_allowed(cpu))
continue;
ret = _cpu_up(cpu, 0, CPUHP_ONLINE); ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
if (ret) if (ret)
break; break;
...@@ -2838,20 +2876,19 @@ static const struct attribute_group cpuhp_cpu_root_attr_group = { ...@@ -2838,20 +2876,19 @@ static const struct attribute_group cpuhp_cpu_root_attr_group = {
#ifdef CONFIG_HOTPLUG_SMT #ifdef CONFIG_HOTPLUG_SMT
/*
 * Validate a thread count written to the sysfs control file. Archs with
 * SMT_NUM_THREADS_DYNAMIC accept any value in 1..cpu_smt_max_threads;
 * others only support fully off (1) or fully on (cpu_smt_max_threads).
 */
static bool cpu_smt_num_threads_valid(unsigned int threads)
{
if (IS_ENABLED(CONFIG_SMT_NUM_THREADS_DYNAMIC))
return threads >= 1 && threads <= cpu_smt_max_threads;
return threads == 1 || threads == cpu_smt_max_threads;
}
static ssize_t static ssize_t
__store_smt_control(struct device *dev, struct device_attribute *attr, __store_smt_control(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count) const char *buf, size_t count)
{ {
int ctrlval, ret; int ctrlval, ret, num_threads, orig_threads;
bool force_off;
if (sysfs_streq(buf, "on"))
ctrlval = CPU_SMT_ENABLED;
else if (sysfs_streq(buf, "off"))
ctrlval = CPU_SMT_DISABLED;
else if (sysfs_streq(buf, "forceoff"))
ctrlval = CPU_SMT_FORCE_DISABLED;
else
return -EINVAL;
if (cpu_smt_control == CPU_SMT_FORCE_DISABLED) if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
return -EPERM; return -EPERM;
...@@ -2859,21 +2896,39 @@ __store_smt_control(struct device *dev, struct device_attribute *attr, ...@@ -2859,21 +2896,39 @@ __store_smt_control(struct device *dev, struct device_attribute *attr,
if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED) if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
return -ENODEV; return -ENODEV;
if (sysfs_streq(buf, "on")) {
ctrlval = CPU_SMT_ENABLED;
num_threads = cpu_smt_max_threads;
} else if (sysfs_streq(buf, "off")) {
ctrlval = CPU_SMT_DISABLED;
num_threads = 1;
} else if (sysfs_streq(buf, "forceoff")) {
ctrlval = CPU_SMT_FORCE_DISABLED;
num_threads = 1;
} else if (kstrtoint(buf, 10, &num_threads) == 0) {
if (num_threads == 1)
ctrlval = CPU_SMT_DISABLED;
else if (cpu_smt_num_threads_valid(num_threads))
ctrlval = CPU_SMT_ENABLED;
else
return -EINVAL;
} else {
return -EINVAL;
}
ret = lock_device_hotplug_sysfs(); ret = lock_device_hotplug_sysfs();
if (ret) if (ret)
return ret; return ret;
if (ctrlval != cpu_smt_control) { orig_threads = cpu_smt_num_threads;
switch (ctrlval) { cpu_smt_num_threads = num_threads;
case CPU_SMT_ENABLED:
force_off = ctrlval != cpu_smt_control && ctrlval == CPU_SMT_FORCE_DISABLED;
if (num_threads > orig_threads)
ret = cpuhp_smt_enable(); ret = cpuhp_smt_enable();
break; else if (num_threads < orig_threads || force_off)
case CPU_SMT_DISABLED:
case CPU_SMT_FORCE_DISABLED:
ret = cpuhp_smt_disable(ctrlval); ret = cpuhp_smt_disable(ctrlval);
break;
}
}
unlock_device_hotplug(); unlock_device_hotplug();
return ret ? ret : count; return ret ? ret : count;
...@@ -2901,6 +2956,17 @@ static ssize_t control_show(struct device *dev, ...@@ -2901,6 +2956,17 @@ static ssize_t control_show(struct device *dev,
{ {
const char *state = smt_states[cpu_smt_control]; const char *state = smt_states[cpu_smt_control];
#ifdef CONFIG_HOTPLUG_SMT
/*
* If SMT is enabled but not all threads are enabled then show the
* number of threads. If all threads are enabled show "on". Otherwise
* show the state name.
*/
if (cpu_smt_control == CPU_SMT_ENABLED &&
cpu_smt_num_threads != cpu_smt_max_threads)
return sysfs_emit(buf, "%d\n", cpu_smt_num_threads);
#endif
return snprintf(buf, PAGE_SIZE - 2, "%s\n", state); return snprintf(buf, PAGE_SIZE - 2, "%s\n", state);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment