Commit 31b164e2 authored by Yang Yingliang, committed by Peter Zijlstra

sched/smt: Introduce sched_smt_present_inc/dec() helper

Introduce the sched_smt_present_inc()/dec() helpers so they can be called
simply from both the normal and the error paths. No functional change.

Cc: stable@kernel.org
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20240703031610.587047-2-yangyingliang@huaweicloud.com
parent 77baa5ba
@@ -7895,6 +7895,22 @@ static int cpuset_cpu_inactive(unsigned int cpu)
 	return 0;
 }
 
+static inline void sched_smt_present_inc(int cpu)
+{
+#ifdef CONFIG_SCHED_SMT
+	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+		static_branch_inc_cpuslocked(&sched_smt_present);
+#endif
+}
+
+static inline void sched_smt_present_dec(int cpu)
+{
+#ifdef CONFIG_SCHED_SMT
+	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+		static_branch_dec_cpuslocked(&sched_smt_present);
+#endif
+}
+
 int sched_cpu_activate(unsigned int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
@@ -7906,13 +7922,10 @@ int sched_cpu_activate(unsigned int cpu)
 	 */
 	balance_push_set(cpu, false);
 
-#ifdef CONFIG_SCHED_SMT
 	/*
 	 * When going up, increment the number of cores with SMT present.
 	 */
-	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
-		static_branch_inc_cpuslocked(&sched_smt_present);
-#endif
+	sched_smt_present_inc(cpu);
 	set_cpu_active(cpu, true);
 
 	if (sched_smp_initialized) {
@@ -7981,13 +7994,12 @@ int sched_cpu_deactivate(unsigned int cpu)
 	}
 	rq_unlock_irqrestore(rq, &rf);
 
-#ifdef CONFIG_SCHED_SMT
 	/*
 	 * When going down, decrement the number of cores with SMT present.
 	 */
-	if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
-		static_branch_dec_cpuslocked(&sched_smt_present);
+	sched_smt_present_dec(cpu);
 
+#ifdef CONFIG_SCHED_SMT
 	sched_core_cpu_deactivate(cpu);
 #endif
...
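
The point of factoring these helpers out is that a later fix can call them from an error/rollback path as well as from the normal hotplug path. Below is a minimal, hypothetical sketch of such a rollback caller; the exact unwind sequence is an assumption for illustration only and is not part of this commit. It uses only identifiers already visible in the hunks above (sched_smt_present_inc()/dec(), cpuset_cpu_inactive(), set_cpu_active()).

/*
 * Hypothetical sketch (not part of this commit): with the helpers in place,
 * a late failure in sched_cpu_deactivate() can re-increment the
 * sched_smt_present key with a single call, keeping it balanced against the
 * earlier sched_smt_present_dec(). Other teardown/rollback steps are elided.
 */
int sched_cpu_deactivate(unsigned int cpu)
{
	int ret;

	/* ... earlier teardown elided ... */

	/* Going down: decrement the number of cores with SMT present. */
	sched_smt_present_dec(cpu);

	ret = cpuset_cpu_inactive(cpu);
	if (ret) {
		/* Roll back: keep the SMT static key balanced on failure. */
		sched_smt_present_inc(cpu);
		set_cpu_active(cpu, true);
		return ret;
	}

	/* ... rest of the teardown elided ... */
	return 0;
}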