Commit 97450eb9 authored by Vincent Guittot, committed by Ingo Molnar

sched/pelt: Remove shift of thermal clock

The optional shift of the clock used by the thermal/HW load average was
introduced to handle the case where the signal was not always a high-frequency
HW signal. Now that cpufreq provides a signal for firmware and
SW pressure, we can remove this exception and always keep this PELT signal
aligned with the other signals.
Mark the sched_thermal_decay_shift= boot parameter as deprecated.
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Tested-by: Lukasz Luba <lukasz.luba@arm.com>
Reviewed-by: Qais Yousef <qyousef@layalina.io>
Reviewed-by: Lukasz Luba <lukasz.luba@arm.com>
Link: https://lore.kernel.org/r/20240326091616.3696851-6-vincent.guittot@linaro.org
parent d4dbc991
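To make the change above concrete, here is a minimal user-space sketch (not kernel code) of how the clock fed into the hw/thermal PELT signal changes with this patch: before, the removed rq_clock_hw() right-shifted the task clock by sched_hw_decay_shift, stretching the signal's decay period; afterwards the signal is updated with the plain task clock, like the other PELT signals. The *_before/*_after helper names and the example values are made up for illustration.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: sched_hw_decay_shift and the right shift are taken from
 * the removed rq_clock_hw() further down; the wrappers below are invented. */
static int sched_hw_decay_shift = 2;	/* example boot-time value */

static uint64_t hw_clock_before(uint64_t rq_clock_task_ns)
{
	/* old: hw/thermal PELT saw a slowed-down clock (decay period x 2^shift) */
	return rq_clock_task_ns >> sched_hw_decay_shift;
}

static uint64_t hw_clock_after(uint64_t rq_clock_task_ns)
{
	/* new: same clock as the rt/dl/irq PELT signals */
	return rq_clock_task_ns;
}

int main(void)
{
	uint64_t now = 128000000;	/* 128 ms expressed in ns */

	printf("before: %llu ns, after: %llu ns\n",
	       (unsigned long long)hw_clock_before(now),
	       (unsigned long long)hw_clock_after(now));
	return 0;
}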
@@ -5807,6 +5807,7 @@
 			but is useful for debugging and performance tuning.
 	sched_thermal_decay_shift=
+			[Deprecated]
 			[KNL, SMP] Set a decay shift for scheduler thermal
 			pressure signal. Thermal pressure signal follows the
 			default decay period of other scheduler pelt
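As a quick illustration of the decay-period arithmetic this parameter controlled, here is a minimal sketch assuming the 32 ms base PELT period listed in the removed sched.h comment further down: each shift step doubles the effective decay period.

#include <stdio.h>

/* Sketch: base PELT decay period is 32 ms; a clock shift of N multiplies
 * the effective decay period by 2^N (matching the removed table below). */
int main(void)
{
	for (int shift = 0; shift <= 4; shift++)
		printf("shift %d -> decay period %d ms\n", shift, 32 << shift);
	return 0;
}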
@@ -5680,7 +5680,7 @@ void sched_tick(void)
 	update_rq_clock(rq);
 	hw_pressure = arch_scale_hw_pressure(cpu_of(rq));
-	update_hw_load_avg(rq_clock_hw(rq), rq, hw_pressure);
+	update_hw_load_avg(rq_clock_task(rq), rq, hw_pressure);
 	curr->sched_class->task_tick(rq, curr, 0);
 	if (sched_feat(LATENCY_WARN))
 		resched_latency = cpu_resched_latency(rq);
@@ -78,15 +78,9 @@ static unsigned int normalized_sysctl_sched_base_slice = 750000ULL;
 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
-int sched_hw_decay_shift;
 static int __init setup_sched_thermal_decay_shift(char *str)
 {
-	int _shift = 0;
-	if (kstrtoint(str, 0, &_shift))
-		pr_warn("Unable to set scheduler thermal pressure decay shift parameter\n");
-	sched_hw_decay_shift = clamp(_shift, 0, 10);
+	pr_warn("Ignoring the deprecated sched_thermal_decay_shift= option\n");
 	return 1;
 }
 __setup("sched_thermal_decay_shift=", setup_sched_thermal_decay_shift);
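For illustration, a stand-alone sketch of what the removed handler did with the boot value; this is an approximation in which kstrtoint() and clamp() are emulated with strtol() and explicit bounds. Out-of-range values were clipped to 0..10 and parse failures left the shift at 0, whereas with this patch the value is simply ignored and a deprecation warning is printed.

#include <stdio.h>
#include <stdlib.h>

/* Approximation of the removed setup_sched_thermal_decay_shift() parsing. */
static int parse_decay_shift(const char *str)
{
	char *end;
	long v = strtol(str, &end, 0);

	if (end == str || *end != '\0') {
		fprintf(stderr, "Unable to set scheduler thermal pressure decay shift parameter\n");
		v = 0;
	}
	if (v < 0)			/* clamp(v, 0, 10) */
		v = 0;
	else if (v > 10)
		v = 10;
	return (int)v;
}

int main(void)
{
	printf("%d %d %d\n", parse_decay_shift("3"),	/* 3  */
	       parse_decay_shift("99"),			/* 10 */
	       parse_decay_shift("oops"));		/* 0, with a warning */
	return 0;
}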
@@ -9367,7 +9361,7 @@ static bool __update_blocked_others(struct rq *rq, bool *done)
 	decayed = update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) |
 		  update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) |
-		  update_hw_load_avg(rq_clock_hw(rq), rq, hw_pressure) |
+		  update_hw_load_avg(now, rq, hw_pressure) |
 		  update_irq_load_avg(rq, 0);
 	if (others_have_blocked(rq))
@@ -1550,24 +1550,6 @@ static inline u64 rq_clock_task(struct rq *rq)
 	return rq->clock_task;
 }
-/**
- * By default the decay is the default pelt decay period.
- * The decay shift can change the decay period in
- * multiples of 32.
- * Decay shift	Decay period(ms)
- *	0	 32
- *	1	 64
- *	2	128
- *	3	256
- *	4	512
- */
-extern int sched_hw_decay_shift;
-static inline u64 rq_clock_hw(struct rq *rq)
-{
-	return rq_clock_task(rq) >> sched_hw_decay_shift;
-}
 static inline void rq_clock_skip_update(struct rq *rq)
 {
 	lockdep_assert_rq_held(rq);