Commit d4edd662 authored by Juri Lelli, committed by Ingo Molnar

sched/cpufreq: Use the DEADLINE utilization signal

SCHED_DEADLINE tracks its active utilization signal in a per-dl_rq
variable named running_bw.

Make use of that signal to drive CPU frequency selection: add up the
FAIR and DEADLINE contributions to get the CPU capacity required to
handle both (while RT still selects the maximum frequency).

Co-authored-by: Claudio Scordino <claudio@evidence.eu.com>
Signed-off-by: Juri Lelli <juri.lelli@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Luca Abeni <luca.abeni@santannapisa.it>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rafael J . Wysocki <rafael.j.wysocki@intel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: alessio.balsini@arm.com
Cc: bristot@redhat.com
Cc: dietmar.eggemann@arm.com
Cc: joelaf@google.com
Cc: juri.lelli@redhat.com
Cc: mathieu.poirier@linaro.org
Cc: morten.rasmussen@arm.com
Cc: patrick.bellasi@arm.com
Cc: rjw@rjwysocki.net
Cc: rostedt@goodmis.org
Cc: tkjos@android.com
Cc: tommaso.cucinotta@santannapisa.it
Cc: vincent.guittot@linaro.org
Link: http://lkml.kernel.org/r/20171204102325.5110-2-juri.lelli@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 34be3930
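
To see the arithmetic the patch relies on, here is a minimal userspace
sketch. It is illustrative only: BW_SHIFT, SCHED_CAPACITY_SCALE and the
clamping mirror the kernel's values and logic, while the task parameters
and the CFS utilization value are made-up numbers.

	#include <stdio.h>

	#define BW_SHIFT		20	/* fixed-point shift used by dl_rq::running_bw */
	#define SCHED_CAPACITY_SCALE	1024	/* capacity units used by PELT */

	int main(void)
	{
		/* DEADLINE: 10ms runtime every 100ms period -> bandwidth 0.1 */
		unsigned long long running_bw = (1ULL << BW_SHIFT) / 10;
		unsigned long util_dl  = (running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;
		unsigned long util_cfs = 300;			/* assumed CFS util_avg */
		unsigned long max      = SCHED_CAPACITY_SCALE;	/* CPU capacity */
		unsigned long util     = util_cfs + util_dl;	/* sum both classes */

		if (util > max)		/* clamp to CPU capacity, as the patch does */
			util = max;

		printf("util_dl=%lu util=%lu max=%lu\n", util_dl, util, max);
		return 0;
	}

This prints util_dl=102 util=402 max=1024: the CPU now requests capacity
for both scheduling classes instead of for CFS alone.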
--- a/include/linux/sched/cpufreq.h
+++ b/include/linux/sched/cpufreq.h
@@ -12,8 +12,6 @@
 #define SCHED_CPUFREQ_DL	(1U << 1)
 #define SCHED_CPUFREQ_IOWAIT	(1U << 2)
 
-#define SCHED_CPUFREQ_RT_DL	(SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL)
-
 #ifdef CONFIG_CPU_FREQ
 struct update_util_data {
 	void (*func)(struct update_util_data *data, u64 time, unsigned int flags);
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -179,12 +179,17 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
 static void sugov_get_util(unsigned long *util, unsigned long *max, int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
-	unsigned long cfs_max;
+	unsigned long util_cfs = cpu_util_cfs(rq);
+	unsigned long util_dl = cpu_util_dl(rq);
 
-	cfs_max = arch_scale_cpu_capacity(NULL, cpu);
+	*max = arch_scale_cpu_capacity(NULL, cpu);
 
-	*util = min(rq->cfs.avg.util_avg, cfs_max);
-	*max = cfs_max;
+	/*
+	 * Ideally we would like to set util_dl as min/guaranteed freq and
+	 * util_cfs + util_dl as requested freq. However, cpufreq is not yet
+	 * ready for such an interface. So, we only do the latter for now.
+	 */
+	*util = min(util_cfs + util_dl, *max);
 }
 
 static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time)
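
For context, the util/max pair computed above is consumed by
get_next_freq(), which this patch leaves untouched. A simplified model
of that mapping (a sketch that ignores frequency invariance and policy
min/max clamping; the function name is made up):

	/* Rough model of get_next_freq(): next_f ~= 1.25 * max_freq * util / max */
	unsigned int next_freq_model(unsigned int max_freq,
				     unsigned long util, unsigned long max)
	{
		unsigned long freq = max_freq + (max_freq >> 2);	/* 25% headroom */

		freq = freq * util / max;
		return freq > max_freq ? max_freq : (unsigned int)freq;
	}

So a larger summed utilization translates directly into a higher
requested frequency, up to the hardware maximum.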
@@ -271,7 +276,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 
 	busy = sugov_cpu_is_busy(sg_cpu);
 
-	if (flags & SCHED_CPUFREQ_RT_DL) {
+	if (flags & SCHED_CPUFREQ_RT) {
 		next_f = policy->cpuinfo.max_freq;
 	} else {
 		sugov_get_util(&util, &max, sg_cpu->cpu);
@@ -316,7 +321,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
 			j_sg_cpu->iowait_boost_pending = false;
 			continue;
 		}
-		if (j_sg_cpu->flags & SCHED_CPUFREQ_RT_DL)
+		if (j_sg_cpu->flags & SCHED_CPUFREQ_RT)
 			return policy->cpuinfo.max_freq;
 
 		j_util = j_sg_cpu->util;
@@ -352,7 +357,7 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
 	sg_cpu->last_update = time;
 
 	if (sugov_should_update_freq(sg_policy, time)) {
-		if (flags & SCHED_CPUFREQ_RT_DL)
+		if (flags & SCHED_CPUFREQ_RT)
 			next_f = sg_policy->policy->cpuinfo.max_freq;
 		else
 			next_f = sugov_next_freq_shared(sg_cpu, time);
@@ -382,9 +387,9 @@ static void sugov_irq_work(struct irq_work *irq_work)
 	sg_policy = container_of(irq_work, struct sugov_policy, irq_work);
 
 	/*
-	 * For RT and deadline tasks, the schedutil governor shoots the
-	 * frequency to maximum. Special care must be taken to ensure that this
-	 * kthread doesn't result in the same behavior.
+	 * For RT tasks, the schedutil governor shoots the frequency to maximum.
+	 * Special care must be taken to ensure that this kthread doesn't result
+	 * in the same behavior.
 	 *
 	 * This is (mostly) guaranteed by the work_in_progress flag. The flag is
 	 * updated only at the end of the sugov_work() function and before that
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2084,3 +2084,13 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
 #else /* arch_scale_freq_capacity */
 #define arch_scale_freq_invariant()	(false)
 #endif
+
+static inline unsigned long cpu_util_dl(struct rq *rq)
+{
+	return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;
+}
+
+static inline unsigned long cpu_util_cfs(struct rq *rq)
+{
+	return rq->cfs.avg.util_avg;
+}
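
cpu_util_dl() rescales the fixed-point running_bw (BW_SHIFT = 20
fractional bits) into the SCHED_CAPACITY_SCALE (1024) range that PELT
uses, so the two signals are directly comparable. A worked example, with
assumed task parameters:

	A DEADLINE task with runtime 10ms every 100ms period contributes
	a bandwidth of 0.1, stored as running_bw = 0.1 << 20 = 104857:

		cpu_util_dl() = (104857 * 1024) >> 20 = 102

	i.e. roughly 10% of a 1024-capacity CPU, in the same units as
	the util_avg value returned by cpu_util_cfs().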