Commit 371bf427 authored by Vincent Guittot, committed by Ingo Molnar

sched/rt: Add rt_rq utilization tracking

The schedutil governor relies on the cfs_rq's util_avg to choose the OPP when CFS
tasks are running. When the CPU is overloaded by both CFS and RT tasks, the CFS
tasks are preempted by the RT tasks, so util_avg reflects only the capacity left
over by RT, not what CFS actually wants to use. In that case schedutil can select
a lower OPP even though the CPU is overloaded. To get a more accurate view of the
CPU's utilization, also track the utilization of RT tasks.
Only util_avg is tracked; load_avg and runnable_load_avg are not, as they are
meaningless for an rt_rq.

rt_rq uses rq_clock_task() and cfs_rq uses cfs_rq_clock_task(), but the two are
identical at the root group level, so the PELT windows of their util_sum signals
are aligned.
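
The point of exposing rq->avg_rt.util_avg is that a frequency governor can add it to the CFS utilization when picking an OPP; this patch only adds the signal, and consuming it in schedutil is outside this diff. Below is a minimal standalone sketch of that idea, not kernel code: cpu_util_cfs() and cpu_util_rt() are stubbed with made-up sample values here, whereas in the kernel they read cfs_rq->avg.util_avg and rq->avg_rt.util_avg.

#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024   /* full capacity of one CPU */

/* stand-ins for the kernel helpers, returning made-up sample values */
static unsigned long cpu_util_cfs(void) { return 300; }  /* CFS got 300/1024 */
static unsigned long cpu_util_rt(void)  { return 600; }  /* RT ran 600/1024  */

int main(void)
{
        /*
         * Looking at CFS alone (300/1024) suggests a low OPP, even though
         * the CPU is busy close to 90% of the time once RT is accounted for.
         */
        unsigned long util = cpu_util_cfs() + cpu_util_rt();

        if (util > SCHED_CAPACITY_SCALE)
                util = SCHED_CAPACITY_SCALE;    /* clamp to CPU capacity */

        printf("cfs=%lu rt=%lu -> combined=%lu of %d\n",
               cpu_util_cfs(), cpu_util_rt(), util, SCHED_CAPACITY_SCALE);
        return 0;
}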
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Morten.Rasmussen@arm.com
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: claudio@evidence.eu.com
Cc: daniel.lezcano@linaro.org
Cc: dietmar.eggemann@arm.com
Cc: joel@joelfernandes.org
Cc: juri.lelli@redhat.com
Cc: luca.abeni@santannapisa.it
Cc: patrick.bellasi@arm.com
Cc: quentin.perret@arm.com
Cc: rjw@rjwysocki.net
Cc: valentin.schneider@arm.com
Cc: viresh.kumar@linaro.org
Link: http://lkml.kernel.org/r/1530200714-4504-3-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent c0796298
@@ -7290,6 +7290,14 @@ static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq)
 	return false;
 }
 
+static inline bool rt_rq_has_blocked(struct rq *rq)
+{
+	if (READ_ONCE(rq->avg_rt.util_avg))
+		return true;
+
+	return false;
+}
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
@@ -7349,6 +7357,10 @@ static void update_blocked_averages(int cpu)
 		if (cfs_rq_has_blocked(cfs_rq))
 			done = false;
 	}
+	update_rt_rq_load_avg(rq_clock_task(rq), rq, 0);
+	/* Don't need periodic decay once load/util_avg are null */
+	if (rt_rq_has_blocked(rq))
+		done = false;
 
 #ifdef CONFIG_NO_HZ_COMMON
 	rq->last_blocked_load_update_tick = jiffies;
@@ -7414,9 +7426,10 @@ static inline void update_blocked_averages(int cpu)
 	rq_lock_irqsave(rq, &rf);
 	update_rq_clock(rq);
 	update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
+	update_rt_rq_load_avg(rq_clock_task(rq), rq, 0);
 #ifdef CONFIG_NO_HZ_COMMON
 	rq->last_blocked_load_update_tick = jiffies;
-	if (!cfs_rq_has_blocked(cfs_rq))
+	if (!cfs_rq_has_blocked(cfs_rq) && !rt_rq_has_blocked(rq))
 		rq->has_blocked_load = 0;
 #endif
 	rq_unlock_irqrestore(rq, &rf);
@@ -309,3 +309,28 @@ int __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq)
 
 	return 0;
 }
+
+/*
+ * rt_rq:
+ *
+ *   util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
+ *   util_sum = cpu_scale * load_sum
+ *   runnable_load_sum = load_sum
+ *
+ *   load_avg and runnable_load_avg are not supported and meaningless.
+ *
+ */
+
+int update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
+{
+	if (___update_load_sum(now, rq->cpu, &rq->avg_rt,
+				running,
+				running,
+				running)) {
+
+		___update_load_avg(&rq->avg_rt, 1, 1);
+		return 1;
+	}
+
+	return 0;
+}
@@ -3,6 +3,7 @@
 int __update_load_avg_blocked_se(u64 now, int cpu, struct sched_entity *se);
 int __update_load_avg_se(u64 now, int cpu, struct cfs_rq *cfs_rq, struct sched_entity *se);
 int __update_load_avg_cfs_rq(u64 now, int cpu, struct cfs_rq *cfs_rq);
+int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
 
 /*
  * When a task is dequeued, its estimated utilization should not be update if
@@ -38,6 +39,12 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 	return 0;
 }
 
+static inline int
+update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
+{
+	return 0;
+}
+
 #endif
@@ -5,6 +5,8 @@
  */
 #include "sched.h"
 
+#include "pelt.h"
+
 int sched_rr_timeslice = RR_TIMESLICE;
 int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
@@ -1576,6 +1578,14 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	rt_queue_push_tasks(rq);
 
+	/*
+	 * If prev task was rt, put_prev_task() has already updated the
+	 * utilization. We only care of the case where we start to schedule a
+	 * rt task
+	 */
+	if (rq->curr->sched_class != &rt_sched_class)
+		update_rt_rq_load_avg(rq_clock_task(rq), rq, 0);
+
 	return p;
 }
@@ -1583,6 +1593,8 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 {
 	update_curr_rt(rq);
 
+	update_rt_rq_load_avg(rq_clock_task(rq), rq, 1);
+
 	/*
 	 * The previous task needs to be made eligible for pushing
 	 * if it is still active
@@ -2312,6 +2324,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
 	struct sched_rt_entity *rt_se = &p->rt;
 
 	update_curr_rt(rq);
+	update_rt_rq_load_avg(rq_clock_task(rq), rq, 1);
 
 	watchdog(rq, p);
@@ -594,6 +594,7 @@ struct rt_rq {
 	unsigned long		rt_nr_total;
 	int			overloaded;
 	struct plist_head	pushable_tasks;
+
 #endif /* CONFIG_SMP */
 	int			rt_queued;
@@ -854,6 +855,7 @@ struct rq {
 	u64			rt_avg;
 	u64			age_stamp;
+	struct sched_avg	avg_rt;
 
 	u64			idle_stamp;
 	u64			avg_idle;
@@ -2212,4 +2214,9 @@ static inline unsigned long cpu_util_cfs(struct rq *rq)
 	return util;
 }
+
+static inline unsigned long cpu_util_rt(struct rq *rq)
+{
+	return rq->avg_rt.util_avg;
+}
 #endif
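
For reference, update_rt_rq_load_avg() feeds rq->avg_rt through the common PELT machinery (___update_load_sum()/___update_load_avg()), so util_avg rises toward full scale while RT runs and decays geometrically once RT goes idle, which is what rt_rq_has_blocked() relies on above. The following is a deliberately simplified userspace model of that signal, not the kernel implementation: it assumes the usual PELT parameters (1024 us segments, decay factor y with y^32 = 0.5), and its series limit only approximates the kernel's LOAD_AVG_MAX.

#include <math.h>
#include <stdio.h>

#define PELT_PERIOD_US  1024    /* one PELT segment */
#define CAPACITY        1024    /* SCHED_CAPACITY_SCALE */

int main(void)
{
        /* decay factor chosen so that half the signal is gone after 32 periods */
        const double y = pow(0.5, 1.0 / 32.0);
        /* geometric-series limit of an always-running signal (~LOAD_AVG_MAX) */
        const double max_sum = PELT_PERIOD_US / (1.0 - y);
        const int running_periods = 200;        /* RT runs for ~200 periods, then idles */
        double sum = 0.0;

        for (int i = 0; i < 1000; i++) {
                int running = i < running_periods;

                /* decay the accumulated sum and add this period's contribution */
                sum = sum * y + (running ? PELT_PERIOD_US : 0);
                if (i % 100 == 0)
                        printf("period %4d: util_avg ~ %4.0f / %d\n",
                               i, sum / max_sum * CAPACITY, CAPACITY);
        }
        return 0;
}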