Commit 104cb16d authored by Morten Rasmussen's avatar Morten Rasmussen Committed by Ingo Molnar

sched/fair: Compute task/cpu utilization at wake-up correctly

At task wake-up load-tracking isn't updated until the task is enqueued.
The task's own view of its utilization contribution may therefore not be
aligned with its contribution to the cfs_rq load-tracking which may have
been updated in the meantime. Basically, the task's own utilization
hasn't yet accounted for the sleep decay, while the cfs_rq may have
(partially). Estimating the cfs_rq utilization in case the task is
migrated at wake-up as task_rq(p)->cfs.avg.util_avg - p->se.avg.util_avg
is therefore incorrect as the two load-tracking signals aren't time
synchronized (different last update).

To solve this problem, this patch synchronizes the task utilization with
its previous rq before the task utilization is used in the wake-up path.
Currently the update/synchronization is done _after_ the task has been
placed by select_task_rq_fair(). The synchronization is done without
having to take the rq lock using the existing mechanism used in
remove_entity_load_avg().
Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: dietmar.eggemann@arm.com
Cc: freedom.tan@mediatek.com
Cc: keita.kobayashi.ym@renesas.com
Cc: mgalbraith@suse.de
Cc: sgurrappadi@nvidia.com
Cc: vincent.guittot@linaro.org
Cc: yuyang.du@intel.com
Link: http://lkml.kernel.org/r/1476452472-24740-2-git-send-email-morten.rasmussen@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent f285144f
...@@ -3198,6 +3198,19 @@ static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq) ...@@ -3198,6 +3198,19 @@ static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
} }
#endif #endif
/*
 * Synchronize entity load avg of dequeued entity without locking
 * the previous rq.
 *
 * Brings se->avg up to date with its old cfs_rq's clock by decaying it
 * to the cfs_rq's last_update_time (read locklessly), without adding
 * any new load/util contribution (the 0, 0, NULL arguments).
 */
void sync_entity_load_avg(struct sched_entity *se)
{
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	__update_load_avg(cfs_rq_last_update_time(cfs_rq),
			  cpu_of(rq_of(cfs_rq)), &se->avg, 0, 0, NULL);
}
/* /*
* Task first catches up with cfs_rq, and then subtract * Task first catches up with cfs_rq, and then subtract
* itself from the cfs_rq (task must be off the queue now). * itself from the cfs_rq (task must be off the queue now).
...@@ -3205,7 +3218,6 @@ static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq) ...@@ -3205,7 +3218,6 @@ static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
void remove_entity_load_avg(struct sched_entity *se) void remove_entity_load_avg(struct sched_entity *se)
{ {
struct cfs_rq *cfs_rq = cfs_rq_of(se); struct cfs_rq *cfs_rq = cfs_rq_of(se);
u64 last_update_time;
/* /*
* tasks cannot exit without having gone through wake_up_new_task() -> * tasks cannot exit without having gone through wake_up_new_task() ->
...@@ -3217,9 +3229,7 @@ void remove_entity_load_avg(struct sched_entity *se) ...@@ -3217,9 +3229,7 @@ void remove_entity_load_avg(struct sched_entity *se)
* calls this. * calls this.
*/ */
last_update_time = cfs_rq_last_update_time(cfs_rq); sync_entity_load_avg(se);
__update_load_avg(last_update_time, cpu_of(rq_of(cfs_rq)), &se->avg, 0, 0, NULL);
atomic_long_add(se->avg.load_avg, &cfs_rq->removed_load_avg); atomic_long_add(se->avg.load_avg, &cfs_rq->removed_load_avg);
atomic_long_add(se->avg.util_avg, &cfs_rq->removed_util_avg); atomic_long_add(se->avg.util_avg, &cfs_rq->removed_util_avg);
} }
...@@ -5582,6 +5592,24 @@ static inline int task_util(struct task_struct *p) ...@@ -5582,6 +5592,24 @@ static inline int task_util(struct task_struct *p)
return p->se.avg.util_avg; return p->se.avg.util_avg;
} }
/*
 * cpu_util_wake: Compute cpu utilization with any contributions from
 * the waking task p removed.
 *
 * Returns the CPU's utilization clamped to its original capacity. If p
 * last ran elsewhere, or its load-tracking signal has never been
 * updated (brand-new task), the plain cpu_util() value is returned.
 */
static int cpu_util_wake(int cpu, struct task_struct *p)
{
	unsigned long cap, util;

	/* Task has no contribution or is new */
	if (cpu != task_cpu(p) || !p->se.avg.last_update_time)
		return cpu_util(cpu);

	cap = capacity_orig_of(cpu);
	/* Subtract p's contribution; clamp at zero in case the signals race. */
	util = max_t(long, cpu_rq(cpu)->cfs.avg.util_avg - task_util(p), 0);

	if (util >= cap)
		return cap;

	return util;
}
/* /*
* Disable WAKE_AFFINE in the case where task @p doesn't fit in the * Disable WAKE_AFFINE in the case where task @p doesn't fit in the
* capacity of either the waking CPU @cpu or the previous CPU @prev_cpu. * capacity of either the waking CPU @cpu or the previous CPU @prev_cpu.
...@@ -5600,6 +5628,9 @@ static int wake_cap(struct task_struct *p, int cpu, int prev_cpu) ...@@ -5600,6 +5628,9 @@ static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)
if (max_cap - min_cap < max_cap >> 3) if (max_cap - min_cap < max_cap >> 3)
return 0; return 0;
/* Bring task utilization in sync with prev_cpu */
sync_entity_load_avg(&p->se);
return min_cap * 1024 < task_util(p) * capacity_margin; return min_cap * 1024 < task_util(p) * capacity_margin;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment