Commit 8165e145 authored by Paul Turner, committed by Ingo Molnar

sched: Compute load contribution by a group entity

Unlike task entities, which have a fixed weight, group entities instead own a
fraction of their parenting task_group's shares as their contributed weight.

Compute this fraction so that we can correctly account for hierarchies and
shared entity nodes.
Signed-off-by: Paul Turner <pjt@google.com>
Reviewed-by: Ben Segall <bsegall@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20120823141506.855074415@google.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent c566e8e9
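In rough terms, the new __update_group_entity_contrib() below gives a group
entity this share of its parent's weight (illustrative form only; the +1
guards against dividing by zero):

	se->avg.load_avg_contrib = tg->shares * cfs_rq->tg_load_contrib /
				   (atomic64_read(&tg->load_avg) + 1);

With made-up numbers: tg->shares = 1024, this cfs_rq's tg_load_contrib = 512
and a group-wide tg->load_avg = 2048 give 1024 * 512 / 2049 ≈ 255, i.e. the
entity carries roughly a quarter of the group's shares on this cpu.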
@@ -1117,22 +1117,43 @@ static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
 		cfs_rq->tg_load_contrib += tg_contrib;
 	}
 }
+
+static inline void __update_group_entity_contrib(struct sched_entity *se)
+{
+	struct cfs_rq *cfs_rq = group_cfs_rq(se);
+	struct task_group *tg = cfs_rq->tg;
+	u64 contrib;
+
+	contrib = cfs_rq->tg_load_contrib * tg->shares;
+	se->avg.load_avg_contrib = div64_u64(contrib,
+				     atomic64_read(&tg->load_avg) + 1);
+}
 #else
 static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
 						 int force_update) {}
+static inline void __update_group_entity_contrib(struct sched_entity *se) {}
 #endif
 
+static inline void __update_task_entity_contrib(struct sched_entity *se)
+{
+	u32 contrib;
+
+	/* avoid overflowing a 32-bit type w/ SCHED_LOAD_SCALE */
+	contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);
+	contrib /= (se->avg.runnable_avg_period + 1);
+	se->avg.load_avg_contrib = scale_load(contrib);
+}
+
 /* Compute the current contribution to load_avg by se, return any delta */
 static long __update_entity_load_avg_contrib(struct sched_entity *se)
 {
 	long old_contrib = se->avg.load_avg_contrib;
 
-	if (!entity_is_task(se))
-		return 0;
-
-	se->avg.load_avg_contrib = div64_u64(se->avg.runnable_avg_sum *
-					     se->load.weight,
-					     se->avg.runnable_avg_period + 1);
+	if (entity_is_task(se)) {
+		__update_task_entity_contrib(se);
+	} else {
+		__update_group_entity_contrib(se);
+	}
 
 	return se->avg.load_avg_contrib - old_contrib;
 }
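For comparison, the task-entity path above reduces to the task's own weight
scaled by its runnable fraction, roughly:

	contrib ≈ se->load.weight * runnable_avg_sum / (runnable_avg_period + 1)

so a nice-0 task (weight 1024) that was runnable for about half of its tracked
period contributes roughly 512. scale_load_down()/scale_load() drop and then
restore the extra weight-resolution bits so that, as the comment notes, the
u32 intermediate product does not overflow.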