Commit 83378269 authored by Peter Zijlstra, committed by Ingo Molnar

sched: correct wakeup weight calculations

rw_i = {2, 4, 1, 0}
s_i = {2/7, 4/7, 1/7, 0}

wakeup on cpu0, weight=1

rw'_i = {3, 4, 1, 0}
s'_i = {3/8, 4/8, 1/8, 0}

s_0 = S * rw_0 / \Sum rw_j ->
  \Sum rw_j = S*rw_0/s_0 = 1*2*7/2 = 7 (correct)

s'_0 = S * (rw_0 + 1) / (\Sum rw_j + 1) =
       1 * (2+1) / (7+1) = 3/8 (correct)

so we find that adding 1 to cpu0 gains 5/56 in weight
if, say, the other cpu were cpu1, we'd also have to calculate its 4/56 loss.
The old wake_affine() check only accounted for the gain on this_cpu; the patch
below folds the prev_cpu loss into the comparison as well (both numbers are
reproduced in the sketch below the commit metadata).
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 243e0e7b
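
For illustration only (not part of the patch): a tiny userspace C sketch that
reproduces the 5/56 gain and 4/56 loss from the changelog example above, using
the same rw_i = {2, 4, 1, 0}, S = 1 and a wakeup of weight 1 on cpu0.

#include <stdio.h>

int main(void)
{
        const long rw[] = { 2, 4, 1, 0 };       /* per-cpu runqueue weights rw_i */
        const long wl = 1;                      /* weight of the waking task */
        long sum = 0, gain0, diff1;
        int i;

        for (i = 0; i < 4; i++)
                sum += rw[i];                   /* \Sum rw_j = 7 */

        /*
         * s_i = rw_i/sum before the wakeup, s'_i = rw'_i/(sum + wl) after it;
         * both deltas are written over the common denominator
         * sum*(sum + wl) = 7*8 = 56.
         */
        gain0 = (rw[0] + wl) * sum - rw[0] * (sum + wl);        /* 3*7 - 2*8 = +5 */
        diff1 = rw[1] * sum - rw[1] * (sum + wl);               /* 4*7 - 4*8 = -4 */

        printf("cpu0 share delta: %+ld/%ld\n", gain0, sum * (sum + wl));        /* +5/56 */
        printf("cpu1 share delta: %+ld/%ld\n", diff1, sum * (sum + wl));        /* -4/56 */
        return 0;
}

This is the asymmetry the patch addresses: the old task_load_add()/task_load_sub()
helpers only computed the delta on a single cpu, whereas wake_affine() now also
folds the prev_cpu loss into the balance check via effective_load(tg, prev_cpu, 0, weight).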
@@ -365,6 +365,10 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 #else
 
 static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
+static inline struct task_group *task_group(struct task_struct *p)
+{
+        return NULL;
+}
 
 #endif /* CONFIG_GROUP_SCHED */
@@ -1074,10 +1074,10 @@ static inline int wake_idle(int cpu, struct task_struct *p)
 static const struct sched_class fair_sched_class;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-static unsigned long effective_load(struct task_group *tg, long wl, int cpu)
+static unsigned long effective_load(struct task_group *tg, int cpu,
+                unsigned long wl, unsigned long wg)
 {
         struct sched_entity *se = tg->se[cpu];
-        long wg = wl;
 
         for_each_sched_entity(se) {
 #define D(n) (likely(n) ? (n) : 1)
@@ -1092,6 +1092,13 @@ static unsigned long effective_load(struct task_group *tg, long wl, int cpu)
                 b = S*rw + s*wg;
 
                 wl = s*(a-b)/D(b);
+                /*
+                 * Assume the group is already running and will
+                 * thus already be accounted for in the weight.
+                 *
+                 * That is, moving shares between CPUs, does not
+                 * alter the group weight.
+                 */
                 wg = 0;
 #undef D
         }
@@ -1099,26 +1106,12 @@ static unsigned long effective_load(struct task_group *tg, long wl, int cpu)
         return wl;
 }
 
-static unsigned long task_load_sub(struct task_struct *p)
-{
-        return effective_load(task_group(p), -(long)p->se.load.weight, task_cpu(p));
-}
-
-static unsigned long task_load_add(struct task_struct *p, int cpu)
-{
-        return effective_load(task_group(p), p->se.load.weight, cpu);
-}
-
 #else
 
-static unsigned long task_load_sub(struct task_struct *p)
-{
-        return -p->se.load.weight;
-}
-
-static unsigned long task_load_add(struct task_struct *p, int cpu)
-{
-        return p->se.load.weight;
-}
+static inline unsigned long effective_load(struct task_group *tg, int cpu,
+                unsigned long wl, unsigned long wg)
+{
+        return wl;
+}
 
 #endif
@@ -1130,8 +1123,10 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
             unsigned int imbalance)
 {
         struct task_struct *curr = this_rq->curr;
+        struct task_group *tg;
         unsigned long tl = this_load;
         unsigned long tl_per_task;
+        unsigned long weight;
         int balanced;
 
         if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS))
@@ -1142,10 +1137,19 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
          * effect of the currently running task from the load
          * of the current CPU:
          */
-        if (sync)
-                tl += task_load_sub(current);
+        if (sync) {
+                tg = task_group(current);
+                weight = current->se.load.weight;
+
+                tl += effective_load(tg, this_cpu, -weight, -weight);
+                load += effective_load(tg, prev_cpu, 0, -weight);
+        }
+
+        tg = task_group(p);
+        weight = p->se.load.weight;
 
-        balanced = 100*(tl + task_load_add(p, this_cpu)) <= imbalance*load;
+        balanced = 100*(tl + effective_load(tg, this_cpu, weight, weight)) <=
+                        imbalance*(load + effective_load(tg, prev_cpu, 0, weight));
 
         /*
          * If the currently running task will sleep within
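
One more observation, for builds without CONFIG_FAIR_GROUP_SCHED: the new stub
effective_load() simply returns wl, so the reworked balance check collapses back
to the old behaviour, because the prev_cpu term contributes nothing. A minimal
sketch of that reduction follows; the local names are mine, not from the patch.

/*
 * With the !CONFIG_FAIR_GROUP_SCHED stub, effective_load(tg, cpu, wl, wg)
 * is just wl, so:
 *
 *   effective_load(tg, this_cpu, weight, weight) -> weight
 *   effective_load(tg, prev_cpu, 0, weight)      -> 0
 *
 * and the new condition reduces to the pre-patch one.
 */
static inline int balanced_without_groups(unsigned long tl, unsigned long load,
                                          unsigned long weight,
                                          unsigned int imbalance)
{
        /* new form: 100*(tl + weight) <= imbalance*(load + 0)          */
        /* old form: 100*(tl + p->se.load.weight) <= imbalance*load     */
        return 100 * (tl + weight) <= imbalance * load;
}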