Commit a8af7246 authored by Peter Zijlstra's avatar Peter Zijlstra Committed by Ingo Molnar

sched: Avoid division by zero

Patch a5004278 (sched: Fix
cgroup smp fairness) introduced the possibility of a
divide-by-zero because load-balancing is not synchronized
between sched_domains.

This can cause the state of cpus to change between the first
and second loop over the sched domain in tg_shares_up().
Reported-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Jes Sorensen <jes@sgi.com>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
LKML-Reference: <1250855934.7538.30.camel@twins>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent cde7e5ca
...@@ -1522,7 +1522,8 @@ static void __set_se_shares(struct sched_entity *se, unsigned long shares); ...@@ -1522,7 +1522,8 @@ static void __set_se_shares(struct sched_entity *se, unsigned long shares);
*/ */
static void static void
update_group_shares_cpu(struct task_group *tg, int cpu, update_group_shares_cpu(struct task_group *tg, int cpu,
unsigned long sd_shares, unsigned long sd_rq_weight) unsigned long sd_shares, unsigned long sd_rq_weight,
unsigned long sd_eff_weight)
{ {
unsigned long rq_weight; unsigned long rq_weight;
unsigned long shares; unsigned long shares;
...@@ -1535,13 +1536,15 @@ update_group_shares_cpu(struct task_group *tg, int cpu, ...@@ -1535,13 +1536,15 @@ update_group_shares_cpu(struct task_group *tg, int cpu,
if (!rq_weight) { if (!rq_weight) {
boost = 1; boost = 1;
rq_weight = NICE_0_LOAD; rq_weight = NICE_0_LOAD;
if (sd_rq_weight == sd_eff_weight)
sd_eff_weight += NICE_0_LOAD;
sd_rq_weight = sd_eff_weight;
} }
/* /*
* \Sum shares * rq_weight * \Sum_j shares_j * rq_weight_i
* shares = ----------------------- * shares_i = -----------------------------
* \Sum rq_weight * \Sum_j rq_weight_j
*
*/ */
shares = (sd_shares * rq_weight) / sd_rq_weight; shares = (sd_shares * rq_weight) / sd_rq_weight;
shares = clamp_t(unsigned long, shares, MIN_SHARES, MAX_SHARES); shares = clamp_t(unsigned long, shares, MIN_SHARES, MAX_SHARES);
...@@ -1593,14 +1596,8 @@ static int tg_shares_up(struct task_group *tg, void *data) ...@@ -1593,14 +1596,8 @@ static int tg_shares_up(struct task_group *tg, void *data)
if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE)) if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE))
shares = tg->shares; shares = tg->shares;
for_each_cpu(i, sched_domain_span(sd)) { for_each_cpu(i, sched_domain_span(sd))
unsigned long sd_rq_weight = rq_weight; update_group_shares_cpu(tg, i, shares, rq_weight, eff_weight);
if (!tg->cfs_rq[i]->rq_weight)
sd_rq_weight = eff_weight;
update_group_shares_cpu(tg, i, shares, sd_rq_weight);
}
return 0; return 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment