Commit 369da7fc authored by Linus Torvalds

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
 "Two load-balancing fixes for cgroups-intense workloads"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/fair: Fix calc_cfs_shares() fixed point arithmetics width confusion
  sched/fair: Fix effective_load() to consistently use smoothed load
parents 612807fe ea1dc6fc
kernel/sched/fair.c
@@ -735,8 +735,6 @@ void post_init_entity_util_avg(struct sched_entity *se)
 	}
 }
 
-static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq);
-static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq);
 #else
 void init_entity_runnable_average(struct sched_entity *se)
 {
@@ -2499,28 +2497,22 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 # ifdef CONFIG_SMP
-static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
+static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
 {
-	long tg_weight;
+	long tg_weight, load, shares;
 
 	/*
-	 * Use this CPU's real-time load instead of the last load contribution
-	 * as the updating of the contribution is delayed, and we will use the
-	 * the real-time load to calc the share. See update_tg_load_avg().
+	 * This really should be: cfs_rq->avg.load_avg, but instead we use
+	 * cfs_rq->load.weight, which is its upper bound. This helps ramp up
+	 * the shares for small weight interactive tasks.
 	 */
-	tg_weight = atomic_long_read(&tg->load_avg);
-	tg_weight -= cfs_rq->tg_load_avg_contrib;
-	tg_weight += cfs_rq->load.weight;
+	load = scale_load_down(cfs_rq->load.weight);
 
-	return tg_weight;
-}
+	tg_weight = atomic_long_read(&tg->load_avg);
 
-static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
-{
-	long tg_weight, load, shares;
-
-	tg_weight = calc_tg_weight(tg, cfs_rq);
-	load = cfs_rq->load.weight;
+	/* Ensure tg_weight >= load */
+	tg_weight -= cfs_rq->tg_load_avg_contrib;
+	tg_weight += load;
 
 	shares = (tg->shares * load);
 	if (tg_weight)
@@ -2539,6 +2531,7 @@ static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
 	return tg->shares;
 }
 # endif /* CONFIG_SMP */
+
 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 			    unsigned long weight)
 {
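The calc_cfs_shares() hunks above keep the whole share computation in one fixed-point width: tg->load_avg and tg_load_avg_contrib are tracked at the lower load resolution, so the local cfs_rq->load.weight is scale_load_down()'d before the terms are mixed and divided. Below is a minimal standalone sketch of that arithmetic, not the kernel code; the scale_load()/scale_load_down() stand-ins and the calc_cfs_shares_sketch() helper exist only for this illustration, and the example numbers are made up.

```c
#include <stdio.h>

/*
 * Stand-ins for the kernel's 64-bit fixed-point helpers, redefined here only
 * so the sketch compiles on its own (assumption: load.weight carries
 * SCHED_FIXEDPOINT_SHIFT extra resolution bits on 64-bit).
 */
#define SCHED_FIXEDPOINT_SHIFT	10
#define scale_load(w)		((long)(w) << SCHED_FIXEDPOINT_SHIFT)
#define scale_load_down(w)	((long)(w) >> SCHED_FIXEDPOINT_SHIFT)

/*
 * Sketch of the post-fix share calculation: every load term entering the
 * ratio (local cfs_rq load, tg load_avg, this cfs_rq's contribution) is kept
 * at the same, lower resolution before the multiply/divide.
 */
static long calc_cfs_shares_sketch(long cfs_rq_weight,	/* scaled, as stored in load.weight */
				   long tg_load_avg,	/* group-wide load sum, low resolution */
				   long tg_contrib,	/* this cfs_rq's published contribution */
				   long tg_shares)	/* the group's shares value */
{
	long load = scale_load_down(cfs_rq_weight);
	long tg_weight = tg_load_avg;

	/* Swap the stale published contribution for the current local load. */
	tg_weight -= tg_contrib;
	tg_weight += load;

	long shares = tg_shares * load;
	if (tg_weight)
		shares /= tg_weight;

	return shares;
}

int main(void)
{
	/*
	 * Example: a 2048-share group whose load is split evenly over two
	 * CPUs; each CPU's group entity ends up with about half the shares.
	 */
	long shares = calc_cfs_shares_sketch(scale_load(1024), 2048, 1024, 2048);

	printf("per-CPU shares = %ld\n", shares);	/* prints 1024 */
	return 0;
}
```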
@@ -4946,19 +4939,24 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
 		return wl;
 
 	for_each_sched_entity(se) {
-		long w, W;
+		struct cfs_rq *cfs_rq = se->my_q;
+		long W, w = cfs_rq_load_avg(cfs_rq);
 
-		tg = se->my_q->tg;
+		tg = cfs_rq->tg;
 
 		/*
 		 * W = @wg + \Sum rw_j
 		 */
-		W = wg + calc_tg_weight(tg, se->my_q);
+		W = wg + atomic_long_read(&tg->load_avg);
+
+		/* Ensure \Sum rw_j >= rw_i */
+		W -= cfs_rq->tg_load_avg_contrib;
+		W += w;
 
 		/*
 		 * w = rw_i + @wl
 		 */
-		w = cfs_rq_load_avg(se->my_q) + wl;
+		w += wl;
 
 		/*
 		 * wl = S * s'_i; see (2)
......
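The effective_load() hunk makes every level of the hierarchy walk use the same smoothed (PELT) load figures: w starts from cfs_rq_load_avg() of the group's runqueue on this CPU, and W starts from tg->load_avg with that runqueue's published contribution swapped for the same smoothed value. The sketch below illustrates one level of that computation under stated assumptions; the struct level type, level_sketch() helper, and example numbers are illustrative only, and the wl = S * s'_i step is shown only approximately since the remainder of the function is truncated above.

```c
#include <stdio.h>

/* Illustrative per-level inputs; not the kernel's data structures. */
struct level {
	long tg_load_avg;	/* \Sum rw_j as tracked in tg->load_avg */
	long tg_contrib;	/* this cfs_rq's published contribution */
	long cfs_load_avg;	/* cfs_rq_load_avg() on this CPU (smoothed) */
	long shares;		/* S: the group's shares */
};

/*
 * One level of the effective_load() walk, roughly: with extra load @wl
 * queued on this CPU and @wg added group-wide, the group entity's new
 * weight is about S * s'_i = S * w / W (the "see (2)" reference in the
 * diff comments).  The kernel's exact clamping of the result is omitted.
 */
static long level_sketch(const struct level *lv, long wl, long wg)
{
	long w, W;

	/* W = @wg + \Sum rw_j, using the smoothed local load throughout. */
	W = wg + lv->tg_load_avg;
	W -= lv->tg_contrib;
	W += lv->cfs_load_avg;

	/* w = rw_i + @wl */
	w = lv->cfs_load_avg + wl;

	if (W > 0 && w < W)
		return (lv->shares * w) / W;	/* wl = S * s'_i, approximately */
	return lv->shares;
}

int main(void)
{
	/* Example: a 1024-share group with half its smoothed load on this CPU. */
	struct level lv = { .tg_load_avg = 2048, .tg_contrib = 1024,
			    .cfs_load_avg = 1024, .shares = 1024 };

	printf("without extra load: %ld\n", level_sketch(&lv, 0, 0));	/* 512 */
	printf("with wl = 512:      %ld\n", level_sketch(&lv, 512, 0));	/* 768 */
	return 0;
}
```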