Commit 9e61d12b authored by Linus Torvalds

Merge tag 'sched-urgent-2020-05-24' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Thomas Gleixner:
 "A set of fixes for the scheduler:

   - Fix handling of throttled parents in enqueue_task_fair() for good.

     The recent fix overlooked a corner case where the first iteration
     terminates early because an entity is already on the runqueue,
     which leaves the leaf list management incomplete and later triggers
     the assertion that checks the list for completeness.

   - Fix a similar problem in unthrottle_cfs_rq().

   - Show the correct uclamp values in procfs, which previously printed
     the effective value twice instead of the requested and effective
     values"

* tag 'sched-urgent-2020-05-24' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/fair: Fix unthrottle_cfs_rq() for leaf_cfs_rq list
  sched/debug: Fix requested task uclamp values shown in procfs
  sched/fair: Fix enqueue_task_fair() warning some more
Parents: caffb99b 39f23ce0
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -948,8 +948,8 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
 	P(se.avg.util_est.enqueued);
 #endif
 #ifdef CONFIG_UCLAMP_TASK
-	__PS("uclamp.min", p->uclamp[UCLAMP_MIN].value);
-	__PS("uclamp.max", p->uclamp[UCLAMP_MAX].value);
+	__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
+	__PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);
 	__PS("effective uclamp.min", uclamp_eff_value(p, UCLAMP_MIN));
 	__PS("effective uclamp.max", uclamp_eff_value(p, UCLAMP_MAX));
 #endif
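For context, these fields surface in /proc/<pid>/sched. A minimal userspace sketch (not part of the patch; assumes a kernel built with CONFIG_UCLAMP_TASK and the field names printed by __PS() above) that dumps the requested and effective clamp lines:

/* Sketch: print the uclamp lines from /proc/<pid>/sched.
 * Pass a PID as argv[1], or omit it to inspect the current task. */
#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
	char path[64], line[256];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/%s/sched",
		 argc > 1 ? argv[1] : "self");
	f = fopen(path, "r");
	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		/* Matches "uclamp.min"/"uclamp.max" and the "effective" variants. */
		if (strstr(line, "uclamp"))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}

With the fix applied, the first two lines show the requested values (p->uclamp_req[]) rather than repeating the effective ones.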
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4774,7 +4774,6 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	struct rq *rq = rq_of(cfs_rq);
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
 	struct sched_entity *se;
-	int enqueue = 1;
 	long task_delta, idle_task_delta;
 
 	se = cfs_rq->tg->se[cpu_of(rq)];
@@ -4798,26 +4797,44 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	idle_task_delta = cfs_rq->idle_h_nr_running;
 	for_each_sched_entity(se) {
 		if (se->on_rq)
-			enqueue = 0;
+			break;
 		cfs_rq = cfs_rq_of(se);
-		if (enqueue) {
-			enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
-		} else {
-			update_load_avg(cfs_rq, se, 0);
-			se_update_runnable(se);
-		}
+		enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
 
 		cfs_rq->h_nr_running += task_delta;
 		cfs_rq->idle_h_nr_running += idle_task_delta;
 
 		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(cfs_rq))
-			break;
+			goto unthrottle_throttle;
+	}
+
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
+
+		update_load_avg(cfs_rq, se, UPDATE_TG);
+		se_update_runnable(se);
+
+		cfs_rq->h_nr_running += task_delta;
+		cfs_rq->idle_h_nr_running += idle_task_delta;
+
+		/* end evaluation on encountering a throttled cfs_rq */
+		if (cfs_rq_throttled(cfs_rq))
+			goto unthrottle_throttle;
+
+		/*
+		 * One parent has been throttled and cfs_rq removed from the
+		 * list. Add it back to not break the leaf list.
+		 */
+		if (throttled_hierarchy(cfs_rq))
+			list_add_leaf_cfs_rq(cfs_rq);
 	}
 
-	if (!se)
-		add_nr_running(rq, task_delta);
+	/* At this point se is NULL and we are at root level*/
+	add_nr_running(rq, task_delta);
 
+unthrottle_throttle:
 	/*
 	 * The cfs_rq_throttled() breaks in the above iteration can result in
 	 * incomplete leaf list maintenance, resulting in triggering the
@@ -4826,7 +4843,8 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
 
-		list_add_leaf_cfs_rq(cfs_rq);
+		if (list_add_leaf_cfs_rq(cfs_rq))
+			break;
 	}
 
 	assert_list_leaf_cfs_rq(rq);
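The repair loop above relies on list_add_leaf_cfs_rq() reporting when the branch is fully connected, so the walk can stop at the first still-linked ancestor instead of visiting every level. A toy model of that invariant (plain C, not kernel code; all names here are invented for illustration):

/* Toy model of the leaf-list repair loop: walking up the hierarchy,
 * re-link each node that fell off the list, and stop at the first
 * ancestor that is still linked, since a linked node's ancestors
 * are guaranteed to be linked too. */
#include <stdbool.h>
#include <stdio.h>

struct node {
	const char *name;
	struct node *parent;
	bool on_list;
};

/* Returns true once the node (and thus the branch above it) is linked. */
static bool toy_add_leaf(struct node *n)
{
	if (n->on_list)
		return true;	/* already linked: ancestors are too */
	n->on_list = true;
	printf("re-linked %s\n", n->name);
	return false;
}

int main(void)
{
	struct node root = { "root", NULL, true };
	struct node mid  = { "mid",  &root, false };	/* fell off the list */
	struct node leaf = { "leaf", &mid,  false };	/* fell off the list */

	/* Mirrors: for_each_sched_entity(se) { if (list_add_leaf_cfs_rq(...)) break; } */
	for (struct node *n = &leaf; n; n = n->parent)
		if (toy_add_leaf(n))
			break;
	return 0;
}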
@@ -5479,6 +5497,13 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(cfs_rq))
 			goto enqueue_throttle;
+
+		/*
+		 * One parent has been throttled and cfs_rq removed from the
+		 * list. Add it back to not break the leaf list.
+		 */
+		if (throttled_hierarchy(cfs_rq))
+			list_add_leaf_cfs_rq(cfs_rq);
 	}
 
 enqueue_throttle:
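This is the same pattern as in unthrottle_cfs_rq(): propagate the accounting delta up the hierarchy, bail out at a throttled level (which absorbs the delta instead of passing it to the root runqueue), and re-link any cfs_rq that a throttled parent removed from the leaf list. A toy model of the early-exit accounting (plain C, not kernel code; names invented for illustration):

/* Toy model of the hierarchical accounting in enqueue_task_fair():
 * add a task-count delta at each level, stop at a throttled level,
 * and only touch the root count when no ancestor was throttled. */
#include <stdbool.h>
#include <stdio.h>

struct toy_cfs_rq {
	const char *name;
	struct toy_cfs_rq *parent;
	long h_nr_running;
	bool throttled;
};

int main(void)
{
	struct toy_cfs_rq root = { "root", NULL,  5, false };
	struct toy_cfs_rq grp  = { "grp",  &root, 2, true };	/* throttled group */
	struct toy_cfs_rq leaf = { "leaf", &grp,  1, false };
	long task_delta = 1, root_nr_running = 5;
	struct toy_cfs_rq *rq;

	for (rq = &leaf; rq; rq = rq->parent) {
		rq->h_nr_running += task_delta;
		/* end evaluation on encountering a throttled cfs_rq */
		if (rq->throttled)
			goto enqueue_throttle;
	}
	/* Only reached when no ancestor is throttled (rq is NULL here). */
	root_nr_running += task_delta;

enqueue_throttle:
	printf("root nr_running = %ld (delta absorbed by %s)\n",
	       root_nr_running, rq ? rq->name : "nobody");
	return 0;
}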