Commit cea5a347 authored by Peter Zijlstra

sched/fair: Cleanup fair_server

The throttle interaction made my brain hurt, make it consistently
about 0 transitions of h_nr_running.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
parent 5f6bd380
...@@ -5849,10 +5849,10 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq) ...@@ -5849,10 +5849,10 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
/* At this point se is NULL and we are at root level*/ /* At this point se is NULL and we are at root level*/
sub_nr_running(rq, task_delta); sub_nr_running(rq, task_delta);
done:
/* Stop the fair server if throttling resulted in no runnable tasks */ /* Stop the fair server if throttling resulted in no runnable tasks */
if (rq_h_nr_running && !rq->cfs.h_nr_running) if (rq_h_nr_running && !rq->cfs.h_nr_running)
dl_server_stop(&rq->fair_server); dl_server_stop(&rq->fair_server);
done:
/* /*
* Note: distribution will already see us throttled via the * Note: distribution will already see us throttled via the
* throttled-list. rq->lock protects completion. * throttled-list. rq->lock protects completion.
...@@ -5940,16 +5940,16 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) ...@@ -5940,16 +5940,16 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
goto unthrottle_throttle; goto unthrottle_throttle;
} }
/* Start the fair server if un-throttling resulted in new runnable tasks */
if (!rq_h_nr_running && rq->cfs.h_nr_running)
dl_server_start(&rq->fair_server);
/* At this point se is NULL and we are at root level*/ /* At this point se is NULL and we are at root level*/
add_nr_running(rq, task_delta); add_nr_running(rq, task_delta);
unthrottle_throttle: unthrottle_throttle:
assert_list_leaf_cfs_rq(rq); assert_list_leaf_cfs_rq(rq);
/* Start the fair server if un-throttling resulted in new runnable tasks */
if (!rq_h_nr_running && rq->cfs.h_nr_running)
dl_server_start(&rq->fair_server);
/* Determine whether we need to wake up potentially idle CPU: */ /* Determine whether we need to wake up potentially idle CPU: */
if (rq->curr == rq->idle && rq->cfs.nr_running) if (rq->curr == rq->idle && rq->cfs.nr_running)
resched_curr(rq); resched_curr(rq);
...@@ -6771,6 +6771,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) ...@@ -6771,6 +6771,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
struct sched_entity *se = &p->se; struct sched_entity *se = &p->se;
int idle_h_nr_running = task_has_idle_policy(p); int idle_h_nr_running = task_has_idle_policy(p);
int task_new = !(flags & ENQUEUE_WAKEUP); int task_new = !(flags & ENQUEUE_WAKEUP);
int rq_h_nr_running = rq->cfs.h_nr_running;
/* /*
* The code below (indirectly) updates schedutil which looks at * The code below (indirectly) updates schedutil which looks at
...@@ -6780,13 +6781,6 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) ...@@ -6780,13 +6781,6 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
*/ */
util_est_enqueue(&rq->cfs, p); util_est_enqueue(&rq->cfs, p);
if (!throttled_hierarchy(task_cfs_rq(p)) && !rq->cfs.h_nr_running) {
/* Account for idle runtime */
if (!rq->nr_running)
dl_server_update_idle_time(rq, rq->curr);
dl_server_start(&rq->fair_server);
}
/* /*
* If in_iowait is set, the code below may not trigger any cpufreq * If in_iowait is set, the code below may not trigger any cpufreq
* utilization updates, so do it here explicitly with the IOWAIT flag * utilization updates, so do it here explicitly with the IOWAIT flag
...@@ -6832,6 +6826,13 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) ...@@ -6832,6 +6826,13 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
goto enqueue_throttle; goto enqueue_throttle;
} }
if (!rq_h_nr_running && rq->cfs.h_nr_running) {
/* Account for idle runtime */
if (!rq->nr_running)
dl_server_update_idle_time(rq, rq->curr);
dl_server_start(&rq->fair_server);
}
/* At this point se is NULL and we are at root level*/ /* At this point se is NULL and we are at root level*/
add_nr_running(rq, 1); add_nr_running(rq, 1);
...@@ -6872,6 +6873,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) ...@@ -6872,6 +6873,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
int task_sleep = flags & DEQUEUE_SLEEP; int task_sleep = flags & DEQUEUE_SLEEP;
int idle_h_nr_running = task_has_idle_policy(p); int idle_h_nr_running = task_has_idle_policy(p);
bool was_sched_idle = sched_idle_rq(rq); bool was_sched_idle = sched_idle_rq(rq);
int rq_h_nr_running = rq->cfs.h_nr_running;
util_est_dequeue(&rq->cfs, p); util_est_dequeue(&rq->cfs, p);
...@@ -6926,14 +6928,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) ...@@ -6926,14 +6928,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
/* At this point se is NULL and we are at root level*/ /* At this point se is NULL and we are at root level*/
sub_nr_running(rq, 1); sub_nr_running(rq, 1);
if (rq_h_nr_running && !rq->cfs.h_nr_running)
dl_server_stop(&rq->fair_server);
/* balance early to pull high priority tasks */ /* balance early to pull high priority tasks */
if (unlikely(!was_sched_idle && sched_idle_rq(rq))) if (unlikely(!was_sched_idle && sched_idle_rq(rq)))
rq->next_balance = jiffies; rq->next_balance = jiffies;
dequeue_throttle: dequeue_throttle:
if (!throttled_hierarchy(task_cfs_rq(p)) && !rq->cfs.h_nr_running)
dl_server_stop(&rq->fair_server);
util_est_update(&rq->cfs, p, task_sleep); util_est_update(&rq->cfs, p, task_sleep);
hrtick_update(rq); hrtick_update(rq);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.