Commit ab93a4bc authored by Josh Don, committed by Peter Zijlstra

sched/fair: Remove distribute_running from CFS bandwidth

This is mostly a revert of commit:

  baa9be4f ("sched/fair: Fix throttle_list starvation with low CFS quota")

The primary use of distribute_running was to determine whether to add
throttled entities to the head or the tail of the throttled list. Now
that we always add to the tail, we can remove this field.
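
For reference, the head-vs-tail choice being removed looked roughly like
the following sketch of the throttle path (simplified, not the verbatim
kernel source); after e98fa02c the insertion is unconditionally at the
tail:

  	/* old: hide newly-throttled entries from an in-flight distribution */
  	if (cfs_b->distribute_running)
  		list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
  	else
  		list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);

  	/* new: always add to the tail so later runqueues are not starved */
  	list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);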

The other use of distribute_running is in the slack_timer, so that we
don't start a distribution while one is already running. However, even
in the event that this race occurs, it is fine to have two distributions
running (especially now that distribute grabs the cfs_b->lock to
determine remaining quota before assigning).
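
To illustrate why two racing distributions cannot over-commit quota, here
is a minimal userspace model (an analogy only, not kernel code; the
struct and function names below are invented for illustration): every
grant is clamped against the remaining quota under the lock, so the sum
handed out can never exceed what was available, no matter how many
distributors run concurrently.

  #include <pthread.h>
  #include <stdint.h>
  #include <stdio.h>

  struct cfs_bandwidth_model {
  	pthread_mutex_t lock;	/* stands in for cfs_b->lock */
  	uint64_t runtime;	/* remaining quota, in ns */
  };

  /* Claim up to 'want' ns of quota; return what was actually granted. */
  static uint64_t claim_runtime(struct cfs_bandwidth_model *b, uint64_t want)
  {
  	uint64_t granted;

  	pthread_mutex_lock(&b->lock);
  	granted = want < b->runtime ? want : b->runtime;
  	b->runtime -= granted;
  	pthread_mutex_unlock(&b->lock);

  	return granted;
  }

  static void *distributor(void *arg)
  {
  	struct cfs_bandwidth_model *b = arg;
  	uint64_t total = 0, got;

  	/* Keep granting 1000ns slices until the quota is exhausted. */
  	while ((got = claim_runtime(b, 1000)) > 0)
  		total += got;

  	printf("distributor handed out %llu ns\n", (unsigned long long)total);
  	return NULL;
  }

  int main(void)
  {
  	struct cfs_bandwidth_model b = {
  		.lock = PTHREAD_MUTEX_INITIALIZER,
  		.runtime = 1000000,	/* 1ms of quota */
  	};
  	pthread_t t1, t2;

  	/* Two racing distributions: the grants still sum to exactly 1ms. */
  	pthread_create(&t1, NULL, distributor, &b);
  	pthread_create(&t2, NULL, distributor, &b);
  	pthread_join(t1, NULL);
  	pthread_join(t2, NULL);

  	printf("remaining quota: %llu ns\n", (unsigned long long)b.runtime);
  	return 0;
  }

The two totals printed always sum to the initial quota and the remainder
is zero, which mirrors why serializing distributions via
distribute_running is no longer needed.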
Signed-off-by: Josh Don <joshdon@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Phil Auld <pauld@redhat.com>
Tested-by: Phil Auld <pauld@redhat.com>
Link: https://lkml.kernel.org/r/20200410225208.109717-3-joshdon@google.com
parent e98fa02c
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4931,14 +4931,12 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, u
 	/*
 	 * This check is repeated as we release cfs_b->lock while we unthrottle.
 	 */
-	while (throttled && cfs_b->runtime > 0 && !cfs_b->distribute_running) {
-		cfs_b->distribute_running = 1;
+	while (throttled && cfs_b->runtime > 0) {
 		raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
 		/* we can't nest cfs_b->lock while distributing bandwidth */
 		distribute_cfs_runtime(cfs_b);
 		raw_spin_lock_irqsave(&cfs_b->lock, flags);
 
-		cfs_b->distribute_running = 0;
 		throttled = !list_empty(&cfs_b->throttled_cfs_rq);
 	}
@@ -5052,10 +5050,6 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
 	/* confirm we're still not at a refresh boundary */
 	raw_spin_lock_irqsave(&cfs_b->lock, flags);
 	cfs_b->slack_started = false;
-	if (cfs_b->distribute_running) {
-		raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
-		return;
-	}
 
 	if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
 		raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
@@ -5065,9 +5059,6 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
 	if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice)
 		runtime = cfs_b->runtime;
 
-	if (runtime)
-		cfs_b->distribute_running = 1;
-
 	raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
 
 	if (!runtime)
@@ -5076,7 +5067,6 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
 	distribute_cfs_runtime(cfs_b);
 
 	raw_spin_lock_irqsave(&cfs_b->lock, flags);
-	cfs_b->distribute_running = 0;
 	raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
 }
@@ -5218,7 +5208,6 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 	cfs_b->period_timer.function = sched_cfs_period_timer;
 	hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	cfs_b->slack_timer.function = sched_cfs_slack_timer;
-	cfs_b->distribute_running = 0;
 	cfs_b->slack_started = false;
 }
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -349,7 +349,6 @@ struct cfs_bandwidth {
 	u8			idle;
 	u8			period_active;
-	u8			distribute_running;
 	u8			slack_started;
 	struct hrtimer		period_timer;
 	struct hrtimer		slack_timer;