Commit d7bd15a1 authored by Tejun Heo, committed by Jens Axboe

iocost: over-budget forced IOs should schedule async delay

When over-budget IOs are force-issued through root cgroup,
iocg_kick_delay() adjusts the async delay accordingly but doesn't
actually schedule async throttle for the issuing task.  This bug is
pretty well masked because sooner or later the offending threads are
gonna get directly throttled on regular IOs or have async delay
scheduled by mem_cgroup_throttle_swaprate().

However, it can affect control quality on filesystem metadata heavy
operations.  Let's fix it by invoking blkcg_schedule_throttle() when
iocg_kick_delay() says async delay is needed.
Signed-off-by: Tejun Heo <tj@kernel.org>
Fixes: 7caa4715 ("blkcg: implement blk-iocost")
Cc: stable@vger.kernel.org
Reported-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 6afa8731
@@ -1212,7 +1212,7 @@ static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer)
 	return HRTIMER_NORESTART;
 }
 
-static void iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost)
+static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost)
 {
 	struct ioc *ioc = iocg->ioc;
 	struct blkcg_gq *blkg = iocg_to_blkg(iocg);
@@ -1229,11 +1229,11 @@ static void iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost)
 	/* clear or maintain depending on the overage */
 	if (time_before_eq64(vtime, now->vnow)) {
 		blkcg_clear_delay(blkg);
-		return;
+		return false;
 	}
 	if (!atomic_read(&blkg->use_delay) &&
 	    time_before_eq64(vtime, now->vnow + vmargin))
-		return;
+		return false;
 
 	/* use delay */
 	if (cost) {
@@ -1250,10 +1250,11 @@ static void iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost)
 	oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->delay_timer));
 	if (hrtimer_is_queued(&iocg->delay_timer) &&
 	    abs(oexpires - expires) <= margin_ns / 4)
-		return;
+		return true;
 
 	hrtimer_start_range_ns(&iocg->delay_timer, ns_to_ktime(expires),
 			       margin_ns / 4, HRTIMER_MODE_ABS);
+	return true;
 }
 
 static enum hrtimer_restart iocg_delay_timer_fn(struct hrtimer *timer)
@@ -1739,7 +1740,9 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
 	 */
 	if (bio_issue_as_root_blkg(bio) || fatal_signal_pending(current)) {
 		atomic64_add(abs_cost, &iocg->abs_vdebt);
-		iocg_kick_delay(iocg, &now, cost);
+		if (iocg_kick_delay(iocg, &now, cost))
+			blkcg_schedule_throttle(rqos->q,
+						(bio->bi_opf & REQ_SWAP) == REQ_SWAP);
 		return;
 	}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment