Commit b1092c9a authored by Michael Lyle's avatar Michael Lyle Committed by Jens Axboe

bcache: allow quick writeback when backing idle

If the control system would wait for at least half a second, and there's
been no reqs hitting the backing disk for a while: use an alternate mode
where we have at most one contiguous set of writebacks in flight at a
time. (But don't otherwise delay.)  If front-end IO appears, it will
still be quick, as it will only have to contend with one real operation
in flight.  But otherwise, we'll be sending data to the backing disk as
quickly as it can accept it (with one op at a time).
Signed-off-by: Michael Lyle <mlyle@lyle.org>
Reviewed-by: Tang Junhui <tang.junhui@zte.com.cn>
Acked-by: Coly Li <colyli@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 6e6ccc67
...@@ -320,6 +320,13 @@ struct cached_dev { ...@@ -320,6 +320,13 @@ struct cached_dev {
*/ */
atomic_t has_dirty; atomic_t has_dirty;
/*
* Set to zero by things that touch the backing volume-- except
* writeback. Incremented by writeback. Used to determine when to
* accelerate idle writeback.
*/
atomic_t backing_idle;
struct bch_ratelimit writeback_rate; struct bch_ratelimit writeback_rate;
struct delayed_work writeback_rate_update; struct delayed_work writeback_rate_update;
......
...@@ -996,6 +996,7 @@ static blk_qc_t cached_dev_make_request(struct request_queue *q, ...@@ -996,6 +996,7 @@ static blk_qc_t cached_dev_make_request(struct request_queue *q,
struct cached_dev *dc = container_of(d, struct cached_dev, disk); struct cached_dev *dc = container_of(d, struct cached_dev, disk);
int rw = bio_data_dir(bio); int rw = bio_data_dir(bio);
atomic_set(&dc->backing_idle, 0);
generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0); generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0);
bio_set_dev(bio, dc->bdev); bio_set_dev(bio, dc->bdev);
......
...@@ -356,6 +356,27 @@ static void read_dirty(struct cached_dev *dc) ...@@ -356,6 +356,27 @@ static void read_dirty(struct cached_dev *dc)
delay = writeback_delay(dc, size); delay = writeback_delay(dc, size);
/* If the control system would wait for at least half a
* second, and there's been no reqs hitting the backing disk
* for awhile: use an alternate mode where we have at most
* one contiguous set of writebacks in flight at a time. If
* someone wants to do IO it will be quick, as it will only
* have to contend with one operation in flight, and we'll
* be round-tripping data to the backing disk as quickly as
* it can accept it.
*/
if (delay >= HZ / 2) {
/* 3 means at least 1.5 seconds, up to 7.5 if we
* have slowed way down.
*/
if (atomic_inc_return(&dc->backing_idle) >= 3) {
/* Wait for current I/Os to finish */
closure_sync(&cl);
/* And immediately launch a new set. */
delay = 0;
}
}
while (!kthread_should_stop() && delay) { while (!kthread_should_stop() && delay) {
schedule_timeout_interruptible(delay); schedule_timeout_interruptible(delay);
delay = writeback_delay(dc, 0); delay = writeback_delay(dc, 0);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment