Commit e3ba9ae5 authored by Jens Axboe

block: reserve some tags just for sync IO

By only allowing async IO to consume 3/4ths of the tag depth, we
always have slots free to serve sync IO. This is important to avoid
having writes fill the entire tag queue and thus starve reads.

Original patch and idea from Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent f7d7b7a7
@@ -337,6 +337,7 @@ EXPORT_SYMBOL(blk_queue_end_tag);
 int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 {
 	struct blk_queue_tag *bqt = q->queue_tags;
+	unsigned max_depth, offset;
 	int tag;
 
 	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
@@ -350,10 +351,19 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	/*
 	 * Protect against shared tag maps, as we may not have exclusive
 	 * access to the tag map.
+	 *
+	 * We reserve a few tags just for sync IO, since we don't want
+	 * to starve sync IO on behalf of flooding async IO.
 	 */
+	max_depth = bqt->max_depth;
+	if (rq_is_sync(rq))
+		offset = 0;
+	else
+		offset = max_depth >> 2;
+
 	do {
-		tag = find_first_zero_bit(bqt->tag_map, bqt->max_depth);
-		if (tag >= bqt->max_depth)
+		tag = find_next_zero_bit(bqt->tag_map, max_depth, offset);
+		if (tag >= max_depth)
 			return 1;
 
 	} while (test_and_set_bit_lock(tag, bqt->tag_map));
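A minimal user-space sketch of the same reservation policy, assuming a 32-entry tag map and hypothetical names (MAX_DEPTH, tag_map, alloc_tag); it skips the atomic test_and_set_bit_lock() retry that the kernel code needs for shared tag maps:

#include <stdbool.h>
#include <stdio.h>

#define MAX_DEPTH 32u                     /* hypothetical total tag depth */

static unsigned long tag_map;             /* bit i set => tag i is in use */

/* Sync IO may scan the whole map; async IO starts at MAX_DEPTH / 4, so the
 * first quarter of the tags is effectively reserved for sync requests. */
static int alloc_tag(bool is_sync)
{
	unsigned offset = is_sync ? 0 : MAX_DEPTH >> 2;
	unsigned tag;

	for (tag = offset; tag < MAX_DEPTH; tag++) {
		if (!(tag_map & (1UL << tag))) {
			tag_map |= 1UL << tag;
			return (int)tag;
		}
	}
	return -1;                        /* no tag left for this IO class */
}

int main(void)
{
	printf("first async tag: %d\n", alloc_tag(false));   /* prints 8 */
	printf("first sync tag:  %d\n", alloc_tag(true));    /* prints 0 */
	return 0;
}

With these numbers an async caller starts scanning at bit 32 >> 2 = 8, so tags 0-7 can only be taken by sync requests; once bits 8-31 are all set, further async allocations fail while sync IO can still get a tag.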