Commit 70f36b60 authored by Jens Axboe

blk-mq: allow resize of scheduler requests

Add support for growing the tags associated with a hardware queue, for
the scheduler tags. Currently we only support resizing within the
limits of the original depth, change that so we can grow it as well by
allocating and replacing the existing scheduler tag set.

This is similar to how we could increase the software queue depth with
the legacy IO stack and schedulers.
Signed-off-by: Jens Axboe <axboe@fb.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
parent 7e79dadc
...@@ -387,19 +387,56 @@ void blk_mq_free_tags(struct blk_mq_tags *tags) ...@@ -387,19 +387,56 @@ void blk_mq_free_tags(struct blk_mq_tags *tags)
kfree(tags); kfree(tags);
} }
/*
 * blk_mq_tag_update_depth - resize the tag depth of a hardware queue.
 * @hctx:    hardware queue whose tags are being resized
 * @tagsptr: in/out pointer to the tag map; replaced in place when the
 *           map is grown
 * @tdepth:  requested total depth, including reserved tags
 * @can_grow: when true, @tdepth may exceed the current tags->nr_tags,
 *            in which case a replacement tag map and request set are
 *            allocated and swapped in; when false, growth is rejected
 *
 * Returns 0 on success, -EINVAL for an invalid or disallowed depth,
 * -ENOMEM when the replacement map or its requests cannot be allocated.
 */
int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
			    struct blk_mq_tags **tagsptr, unsigned int tdepth,
			    bool can_grow)
{
	struct blk_mq_tags *tags = *tagsptr;

	/* The requested depth must leave room beyond the reserved tags. */
	if (tdepth <= tags->nr_reserved_tags)
		return -EINVAL;

	/* From here on, tdepth counts only the non-reserved tags. */
	tdepth -= tags->nr_reserved_tags;

	/*
	 * If we are allowed to grow beyond the original size, allocate
	 * a new set of tags before freeing the old one.
	 */
	if (tdepth > tags->nr_tags) {
		struct blk_mq_tag_set *set = hctx->queue->tag_set;
		struct blk_mq_tags *new;
		bool ret;

		if (!can_grow)
			return -EINVAL;

		/*
		 * We need some sort of upper limit, set it high enough that
		 * no valid use cases should require more.
		 */
		if (tdepth > 16 * BLKDEV_MAX_RQ)
			return -EINVAL;

		new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth, 0);
		if (!new)
			return -ENOMEM;
		ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
		if (ret) {
			blk_mq_free_rq_map(new);
			return -ENOMEM;
		}

		/* Release the old map only after the new one is fully set up. */
		blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
		blk_mq_free_rq_map(*tagsptr);
		*tagsptr = new;
	} else {
		/*
		 * Don't need (or can't) update reserved tags here, they
		 * remain static and should never need resizing.
		 */
		sbitmap_queue_resize(&tags->bitmap_tags, tdepth);
	}

	return 0;
}
......
...@@ -29,7 +29,9 @@ extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags, ...@@ -29,7 +29,9 @@ extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
struct blk_mq_ctx *ctx, unsigned int tag); struct blk_mq_ctx *ctx, unsigned int tag);
extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags); extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags);
extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page); extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth); extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
struct blk_mq_tags **tags,
unsigned int depth, bool can_grow);
extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool); extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn, void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
void *priv); void *priv);
......
...@@ -2561,6 +2561,9 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) ...@@ -2561,6 +2561,9 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
if (!set) if (!set)
return -EINVAL; return -EINVAL;
blk_mq_freeze_queue(q);
blk_mq_quiesce_queue(q);
ret = 0; ret = 0;
queue_for_each_hw_ctx(q, hctx, i) { queue_for_each_hw_ctx(q, hctx, i) {
if (!hctx->tags) if (!hctx->tags)
...@@ -2569,11 +2572,14 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) ...@@ -2569,11 +2572,14 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
* If we're using an MQ scheduler, just update the scheduler * If we're using an MQ scheduler, just update the scheduler
* queue depth. This is similar to what the old code would do. * queue depth. This is similar to what the old code would do.
*/ */
if (!hctx->sched_tags) if (!hctx->sched_tags) {
ret = blk_mq_tag_update_depth(hctx->tags, ret = blk_mq_tag_update_depth(hctx, &hctx->tags,
min(nr, set->queue_depth)); min(nr, set->queue_depth),
else false);
ret = blk_mq_tag_update_depth(hctx->sched_tags, nr); } else {
ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
nr, true);
}
if (ret) if (ret)
break; break;
} }
...@@ -2581,6 +2587,9 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) ...@@ -2581,6 +2587,9 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
if (!ret) if (!ret)
q->nr_requests = nr; q->nr_requests = nr;
blk_mq_unfreeze_queue(q);
blk_mq_start_stopped_hw_queues(q, true);
return ret; return ret;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment