Commit 4941115b authored by Jens Axboe

blk-mq-tag: cleanup the normal/reserved tag allocation

This is in preparation for having another tag set available. Cleanup
the parameters, and allow passing in of tags for blk_mq_put_tag().
Signed-off-by: Jens Axboe <axboe@fb.com>
[hch: even more cleanups]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Omar Sandoval <osandov@fb.com>
parent 2c3ad667
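In practical terms, the interface change this prepares for is most visible in the blk_mq_put_tag() signature: the caller now passes the tag set explicitly instead of the function assuming hctx->tags. A minimal before/after sketch of a call site (mirroring the __blk_mq_free_request() hunk further down; hctx, ctx and tag are whatever the caller already holds):

	/* before: blk_mq_put_tag() derived the tag set from hctx->tags itself */
	blk_mq_put_tag(hctx, ctx, tag);

	/* after: the caller names the tag set the tag is returned to, so a
	 * second tag set can be added later without touching this helper */
	blk_mq_put_tag(hctx, hctx->tags, ctx, tag);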
block/blk-mq-tag.c
@@ -90,32 +90,46 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
 	return atomic_read(&hctx->nr_active) < depth;
 }
 
-static int __bt_get(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt)
+static int __blk_mq_get_tag(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt)
 {
 	if (!hctx_may_queue(hctx, bt))
 		return -1;
 	return __sbitmap_queue_get(bt);
 }
 
-static int bt_get(struct blk_mq_alloc_data *data, struct sbitmap_queue *bt,
-		  struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags)
+unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
 {
+	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
+	struct sbitmap_queue *bt;
 	struct sbq_wait_state *ws;
 	DEFINE_WAIT(wait);
+	unsigned int tag_offset;
 	int tag;
 
-	tag = __bt_get(hctx, bt);
+	if (data->flags & BLK_MQ_REQ_RESERVED) {
+		if (unlikely(!tags->nr_reserved_tags)) {
+			WARN_ON_ONCE(1);
+			return BLK_MQ_TAG_FAIL;
+		}
+		bt = &tags->breserved_tags;
+		tag_offset = 0;
+	} else {
+		bt = &tags->bitmap_tags;
+		tag_offset = tags->nr_reserved_tags;
+	}
+
+	tag = __blk_mq_get_tag(data->hctx, bt);
 	if (tag != -1)
-		return tag;
+		goto found_tag;
 
 	if (data->flags & BLK_MQ_REQ_NOWAIT)
-		return -1;
+		return BLK_MQ_TAG_FAIL;
 
-	ws = bt_wait_ptr(bt, hctx);
+	ws = bt_wait_ptr(bt, data->hctx);
 	do {
 		prepare_to_wait(&ws->wait, &wait, TASK_UNINTERRUPTIBLE);
 
-		tag = __bt_get(hctx, bt);
+		tag = __blk_mq_get_tag(data->hctx, bt);
 		if (tag != -1)
 			break;
 
@@ -125,14 +139,14 @@ static int bt_get(struct blk_mq_alloc_data *data, struct sbitmap_queue *bt,
 		 * some to complete. Note that hctx can be NULL here for
 		 * reserved tag allocation.
 		 */
-		if (hctx)
-			blk_mq_run_hw_queue(hctx, false);
+		if (data->hctx)
+			blk_mq_run_hw_queue(data->hctx, false);
 
 		/*
 		 * Retry tag allocation after running the hardware queue,
 		 * as running the queue may also have found completions.
 		 */
-		tag = __bt_get(hctx, bt);
+		tag = __blk_mq_get_tag(data->hctx, bt);
 		if (tag != -1)
 			break;
 
@@ -142,61 +156,25 @@ static int bt_get(struct blk_mq_alloc_data *data, struct sbitmap_queue *bt,
 		data->ctx = blk_mq_get_ctx(data->q);
 		data->hctx = blk_mq_map_queue(data->q, data->ctx->cpu);
-		if (data->flags & BLK_MQ_REQ_RESERVED) {
-			bt = &data->hctx->tags->breserved_tags;
-		} else {
-			hctx = data->hctx;
-			bt = &hctx->tags->bitmap_tags;
-		}
+		tags = blk_mq_tags_from_data(data);
+		if (data->flags & BLK_MQ_REQ_RESERVED)
+			bt = &tags->breserved_tags;
+		else
+			bt = &tags->bitmap_tags;
 
 		finish_wait(&ws->wait, &wait);
-		ws = bt_wait_ptr(bt, hctx);
+		ws = bt_wait_ptr(bt, data->hctx);
 	} while (1);
 
 	finish_wait(&ws->wait, &wait);
-	return tag;
-}
-
-static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data)
-{
-	int tag;
-
-	tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx,
-		     data->hctx->tags);
-	if (tag >= 0)
-		return tag + data->hctx->tags->nr_reserved_tags;
-
-	return BLK_MQ_TAG_FAIL;
+
+found_tag:
+	return tag + tag_offset;
 }
 
-static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data)
-{
-	int tag;
-
-	if (unlikely(!data->hctx->tags->nr_reserved_tags)) {
-		WARN_ON_ONCE(1);
-		return BLK_MQ_TAG_FAIL;
-	}
-
-	tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL,
-		     data->hctx->tags);
-	if (tag < 0)
-		return BLK_MQ_TAG_FAIL;
-
-	return tag;
-}
-
-unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
-{
-	if (data->flags & BLK_MQ_REQ_RESERVED)
-		return __blk_mq_get_reserved_tag(data);
-	return __blk_mq_get_tag(data);
-}
-
-void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
-		    unsigned int tag)
+void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
+		    struct blk_mq_ctx *ctx, unsigned int tag)
 {
-	struct blk_mq_tags *tags = hctx->tags;
-
 	if (tag >= tags->nr_reserved_tags) {
 		const int real_tag = tag - tags->nr_reserved_tags;
...
block/blk-mq-tag.h
@@ -24,8 +24,8 @@ extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int r
 extern void blk_mq_free_tags(struct blk_mq_tags *tags);
 
 extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
-extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
-			   unsigned int tag);
+extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
+			   struct blk_mq_ctx *ctx, unsigned int tag);
 extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags);
 extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
 extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth);
...
block/blk-mq.c
@@ -335,7 +335,7 @@ void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
 	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
 	clear_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
 
-	blk_mq_put_tag(hctx, ctx, tag);
+	blk_mq_put_tag(hctx, hctx->tags, ctx, tag);
 	blk_queue_exit(q);
 }
...
block/blk-mq.h
@@ -118,6 +118,11 @@ static inline void blk_mq_set_alloc_data(struct blk_mq_alloc_data *data,
 	data->hctx = hctx;
 }
 
+static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
+{
+	return data->hctx->tags;
+}
+
 /*
  * Internal helpers for request allocation/init/free
  */
...