Commit 079a2e3e authored by John Garry, committed by Jens Axboe

blk-mq: Change shared sbitmap naming to shared tags

Now that shared sbitmap support really means shared tags, rename symbols
to match that.
Signed-off-by: John Garry <john.garry@huawei.com>
Link: https://lore.kernel.org/r/1633429419-228500-15-git-send-email-john.garry@huawei.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent ae0f1a73
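
The rename is mechanical: the predicate still tests the same BLK_MQ_F_TAG_HCTX_SHARED flag bit, and callers still choose between the tag-set-wide tags and the per-hctx tags. For reference, a minimal userspace sketch of the renamed helper follows; the flag value and the printf driver are illustrative stand-ins, not kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel flag bit; the value is assumed. */
#define BLK_MQ_F_TAG_HCTX_SHARED (1U << 3)

/* Renamed predicate: the same test the old blk_mq_is_sbitmap_shared() did. */
static inline bool blk_mq_is_shared_tags(unsigned int flags)
{
	return flags & BLK_MQ_F_TAG_HCTX_SHARED;
}

int main(void)
{
	unsigned int flags = BLK_MQ_F_TAG_HCTX_SHARED;

	/* Callers branch between the shared tag set and per-hctx tags. */
	if (blk_mq_is_shared_tags(flags))
		printf("use set->shared_tags\n");
	else
		printf("use set->tags[hctx_idx]\n");

	return 0;
}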
@@ -554,7 +554,7 @@ struct request_queue *blk_alloc_queue(int node_id)
 	q->node = node_id;
-	atomic_set(&q->nr_active_requests_shared_sbitmap, 0);
+	atomic_set(&q->nr_active_requests_shared_tags, 0);
 	timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
 	INIT_WORK(&q->timeout_work, blk_timeout_work);
...
@@ -519,8 +519,8 @@ static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q,
 				   struct blk_mq_hw_ctx *hctx,
 				   unsigned int hctx_idx)
 {
-	if (blk_mq_is_sbitmap_shared(q->tag_set->flags)) {
-		hctx->sched_tags = q->shared_sbitmap_tags;
+	if (blk_mq_is_shared_tags(q->tag_set->flags)) {
+		hctx->sched_tags = q->sched_shared_tags;
 		return 0;
 	}
@@ -532,10 +532,10 @@ static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q,
 	return 0;
 }

-static void blk_mq_exit_sched_shared_sbitmap(struct request_queue *queue)
+static void blk_mq_exit_sched_shared_tags(struct request_queue *queue)
 {
-	blk_mq_free_rq_map(queue->shared_sbitmap_tags);
-	queue->shared_sbitmap_tags = NULL;
+	blk_mq_free_rq_map(queue->sched_shared_tags);
+	queue->sched_shared_tags = NULL;
 }

 /* called in queue's release handler, tagset has gone away */
@@ -546,17 +546,17 @@ static void blk_mq_sched_tags_teardown(struct request_queue *q, unsigned int fla
 	queue_for_each_hw_ctx(q, hctx, i) {
 		if (hctx->sched_tags) {
-			if (!blk_mq_is_sbitmap_shared(q->tag_set->flags))
+			if (!blk_mq_is_shared_tags(q->tag_set->flags))
 				blk_mq_free_rq_map(hctx->sched_tags);
 			hctx->sched_tags = NULL;
 		}
 	}

-	if (blk_mq_is_sbitmap_shared(flags))
-		blk_mq_exit_sched_shared_sbitmap(q);
+	if (blk_mq_is_shared_tags(flags))
+		blk_mq_exit_sched_shared_tags(q);
 }

-static int blk_mq_init_sched_shared_sbitmap(struct request_queue *queue)
+static int blk_mq_init_sched_shared_tags(struct request_queue *queue)
 {
 	struct blk_mq_tag_set *set = queue->tag_set;
@@ -564,13 +564,13 @@ static int blk_mq_init_sched_shared_sbitmap(struct request_queue *queue)
 	 * Set initial depth at max so that we don't need to reallocate for
 	 * updating nr_requests.
 	 */
-	queue->shared_sbitmap_tags = blk_mq_alloc_map_and_rqs(set,
+	queue->sched_shared_tags = blk_mq_alloc_map_and_rqs(set,
 						BLK_MQ_NO_HCTX_IDX,
 						MAX_SCHED_RQ);
-	if (!queue->shared_sbitmap_tags)
+	if (!queue->sched_shared_tags)
 		return -ENOMEM;

-	blk_mq_tag_update_sched_shared_sbitmap(queue);
+	blk_mq_tag_update_sched_shared_tags(queue);

 	return 0;
 }
@@ -596,8 +596,8 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
 	q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
 				   BLKDEV_DEFAULT_RQ);

-	if (blk_mq_is_sbitmap_shared(flags)) {
-		ret = blk_mq_init_sched_shared_sbitmap(q);
+	if (blk_mq_is_shared_tags(flags)) {
+		ret = blk_mq_init_sched_shared_tags(q);
 		if (ret)
 			return ret;
 	}
@@ -647,8 +647,8 @@ void blk_mq_sched_free_rqs(struct request_queue *q)
 	struct blk_mq_hw_ctx *hctx;
 	int i;

-	if (blk_mq_is_sbitmap_shared(q->tag_set->flags)) {
-		blk_mq_free_rqs(q->tag_set, q->shared_sbitmap_tags,
+	if (blk_mq_is_shared_tags(q->tag_set->flags)) {
+		blk_mq_free_rqs(q->tag_set, q->sched_shared_tags,
 				BLK_MQ_NO_HCTX_IDX);
 	} else {
 		queue_for_each_hw_ctx(q, hctx, i) {
...
@@ -24,7 +24,7 @@
  */
 bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
 {
-	if (blk_mq_is_sbitmap_shared(hctx->flags)) {
+	if (blk_mq_is_shared_tags(hctx->flags)) {
 		struct request_queue *q = hctx->queue;

 		if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) &&
@@ -57,19 +57,19 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
 {
 	struct blk_mq_tags *tags = hctx->tags;

-	if (blk_mq_is_sbitmap_shared(hctx->flags)) {
+	if (blk_mq_is_shared_tags(hctx->flags)) {
 		struct request_queue *q = hctx->queue;

 		if (!test_and_clear_bit(QUEUE_FLAG_HCTX_ACTIVE,
 					&q->queue_flags))
 			return;
-		atomic_dec(&tags->active_queues);
 	} else {
 		if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
 			return;
-		atomic_dec(&tags->active_queues);
 	}

+	atomic_dec(&tags->active_queues);
+
 	blk_mq_tag_wakeup_all(tags, false);
 }
@@ -557,7 +557,7 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
 		 * Only the sbitmap needs resizing since we allocated the max
 		 * initially.
 		 */
-		if (blk_mq_is_sbitmap_shared(set->flags))
+		if (blk_mq_is_shared_tags(set->flags))
 			return 0;

 		new = blk_mq_alloc_map_and_rqs(set, hctx->queue_num, tdepth);
@@ -578,16 +578,16 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
 	return 0;
 }

-void blk_mq_tag_resize_shared_sbitmap(struct blk_mq_tag_set *set, unsigned int size)
+void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set, unsigned int size)
 {
-	struct blk_mq_tags *tags = set->shared_sbitmap_tags;
+	struct blk_mq_tags *tags = set->shared_tags;

 	sbitmap_queue_resize(&tags->bitmap_tags, size - set->reserved_tags);
 }

-void blk_mq_tag_update_sched_shared_sbitmap(struct request_queue *q)
+void blk_mq_tag_update_sched_shared_tags(struct request_queue *q)
 {
-	sbitmap_queue_resize(&q->shared_sbitmap_tags->bitmap_tags,
+	sbitmap_queue_resize(&q->sched_shared_tags->bitmap_tags,
 			     q->nr_requests - q->tag_set->reserved_tags);
 }
...
@@ -43,9 +43,9 @@ extern void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
 extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
 					struct blk_mq_tags **tags,
 					unsigned int depth, bool can_grow);
-extern void blk_mq_tag_resize_shared_sbitmap(struct blk_mq_tag_set *set,
+extern void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set,
 					unsigned int size);
-extern void blk_mq_tag_update_sched_shared_sbitmap(struct request_queue *q);
+extern void blk_mq_tag_update_sched_shared_tags(struct request_queue *q);
 extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);

 void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
...
@@ -2235,7 +2235,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
 		blk_insert_flush(rq);
 		blk_mq_run_hw_queue(data.hctx, true);
 	} else if (plug && (q->nr_hw_queues == 1 ||
-		   blk_mq_is_sbitmap_shared(rq->mq_hctx->flags) ||
+		   blk_mq_is_shared_tags(rq->mq_hctx->flags) ||
 		   q->mq_ops->commit_rqs || !blk_queue_nonrot(q))) {
 		/*
 		 * Use plugging if we have a ->commit_rqs() hook as well, as
@@ -2353,8 +2353,8 @@ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
 	struct blk_mq_tags *drv_tags;
 	struct page *page;

-	if (blk_mq_is_sbitmap_shared(set->flags))
-		drv_tags = set->shared_sbitmap_tags;
+	if (blk_mq_is_shared_tags(set->flags))
+		drv_tags = set->shared_tags;
 	else
 		drv_tags = set->tags[hctx_idx];
@@ -2883,8 +2883,8 @@ struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
 static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
 				       int hctx_idx)
 {
-	if (blk_mq_is_sbitmap_shared(set->flags)) {
-		set->tags[hctx_idx] = set->shared_sbitmap_tags;
+	if (blk_mq_is_shared_tags(set->flags)) {
+		set->tags[hctx_idx] = set->shared_tags;
 		return true;
 	}
@@ -2908,7 +2908,7 @@ void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
 static void __blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
 					unsigned int hctx_idx)
 {
-	if (!blk_mq_is_sbitmap_shared(set->flags))
+	if (!blk_mq_is_shared_tags(set->flags))
 		blk_mq_free_map_and_rqs(set, set->tags[hctx_idx], hctx_idx);

 	set->tags[hctx_idx] = NULL;
@@ -3375,11 +3375,11 @@ static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
 {
 	int i;

-	if (blk_mq_is_sbitmap_shared(set->flags)) {
-		set->shared_sbitmap_tags = blk_mq_alloc_map_and_rqs(set,
+	if (blk_mq_is_shared_tags(set->flags)) {
+		set->shared_tags = blk_mq_alloc_map_and_rqs(set,
 						BLK_MQ_NO_HCTX_IDX,
 						set->queue_depth);
-		if (!set->shared_sbitmap_tags)
+		if (!set->shared_tags)
 			return -ENOMEM;
 	}
@@ -3395,8 +3395,8 @@ static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
 	while (--i >= 0)
 		__blk_mq_free_map_and_rqs(set, i);

-	if (blk_mq_is_sbitmap_shared(set->flags)) {
-		blk_mq_free_map_and_rqs(set, set->shared_sbitmap_tags,
+	if (blk_mq_is_shared_tags(set->flags)) {
+		blk_mq_free_map_and_rqs(set, set->shared_tags,
 				BLK_MQ_NO_HCTX_IDX);
 	}
@@ -3617,8 +3617,8 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
 	for (i = 0; i < set->nr_hw_queues; i++)
 		__blk_mq_free_map_and_rqs(set, i);

-	if (blk_mq_is_sbitmap_shared(set->flags)) {
-		blk_mq_free_map_and_rqs(set, set->shared_sbitmap_tags,
+	if (blk_mq_is_shared_tags(set->flags)) {
+		blk_mq_free_map_and_rqs(set, set->shared_tags,
 				BLK_MQ_NO_HCTX_IDX);
 	}
@@ -3669,11 +3669,11 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
 	}
 	if (!ret) {
 		q->nr_requests = nr;
-		if (blk_mq_is_sbitmap_shared(set->flags)) {
+		if (blk_mq_is_shared_tags(set->flags)) {
 			if (q->elevator)
-				blk_mq_tag_update_sched_shared_sbitmap(q);
+				blk_mq_tag_update_sched_shared_tags(q);
 			else
-				blk_mq_tag_resize_shared_sbitmap(set, nr);
+				blk_mq_tag_resize_shared_tags(set, nr);
 		}
 	}
...
@@ -157,7 +157,7 @@ struct blk_mq_alloc_data {
 	struct blk_mq_hw_ctx *hctx;
 };

-static inline bool blk_mq_is_sbitmap_shared(unsigned int flags)
+static inline bool blk_mq_is_shared_tags(unsigned int flags)
 {
 	return flags & BLK_MQ_F_TAG_HCTX_SHARED;
 }
@@ -217,24 +217,24 @@ static inline int blk_mq_get_rq_budget_token(struct request *rq)
 static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
 {
-	if (blk_mq_is_sbitmap_shared(hctx->flags))
-		atomic_inc(&hctx->queue->nr_active_requests_shared_sbitmap);
+	if (blk_mq_is_shared_tags(hctx->flags))
+		atomic_inc(&hctx->queue->nr_active_requests_shared_tags);
 	else
 		atomic_inc(&hctx->nr_active);
 }

 static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
 {
-	if (blk_mq_is_sbitmap_shared(hctx->flags))
-		atomic_dec(&hctx->queue->nr_active_requests_shared_sbitmap);
+	if (blk_mq_is_shared_tags(hctx->flags))
+		atomic_dec(&hctx->queue->nr_active_requests_shared_tags);
 	else
 		atomic_dec(&hctx->nr_active);
 }

 static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
 {
-	if (blk_mq_is_sbitmap_shared(hctx->flags))
-		return atomic_read(&hctx->queue->nr_active_requests_shared_sbitmap);
+	if (blk_mq_is_shared_tags(hctx->flags))
+		return atomic_read(&hctx->queue->nr_active_requests_shared_tags);
 	return atomic_read(&hctx->nr_active);
 }

 static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
@@ -328,7 +328,7 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
 	if (bt->sb.depth == 1)
 		return true;

-	if (blk_mq_is_sbitmap_shared(hctx->flags)) {
+	if (blk_mq_is_shared_tags(hctx->flags)) {
 		struct request_queue *q = hctx->queue;

 		if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
...
@@ -637,7 +637,7 @@ static struct elevator_type *elevator_get_default(struct request_queue *q)
 		return NULL;

 	if (q->nr_hw_queues != 1 &&
-	    !blk_mq_is_sbitmap_shared(q->tag_set->flags))
+	    !blk_mq_is_shared_tags(q->tag_set->flags))
 		return NULL;

 	return elevator_get(q, "mq-deadline", false);
...
@@ -442,9 +442,9 @@ enum hctx_type {
  *		   tag set.
  * @tags:	   Tag sets. One tag set per hardware queue. Has @nr_hw_queues
  *		   elements.
- * @shared_sbitmap_tags:
- *		   Shared sbitmap set of tags. Has @nr_hw_queues elements. If
- *		   set, shared by all @tags.
+ * @shared_tags:
+ *		   Shared set of tags. Has @nr_hw_queues elements. If set,
+ *		   shared by all @tags.
  * @tag_list_lock: Serializes tag_list accesses.
  * @tag_list:	   List of the request queues that use this tag set. See also
  *		   request_queue.tag_set_list.
@@ -464,7 +464,7 @@ struct blk_mq_tag_set {
 	struct blk_mq_tags **tags;

-	struct blk_mq_tags *shared_sbitmap_tags;
+	struct blk_mq_tags *shared_tags;

 	struct mutex tag_list_lock;
 	struct list_head tag_list;
...
@@ -236,9 +236,9 @@ struct request_queue {
 	struct timer_list timeout;
 	struct work_struct timeout_work;

-	atomic_t nr_active_requests_shared_sbitmap;
+	atomic_t nr_active_requests_shared_tags;

-	struct blk_mq_tags *shared_sbitmap_tags;
+	struct blk_mq_tags *sched_shared_tags;

 	struct list_head icq_list;
 #ifdef CONFIG_BLK_CGROUP