Commit a0235d23 authored by John Garry, committed by Jens Axboe

blk-mq: Relocate hctx_may_queue()

blk-mq.h and blk-mq-tag.h include each other, which is less than ideal.

Relocate hctx_may_queue() to blk-mq.h, as it is not really tag-specific code.

In this way, we can drop the blk-mq-tag.h include of blk-mq.h.
Signed-off-by: John Garry <john.garry@huawei.com>
Tested-by: Douglas Gilbert <dgilbert@interlog.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 32bc15af
block/blk-mq-tag.h
@@ -2,8 +2,6 @@
 #ifndef INT_BLK_MQ_TAG_H
 #define INT_BLK_MQ_TAG_H
 
-#include "blk-mq.h"
-
 /*
  * Tag address space map.
  */
@@ -81,37 +79,6 @@ static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
 		__blk_mq_tag_idle(hctx);
 }
 
-/*
- * For shared tag users, we track the number of currently active users
- * and attempt to provide a fair share of the tag depth for each of them.
- */
-static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
-				  struct sbitmap_queue *bt)
-{
-	unsigned int depth, users;
-
-	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
-		return true;
-	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
-		return true;
-
-	/*
-	 * Don't try dividing an ant
-	 */
-	if (bt->sb.depth == 1)
-		return true;
-
-	users = atomic_read(&hctx->tags->active_queues);
-	if (!users)
-		return true;
-
-	/*
-	 * Allow at least some tags
-	 */
-	depth = max((bt->sb.depth + users - 1) / users, 4U);
-	return atomic_read(&hctx->nr_active) < depth;
-}
-
 static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags *tags,
 					  unsigned int tag)
 {
...
block/blk-mq.h
@@ -259,4 +259,36 @@ static inline struct blk_plug *blk_mq_plug(struct request_queue *q,
 	return NULL;
 }
 
+/*
+ * For shared tag users, we track the number of currently active users
+ * and attempt to provide a fair share of the tag depth for each of them.
+ */
+static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
+				  struct sbitmap_queue *bt)
+{
+	unsigned int depth, users;
+
+	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
+		return true;
+	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
+		return true;
+
+	/*
+	 * Don't try dividing an ant
+	 */
+	if (bt->sb.depth == 1)
+		return true;
+
+	users = atomic_read(&hctx->tags->active_queues);
+	if (!users)
+		return true;
+
+	/*
+	 * Allow at least some tags
+	 */
+	depth = max((bt->sb.depth + users - 1) / users, 4U);
+	return atomic_read(&hctx->nr_active) < depth;
+}
+
 #endif
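
For reference (not part of the patch): the per-queue limit that hctx_may_queue() enforces is a ceiling division of the shared tag depth by the number of active users, clamped to a minimum of 4. The userspace sketch below illustrates only that arithmetic; the helper name fair_share_depth and the depth of 64 are hypothetical, and none of this is kernel code.

/*
 * Standalone illustration of the fair-share calculation performed by
 * hctx_may_queue(): each active queue sharing a tag set gets roughly
 * depth/users tags, rounded up, but never fewer than 4.
 */
#include <stdio.h>

/* Ceiling division of the total tag depth across active users,
 * clamped to a minimum of 4 tags per user. */
static unsigned int fair_share_depth(unsigned int depth, unsigned int users)
{
	unsigned int share;

	if (users == 0)
		return depth;	/* no sharing in effect */

	share = (depth + users - 1) / users;	/* round up */
	return share > 4U ? share : 4U;		/* allow at least some tags */
}

int main(void)
{
	unsigned int depth = 64;	/* hypothetical shared tag depth */

	for (unsigned int users = 1; users <= 32; users *= 2)
		printf("users=%2u -> per-queue limit=%u\n",
		       users, fair_share_depth(depth, users));
	return 0;
}

With a depth of 64, this prints limits of 64, 32, 16, 8, 4 and 4 for 1, 2, 4, 8, 16 and 32 active users; in the kernel function, a hardware queue is only allowed another tag while its nr_active count is below that limit.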