Commit bbd7bb70 authored by Jens Axboe

block: move poll code to blk-mq

The poll code is blk-mq specific, let's move it to blk-mq.c. This
is a prep patch for improving the polling code.
Signed-off-by: Jens Axboe <axboe@fb.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
parent b425b020
...@@ -3312,52 +3312,6 @@ void blk_finish_plug(struct blk_plug *plug) ...@@ -3312,52 +3312,6 @@ void blk_finish_plug(struct blk_plug *plug)
} }
EXPORT_SYMBOL(blk_finish_plug); EXPORT_SYMBOL(blk_finish_plug);
/*
 * blk_poll - busy-poll for completion of a previously submitted request.
 * @q:      request queue the request was submitted to
 * @cookie: completion cookie returned at submission time; encodes the
 *          hardware queue number and the request tag
 *
 * Returns true if a completion was reaped (or the task was otherwise made
 * runnable) while spinning, false if polling gave up — either because a
 * reschedule became pending or ->poll reported an error.
 */
bool blk_poll(struct request_queue *q, blk_qc_t cookie)
{
struct blk_plug *plug;
long state;
unsigned int queue_num;
struct blk_mq_hw_ctx *hctx;
/*
 * Polling only applies to blk-mq queues that provide a ->poll hook,
 * have the poll queue flag set, and were handed a valid cookie.
 */
if (!q->mq_ops || !q->mq_ops->poll || !blk_qc_t_valid(cookie) ||
!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
return false;
/* The cookie tells us which hardware context the request went to. */
queue_num = blk_qc_t_to_queue_num(cookie);
hctx = q->queue_hw_ctx[queue_num];
hctx->poll_considered++;
/*
 * Flush any plugged IO first so that the request being polled for has
 * actually been issued to the driver.
 */
plug = current->plug;
if (plug)
blk_flush_plug_list(plug, false);
/*
 * Snapshot the task state at entry; it is used below with
 * signal_pending_state() to decide whether a pending signal should
 * terminate the poll loop.
 */
state = current->state;
while (!need_resched()) {
int ret;
hctx->poll_invoked++;
/* Ask the driver to reap completions for this tag. */
ret = q->mq_ops->poll(hctx, blk_qc_t_to_tag(cookie));
if (ret > 0) {
/* Driver reported completion(s) found. */
hctx->poll_success++;
set_current_state(TASK_RUNNING);
return true;
}
/*
 * A pending signal makes us runnable; also stop polling if
 * something else (e.g. the interrupt completion path) already
 * set us back to TASK_RUNNING.
 */
if (signal_pending_state(state, current))
set_current_state(TASK_RUNNING);
if (current->state == TASK_RUNNING)
return true;
/* Negative return from ->poll: give up on polling. */
if (ret < 0)
break;
cpu_relax();
}
return false;
}
EXPORT_SYMBOL_GPL(blk_poll);
#ifdef CONFIG_PM #ifdef CONFIG_PM
/** /**
* blk_pm_runtime_init - Block layer runtime PM initialization routine * blk_pm_runtime_init - Block layer runtime PM initialization routine
......
...@@ -2461,6 +2461,60 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues) ...@@ -2461,6 +2461,60 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
} }
EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues); EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
/*
 * __blk_mq_poll - spin on a hardware context until @rq completes.
 * @hctx: hardware queue the request was issued on
 * @rq:   request to poll for
 *
 * Core poll loop, split out of blk_mq_poll(). Returns true if a
 * completion was reaped (or the task otherwise became runnable) while
 * spinning, false if polling gave up (reschedule pending, or ->poll
 * returned an error).
 */
static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
struct request_queue *q = hctx->queue;
long state;
hctx->poll_considered++;
/*
 * Snapshot the task state at entry; used with signal_pending_state()
 * below to decide whether a pending signal ends the poll loop.
 */
state = current->state;
while (!need_resched()) {
int ret;
hctx->poll_invoked++;
/* Ask the driver to reap completions for this request's tag. */
ret = q->mq_ops->poll(hctx, rq->tag);
if (ret > 0) {
/* Driver reported completion(s) found. */
hctx->poll_success++;
set_current_state(TASK_RUNNING);
return true;
}
/*
 * A pending signal makes us runnable; also stop polling if
 * something else (e.g. the interrupt completion path) already
 * set us back to TASK_RUNNING.
 */
if (signal_pending_state(state, current))
set_current_state(TASK_RUNNING);
if (current->state == TASK_RUNNING)
return true;
/* Negative return from ->poll: give up on polling. */
if (ret < 0)
break;
cpu_relax();
}
return false;
}
/*
 * blk_mq_poll - busy-poll for completion of a previously submitted request.
 * @q:      request queue the request was submitted to
 * @cookie: completion cookie returned at submission time; encodes the
 *          hardware queue number and the request tag
 *
 * Validates that polling is applicable, flushes any plugged IO, resolves
 * the cookie to its hardware context and request, then hands off to
 * __blk_mq_poll(). Returns false immediately if the queue does not
 * support polling or the cookie is invalid.
 */
bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
{
struct blk_mq_hw_ctx *hctx;
struct blk_plug *plug;
struct request *rq;
/*
 * Polling only applies to blk-mq queues that provide a ->poll hook,
 * have the poll queue flag set, and were handed a valid cookie.
 */
if (!q->mq_ops || !q->mq_ops->poll || !blk_qc_t_valid(cookie) ||
!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
return false;
/*
 * Flush any plugged IO first so that the request being polled for has
 * actually been issued to the driver.
 */
plug = current->plug;
if (plug)
blk_flush_plug_list(plug, false);
/* Resolve the cookie back to its hardware context and request. */
hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
return __blk_mq_poll(hctx, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_poll);
void blk_mq_disable_hotplug(void) void blk_mq_disable_hotplug(void)
{ {
mutex_lock(&all_q_mutex); mutex_lock(&all_q_mutex);
......
...@@ -96,7 +96,7 @@ static void nvmet_execute_rw(struct nvmet_req *req) ...@@ -96,7 +96,7 @@ static void nvmet_execute_rw(struct nvmet_req *req)
cookie = submit_bio(bio); cookie = submit_bio(bio);
blk_poll(bdev_get_queue(req->ns->bdev), cookie); blk_mq_poll(bdev_get_queue(req->ns->bdev), cookie);
} }
static void nvmet_execute_flush(struct nvmet_req *req) static void nvmet_execute_flush(struct nvmet_req *req)
......
...@@ -457,7 +457,7 @@ static struct bio *dio_await_one(struct dio *dio) ...@@ -457,7 +457,7 @@ static struct bio *dio_await_one(struct dio *dio)
dio->waiter = current; dio->waiter = current;
spin_unlock_irqrestore(&dio->bio_lock, flags); spin_unlock_irqrestore(&dio->bio_lock, flags);
if (!(dio->iocb->ki_flags & IOCB_HIPRI) || if (!(dio->iocb->ki_flags & IOCB_HIPRI) ||
!blk_poll(bdev_get_queue(dio->bio_bdev), dio->bio_cookie)) !blk_mq_poll(bdev_get_queue(dio->bio_bdev), dio->bio_cookie))
io_schedule(); io_schedule();
/* wake up sets us TASK_RUNNING */ /* wake up sets us TASK_RUNNING */
spin_lock_irqsave(&dio->bio_lock, flags); spin_lock_irqsave(&dio->bio_lock, flags);
......
...@@ -952,7 +952,7 @@ extern int blk_execute_rq(struct request_queue *, struct gendisk *, ...@@ -952,7 +952,7 @@ extern int blk_execute_rq(struct request_queue *, struct gendisk *,
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *, extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
struct request *, int, rq_end_io_fn *); struct request *, int, rq_end_io_fn *);
bool blk_poll(struct request_queue *q, blk_qc_t cookie); bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie);
static inline struct request_queue *bdev_get_queue(struct block_device *bdev) static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{ {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment