Commit c6699d6f authored by Christoph Hellwig, committed by Jens Axboe

blk-mq: factor out a "classic" poll helper

Factor the code to do the classic full metal polling out of blk_poll into
a separate blk_mq_poll_classic helper.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Tested-by: Mark Wunderlich <mark.wunderlich@intel.com>
Link: https://lore.kernel.org/r/20211012111226.760968-7-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent f70299f0
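
For orientation, here is the shape of blk_poll() once the hunks below are applied, condensed from this diff (context trimmed, not a verbatim copy of the tree): the hybrid sleep path is only tried when the caller allows spinning and hybrid polling is configured; everything else falls through to the new classic helper.

int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
{
	/* Nothing to poll for without a valid cookie on a polled queue. */
	if (!blk_qc_t_valid(cookie) ||
	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		return 0;

	if (current->plug)
		blk_flush_plug_list(current->plug, false);

	/* If specified not to spin, we also should not sleep. */
	if (spin && q->poll_nsec != BLK_MQ_POLL_CLASSIC) {
		if (blk_mq_poll_hybrid(q, cookie))
			return 1;
	}

	/* Classic polling: spin on ->poll() until completion or reschedule. */
	return blk_mq_poll_classic(q, cookie, spin);
}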
block/blk-mq.c

@@ -71,6 +71,14 @@ static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q,
 	return q->queue_hw_ctx[(qc & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT];
 }
 
+static inline struct request *blk_qc_to_rq(struct blk_mq_hw_ctx *hctx,
+		blk_qc_t qc)
+{
+	if (blk_qc_t_is_internal(qc))
+		return blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(qc));
+	return blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(qc));
+}
+
 /*
  * Check if any of the ctx, dispatch list or elevator
  * have pending work in this hardware queue.
@@ -3975,15 +3983,20 @@ static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
 	return ret;
 }
 
-static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
-				     struct request *rq)
+static bool blk_mq_poll_hybrid(struct request_queue *q, blk_qc_t qc)
 {
+	struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, qc);
+	struct request *rq = blk_qc_to_rq(hctx, qc);
 	struct hrtimer_sleeper hs;
 	enum hrtimer_mode mode;
 	unsigned int nsecs;
 	ktime_t kt;
 
-	if (rq->rq_flags & RQF_MQ_POLL_SLEPT)
+	/*
+	 * If a request has completed on queue that uses an I/O scheduler, we
+	 * won't get back a request from blk_qc_to_rq.
+	 */
+	if (!rq || (rq->rq_flags & RQF_MQ_POLL_SLEPT))
 		return false;
 
 	/*
@@ -4025,32 +4038,48 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
 	__set_current_state(TASK_RUNNING);
 	destroy_hrtimer_on_stack(&hs.timer);
 
+	/*
+	 * If we sleep, have the caller restart the poll loop to reset the
+	 * state.  Like for the other success return cases, the caller is
+	 * responsible for checking if the IO completed.  If the IO isn't
+	 * complete, we'll get called again and will go straight to the busy
+	 * poll loop.
+	 */
 	return true;
 }
 
-static bool blk_mq_poll_hybrid(struct request_queue *q,
-			       struct blk_mq_hw_ctx *hctx, blk_qc_t cookie)
+static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie,
+		bool spin)
 {
-	struct request *rq;
+	struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, cookie);
+	long state = get_current_state();
+	int ret;
 
-	if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
-		return false;
+	hctx->poll_considered++;
 
-	if (!blk_qc_t_is_internal(cookie))
-		rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
-	else {
-		rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
-		/*
-		 * With scheduling, if the request has completed, we'll
-		 * get a NULL return here, as we clear the sched tag when
-		 * that happens. The request still remains valid, like always,
-		 * so we should be safe with just the NULL check.
-		 */
-		if (!rq)
-			return false;
-	}
+	do {
+		hctx->poll_invoked++;
+
+		ret = q->mq_ops->poll(hctx);
+		if (ret > 0) {
+			hctx->poll_success++;
+			__set_current_state(TASK_RUNNING);
+			return ret;
+		}
+
+		if (signal_pending_state(state, current))
+			__set_current_state(TASK_RUNNING);
+
+		if (task_is_running(current))
+			return 1;
+		if (ret < 0 || !spin)
+			break;
+		cpu_relax();
+	} while (!need_resched());
 
-	return blk_mq_poll_hybrid_sleep(q, rq);
+	__set_current_state(TASK_RUNNING);
+	return 0;
 }
 
 /**
@@ -4067,9 +4096,6 @@ static bool blk_mq_poll_hybrid(struct request_queue *q,
  */
 int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
 {
-	struct blk_mq_hw_ctx *hctx;
-	unsigned int state;
-
 	if (!blk_qc_t_valid(cookie) ||
 	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
 		return 0;
@@ -4077,46 +4103,12 @@ int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
 	if (current->plug)
 		blk_flush_plug_list(current->plug, false);
 
-	hctx = blk_qc_to_hctx(q, cookie);
-
-	/*
-	 * If we sleep, have the caller restart the poll loop to reset
-	 * the state.  Like for the other success return cases, the
-	 * caller is responsible for checking if the IO completed.  If
-	 * the IO isn't complete, we'll get called again and will go
-	 * straight to the busy poll loop.  If specified not to spin,
-	 * we also should not sleep.
-	 */
-	if (spin && blk_mq_poll_hybrid(q, hctx, cookie))
-		return 1;
-
-	hctx->poll_considered++;
-
-	state = get_current_state();
-	do {
-		int ret;
-
-		hctx->poll_invoked++;
-
-		ret = q->mq_ops->poll(hctx);
-		if (ret > 0) {
-			hctx->poll_success++;
-			__set_current_state(TASK_RUNNING);
-			return ret;
-		}
-
-		if (signal_pending_state(state, current))
-			__set_current_state(TASK_RUNNING);
-
-		if (task_is_running(current))
-			return 1;
-		if (ret < 0 || !spin)
-			break;
-		cpu_relax();
-	} while (!need_resched());
-
-	__set_current_state(TASK_RUNNING);
-	return 0;
+	/* If specified not to spin, we also should not sleep. */
+	if (spin && q->poll_nsec != BLK_MQ_POLL_CLASSIC) {
+		if (blk_mq_poll_hybrid(q, cookie))
+			return 1;
+	}
+	return blk_mq_poll_classic(q, cookie, spin);
 }
 EXPORT_SYMBOL_GPL(blk_poll);
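
As a usage note, not part of this patch: the blk_poll() path can be exercised from user space with a synchronous RWF_HIPRI read on an O_DIRECT block device. A minimal sketch follows; the device path and the poll-queue configuration (e.g. /sys/block/nvme0n1/queue/io_poll) are assumptions for illustration, not something this commit establishes.

/* Minimal sketch: issue one polled O_DIRECT read via preadv2(RWF_HIPRI).
 * Assumes /dev/nvme0n1 exists, its queue has polling enabled, and the
 * program runs with sufficient privileges to open the device.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/nvme0n1", O_RDONLY | O_DIRECT);
	void *buf;

	if (fd < 0 || posix_memalign(&buf, 4096, 4096))
		return 1;

	struct iovec iov = { .iov_base = buf, .iov_len = 4096 };
	/* RWF_HIPRI asks the kernel to busy-poll for completion. */
	ssize_t ret = preadv2(fd, &iov, 1, 0, RWF_HIPRI);

	printf("read %zd bytes\n", ret);
	free(buf);
	close(fd);
	return ret == 4096 ? 0 : 1;
}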