Commit b637108a authored by Ming Lei, committed by Jens Axboe

blk-mq: fix filesystem I/O request allocation

submit_bio_checks() may update bio->bi_opf, so we have to initialize
blk_mq_alloc_data.cmd_flags with bio->bi_opf after submit_bio_checks()
returns when allocating a new request.
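
For illustration, a minimal user-space sketch of the ordering hazard
follows. The struct layouts, the flag value, and the two-argument
submit_bio_checks() signature are simplified stand-ins, not the kernel
API; the point is only that a cmd_flags snapshot taken before the checks
goes stale once the checks rewrite bio->bi_opf (for example, clearing a
poll-related flag when the queue cannot poll):

#include <stdio.h>

/* Stand-ins for the kernel structures and flags involved. */
#define REQ_POLLED (1u << 0)

struct bio { unsigned int bi_opf; };
struct queue { int supports_poll; };

/* Mimics the relevant side effect of the real submit_bio_checks():
 * it may rewrite bio->bi_opf, e.g. clear the polled flag when the
 * queue does not support polling. */
static int submit_bio_checks(struct queue *q, struct bio *bio)
{
	if (!q->supports_poll)
		bio->bi_opf &= ~REQ_POLLED;
	return 1;
}

int main(void)
{
	struct queue q = { .supports_poll = 0 };
	struct bio bio = { .bi_opf = REQ_POLLED };

	unsigned int stale = bio.bi_opf;	/* buggy: taken too early */
	submit_bio_checks(&q, &bio);
	unsigned int fresh = bio.bi_opf;	/* fixed: taken after checks */

	printf("stale=%#x fresh=%#x\n", stale, fresh);
	return 0;
}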

When using a cached request, fall back to allocating a new request if
the cached rq isn't compatible with the incoming bio; otherwise update
rq->cmd_flags from the incoming bio->bi_opf.
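
A condensed, compilable sketch of that decision is below. All types and
helpers are illustrative stand-ins (flush compatibility is elided); only
the control flow mirrors the patch:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins, not the kernel types. */
enum hctx_type { TYPE_DEFAULT, TYPE_READ, TYPE_POLL };

struct bio { unsigned int bi_opf; enum hctx_type type; };
struct request { unsigned int cmd_flags; enum hctx_type type; };

/* Mirrors the shape of blk_mq_can_use_cached_rq(): the cached request
 * must map to the same hardware-queue type as the bio. */
static int can_use_cached_rq(struct request *rq, struct bio *bio)
{
	return rq->type == bio->type;
}

static struct request *alloc_new_rq(struct bio *bio)
{
	struct request *rq = malloc(sizeof(*rq));	/* sketch: no NULL check */
	rq->cmd_flags = bio->bi_opf;
	rq->type = bio->type;
	return rq;
}

/* The decision the patch adds: reuse the plugged request only when it
 * is compatible, refreshing its cmd_flags; otherwise allocate fresh. */
static struct request *get_request(struct request *cached, struct bio *bio)
{
	if (cached && can_use_cached_rq(cached, bio)) {
		cached->cmd_flags = bio->bi_opf;
		return cached;
	}
	return alloc_new_rq(bio);
}

int main(void)
{
	struct request cached = { .cmd_flags = 0, .type = TYPE_DEFAULT };
	struct bio read_bio = { .bi_opf = 1, .type = TYPE_READ };

	struct request *rq = get_request(&cached, &read_bio);
	printf("reused cached rq: %s\n", rq == &cached ? "yes" : "no");
	if (rq != &cached)
		free(rq);
	return 0;
}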

Fixes: 900e0807 ("block: move queue enter logic into blk_mq_submit_bio()")
Reported-by: Geert Uytterhoeven <geert@linux-m68k.org>
Tested-by: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent b781d8db
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2521,12 +2521,8 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
 	};
 	struct request *rq;
 
-	if (unlikely(bio_queue_enter(bio)))
-		return NULL;
-	if (unlikely(!submit_bio_checks(bio)))
-		goto put_exit;
 	if (blk_mq_attempt_bio_merge(q, bio, nsegs, same_queue_rq))
-		goto put_exit;
+		return NULL;
 
 	rq_qos_throttle(q, bio);
 
@@ -2543,19 +2539,32 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
 	rq_qos_cleanup(q, bio);
 	if (bio->bi_opf & REQ_NOWAIT)
 		bio_wouldblock_error(bio);
-put_exit:
-	blk_queue_exit(q);
 	return NULL;
 }
 
+static inline bool blk_mq_can_use_cached_rq(struct request *rq,
+		struct bio *bio)
+{
+	if (blk_mq_get_hctx_type(bio->bi_opf) != rq->mq_hctx->type)
+		return false;
+	if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
+		return false;
+	return true;
+}
+
 static inline struct request *blk_mq_get_request(struct request_queue *q,
 						 struct blk_plug *plug,
 						 struct bio *bio,
 						 unsigned int nsegs,
 						 bool *same_queue_rq)
 {
+	struct request *rq;
+	bool checked = false;
+
 	if (plug) {
-		struct request *rq;
-
 		rq = rq_list_peek(&plug->cached_rq);
 		if (rq && rq->q == q) {
@@ -2564,6 +2573,10 @@ static inline struct request *blk_mq_get_request(struct request_queue *q,
 			if (blk_mq_attempt_bio_merge(q, bio, nsegs,
 						same_queue_rq))
 				return NULL;
+			checked = true;
+			if (!blk_mq_can_use_cached_rq(rq, bio))
+				goto fallback;
+			rq->cmd_flags = bio->bi_opf;
 			plug->cached_rq = rq_list_next(rq);
 			INIT_LIST_HEAD(&rq->queuelist);
 			rq_qos_throttle(q, bio);
@@ -2571,7 +2584,15 @@ static inline struct request *blk_mq_get_request(struct request_queue *q,
 		}
 	}
 
-	return blk_mq_get_new_requests(q, plug, bio, nsegs, same_queue_rq);
+fallback:
+	if (unlikely(bio_queue_enter(bio)))
+		return NULL;
+	if (!checked && !submit_bio_checks(bio))
+		return NULL;
+	rq = blk_mq_get_new_requests(q, plug, bio, nsegs, same_queue_rq);
+	if (!rq)
+		blk_queue_exit(q);
+	return rq;
 }
 
 /**
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -89,15 +89,7 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *
 	return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
 }
 
-/*
- * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
- * @q: request queue
- * @flags: request command flags
- * @ctx: software queue cpu ctx
- */
-static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
-						     unsigned int flags,
-						     struct blk_mq_ctx *ctx)
+static inline enum hctx_type blk_mq_get_hctx_type(unsigned int flags)
 {
 	enum hctx_type type = HCTX_TYPE_DEFAULT;
 
@@ -108,8 +100,20 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
 		type = HCTX_TYPE_POLL;
 	else if ((flags & REQ_OP_MASK) == REQ_OP_READ)
 		type = HCTX_TYPE_READ;
-
-	return ctx->hctxs[type];
+	return type;
+}
+
+/*
+ * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
+ * @q: request queue
+ * @flags: request command flags
+ * @ctx: software queue cpu ctx
+ */
+static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
+						     unsigned int flags,
+						     struct blk_mq_ctx *ctx)
+{
+	return ctx->hctxs[blk_mq_get_hctx_type(flags)];
+}
 
 /*
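
The blk-mq.h hunks above split the type computation out of
blk_mq_map_queue() so that blk_mq_can_use_cached_rq() can compare a
bio's flags against a cached request's hctx type without needing a
software-queue context. A stand-alone sketch of that classification
follows; the flag encoding is illustrative, not the kernel's REQ_*
values:

#include <stdio.h>

/* Illustrative flag encoding; the kernel's REQ_* values differ. */
#define REQ_OP_READ  0u
#define REQ_OP_WRITE 1u
#define REQ_OP_MASK  0xffu
#define REQ_POLLED   (1u << 8)

enum hctx_type { HCTX_TYPE_DEFAULT, HCTX_TYPE_READ, HCTX_TYPE_POLL };

/* Same shape as blk_mq_get_hctx_type(): polled I/O wins, then reads,
 * then everything else falls through to the default queue type. */
static enum hctx_type get_hctx_type(unsigned int flags)
{
	enum hctx_type type = HCTX_TYPE_DEFAULT;

	if (flags & REQ_POLLED)
		type = HCTX_TYPE_POLL;
	else if ((flags & REQ_OP_MASK) == REQ_OP_READ)
		type = HCTX_TYPE_READ;
	return type;
}

int main(void)
{
	printf("write       -> %d\n", get_hctx_type(REQ_OP_WRITE));
	printf("read        -> %d\n", get_hctx_type(REQ_OP_READ));
	printf("polled read -> %d\n", get_hctx_type(REQ_OP_READ | REQ_POLLED));
	return 0;
}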