Commit d6a51a97 authored by Jianchao Wang, committed by Jens Axboe

blk-mq: replace and kill blk_mq_request_issue_directly

Replace blk_mq_request_issue_directly with blk_mq_try_issue_directly in
blk_insert_cloned_request, and remove blk_mq_request_issue_directly since
nothing uses it any more.
Signed-off-by: Jianchao Wang <jianchao.w.wang@oracle.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 5b7a6f12
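
For context, a minimal sketch (not part of the commit) of the call shape after this change: the caller now passes the hardware context and a throwaway cookie to blk_mq_try_issue_directly() itself, which is what the removed blk_mq_request_issue_directly() wrapper used to do on its behalf. The helper name issue_cloned_rq_directly() below is hypothetical.

/*
 * Minimal sketch, not part of the commit: issuing a (cloned) request
 * directly after this change. The hypothetical helper mirrors what
 * blk_insert_cloned_request() now does inline.
 */
static blk_status_t issue_cloned_rq_directly(struct request *rq)
{
	blk_qc_t unused;	/* completion cookie, not needed here */

	/* bypass = true: skip any scheduler; last = true: sole request */
	return blk_mq_try_issue_directly(rq->mq_hctx, rq, &unused, true, true);
}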
block/blk-core.c
@@ -1240,6 +1240,8 @@ static int blk_cloned_rq_check_limits(struct request_queue *q,
  */
 blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 {
+	blk_qc_t unused;
+
 	if (blk_cloned_rq_check_limits(q, rq))
 		return BLK_STS_IOERR;
@@ -1255,7 +1257,7 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
 	 * bypass a potential scheduler on the bottom device for
 	 * insert.
 	 */
-	return blk_mq_request_issue_directly(rq, true);
+	return blk_mq_try_issue_directly(rq->mq_hctx, rq, &unused, true, true);
 }
 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
block/blk-mq.c
@@ -1792,7 +1792,7 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
 	return ret;
 }
 
-static blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 						struct request *rq,
 						blk_qc_t *cookie,
 						bool bypass, bool last)
@@ -1864,13 +1864,6 @@ static blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 	return ret;
 }
 
-blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
-{
-	blk_qc_t unused;
-
-	return blk_mq_try_issue_directly(rq->mq_hctx, rq, &unused, true, last);
-}
-
 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 		struct list_head *list)
 {
block/blk-mq.h
@@ -68,8 +68,10 @@ void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
 				struct list_head *list);
 
-/* Used by blk_insert_cloned_request() to issue request directly */
-blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last);
+blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+						struct request *rq,
+						blk_qc_t *cookie,
+						bool bypass, bool last);
 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 		struct list_head *list);