Commit bf572297 authored by Christoph Hellwig, committed by Jens Axboe

blk-mq: remove REQ_END

Pass an explicit parameter for the last request in a batch to ->queue_rq
instead of using a request flag.  Besides being a cleaner and non-stateful
interface this is also required for the next patch, which fixes the blk-mq
I/O submission code to not start a timer too early.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 6d11fb45
...@@ -384,7 +384,7 @@ void blk_mq_complete_request(struct request *rq) ...@@ -384,7 +384,7 @@ void blk_mq_complete_request(struct request *rq)
} }
EXPORT_SYMBOL(blk_mq_complete_request); EXPORT_SYMBOL(blk_mq_complete_request);
static void blk_mq_start_request(struct request *rq, bool last) static void blk_mq_start_request(struct request *rq)
{ {
struct request_queue *q = rq->q; struct request_queue *q = rq->q;
...@@ -421,16 +421,6 @@ static void blk_mq_start_request(struct request *rq, bool last) ...@@ -421,16 +421,6 @@ static void blk_mq_start_request(struct request *rq, bool last)
*/ */
rq->nr_phys_segments++; rq->nr_phys_segments++;
} }
/*
* Flag the last request in the series so that drivers know when IO
* should be kicked off, if they don't do it on a per-request basis.
*
* Note: the flag isn't the only condition drivers should do kick off.
* If drive is busy, the last request might not have the bit set.
*/
if (last)
rq->cmd_flags |= REQ_END;
} }
static void __blk_mq_requeue_request(struct request *rq) static void __blk_mq_requeue_request(struct request *rq)
...@@ -440,8 +430,6 @@ static void __blk_mq_requeue_request(struct request *rq) ...@@ -440,8 +430,6 @@ static void __blk_mq_requeue_request(struct request *rq)
trace_block_rq_requeue(q, rq); trace_block_rq_requeue(q, rq);
clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags); clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
rq->cmd_flags &= ~REQ_END;
if (q->dma_drain_size && blk_rq_bytes(rq)) if (q->dma_drain_size && blk_rq_bytes(rq))
rq->nr_phys_segments--; rq->nr_phys_segments--;
} }
...@@ -755,9 +743,9 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) ...@@ -755,9 +743,9 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
rq = list_first_entry(&rq_list, struct request, queuelist); rq = list_first_entry(&rq_list, struct request, queuelist);
list_del_init(&rq->queuelist); list_del_init(&rq->queuelist);
blk_mq_start_request(rq, list_empty(&rq_list)); blk_mq_start_request(rq);
ret = q->mq_ops->queue_rq(hctx, rq); ret = q->mq_ops->queue_rq(hctx, rq, list_empty(&rq_list));
switch (ret) { switch (ret) {
case BLK_MQ_RQ_QUEUE_OK: case BLK_MQ_RQ_QUEUE_OK:
queued++; queued++;
...@@ -1198,14 +1186,14 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio) ...@@ -1198,14 +1186,14 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
int ret; int ret;
blk_mq_bio_to_request(rq, bio); blk_mq_bio_to_request(rq, bio);
blk_mq_start_request(rq, true); blk_mq_start_request(rq);
/* /*
* For OK queue, we are done. For error, kill it. Any other * For OK queue, we are done. For error, kill it. Any other
* error (busy), just add it to our list as we previously * error (busy), just add it to our list as we previously
* would have done * would have done
*/ */
ret = q->mq_ops->queue_rq(data.hctx, rq); ret = q->mq_ops->queue_rq(data.hctx, rq, true);
if (ret == BLK_MQ_RQ_QUEUE_OK) if (ret == BLK_MQ_RQ_QUEUE_OK)
goto done; goto done;
else { else {
......
...@@ -3775,7 +3775,8 @@ static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx, ...@@ -3775,7 +3775,8 @@ static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx,
return false; return false;
} }
static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq) static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq,
bool last)
{ {
int ret; int ret;
......
...@@ -313,7 +313,8 @@ static void null_request_fn(struct request_queue *q) ...@@ -313,7 +313,8 @@ static void null_request_fn(struct request_queue *q)
} }
} }
static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq) static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq,
bool last)
{ {
struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq); struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
......
...@@ -164,14 +164,14 @@ static void virtblk_done(struct virtqueue *vq) ...@@ -164,14 +164,14 @@ static void virtblk_done(struct virtqueue *vq)
spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
} }
static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req) static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req,
bool last)
{ {
struct virtio_blk *vblk = hctx->queue->queuedata; struct virtio_blk *vblk = hctx->queue->queuedata;
struct virtblk_req *vbr = blk_mq_rq_to_pdu(req); struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
unsigned long flags; unsigned long flags;
unsigned int num; unsigned int num;
int qid = hctx->queue_num; int qid = hctx->queue_num;
const bool last = (req->cmd_flags & REQ_END) != 0;
int err; int err;
bool notify = false; bool notify = false;
......
...@@ -1855,7 +1855,8 @@ static void scsi_mq_done(struct scsi_cmnd *cmd) ...@@ -1855,7 +1855,8 @@ static void scsi_mq_done(struct scsi_cmnd *cmd)
blk_mq_complete_request(cmd->request); blk_mq_complete_request(cmd->request);
} }
static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req) static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req,
bool last)
{ {
struct request_queue *q = req->q; struct request_queue *q = req->q;
struct scsi_device *sdev = q->queuedata; struct scsi_device *sdev = q->queuedata;
......
...@@ -77,7 +77,7 @@ struct blk_mq_tag_set { ...@@ -77,7 +77,7 @@ struct blk_mq_tag_set {
struct list_head tag_list; struct list_head tag_list;
}; };
typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *); typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *, bool);
typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int); typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int); typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int); typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
......
...@@ -188,7 +188,6 @@ enum rq_flag_bits { ...@@ -188,7 +188,6 @@ enum rq_flag_bits {
__REQ_MIXED_MERGE, /* merge of different types, fail separately */ __REQ_MIXED_MERGE, /* merge of different types, fail separately */
__REQ_KERNEL, /* direct IO to kernel pages */ __REQ_KERNEL, /* direct IO to kernel pages */
__REQ_PM, /* runtime pm request */ __REQ_PM, /* runtime pm request */
__REQ_END, /* last of chain of requests */
__REQ_HASHED, /* on IO scheduler merge hash */ __REQ_HASHED, /* on IO scheduler merge hash */
__REQ_MQ_INFLIGHT, /* track inflight for MQ */ __REQ_MQ_INFLIGHT, /* track inflight for MQ */
__REQ_NR_BITS, /* stops here */ __REQ_NR_BITS, /* stops here */
...@@ -242,7 +241,6 @@ enum rq_flag_bits { ...@@ -242,7 +241,6 @@ enum rq_flag_bits {
#define REQ_SECURE (1ULL << __REQ_SECURE) #define REQ_SECURE (1ULL << __REQ_SECURE)
#define REQ_KERNEL (1ULL << __REQ_KERNEL) #define REQ_KERNEL (1ULL << __REQ_KERNEL)
#define REQ_PM (1ULL << __REQ_PM) #define REQ_PM (1ULL << __REQ_PM)
#define REQ_END (1ULL << __REQ_END)
#define REQ_HASHED (1ULL << __REQ_HASHED) #define REQ_HASHED (1ULL << __REQ_HASHED)
#define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT) #define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment