Commit 08e0029a authored by Christoph Hellwig's avatar Christoph Hellwig Committed by Jens Axboe

blk-mq: remove the error argument to blk_mq_complete_request

Now that all drivers that call blk_mq_complete_request have a
->complete callback we can remove the direct call to blk_mq_end_request,
as well as the error argument to blk_mq_complete_request.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Reviewed-by: Bart Van Assche <Bart.VanAssche@sandisk.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 2609587c
......@@ -442,16 +442,9 @@ static void blk_mq_stat_add(struct request *rq)
static void __blk_mq_complete_request(struct request *rq)
{
-struct request_queue *q = rq->q;
if (rq->internal_tag != -1)
blk_mq_sched_completed_request(rq);
blk_mq_stat_add(rq);
-if (!q->softirq_done_fn)
-blk_mq_end_request(rq, rq->errors);
-else
blk_mq_ipi_complete_request(rq);
}
......@@ -463,16 +456,14 @@ static void __blk_mq_complete_request(struct request *rq)
* Ends all I/O on a request. It does not handle partial completions.
* The actual completion happens out-of-order, through a IPI handler.
**/
-void blk_mq_complete_request(struct request *rq, int error)
+void blk_mq_complete_request(struct request *rq)
{
struct request_queue *q = rq->q;
if (unlikely(blk_should_fake_timeout(q)))
return;
-if (!blk_mark_rq_complete(rq)) {
-rq->errors = error;
+if (!blk_mark_rq_complete(rq))
__blk_mq_complete_request(rq);
-}
}
EXPORT_SYMBOL(blk_mq_complete_request);
......
......@@ -465,7 +465,7 @@ static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
struct loop_cmd *cmd = container_of(iocb, struct loop_cmd, iocb);
cmd->ret = ret;
-blk_mq_complete_request(cmd->rq, 0);
+blk_mq_complete_request(cmd->rq);
}
static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
......@@ -1685,7 +1685,7 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
/* complete non-aio request */
if (!cmd->use_aio || ret) {
cmd->ret = ret ? -EIO : 0;
-blk_mq_complete_request(cmd->rq, 0);
+blk_mq_complete_request(cmd->rq);
}
}
......
......@@ -242,7 +242,7 @@ static void mtip_async_complete(struct mtip_port *port,
rq = mtip_rq_from_tag(dd, tag);
cmd->status = status;
-blk_mq_complete_request(rq, 0);
+blk_mq_complete_request(rq);
}
/*
......@@ -4109,7 +4109,7 @@ static void mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv)
if (likely(!reserv)) {
cmd->status = -ENODEV;
-blk_mq_complete_request(rq, 0);
+blk_mq_complete_request(rq);
} else if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &dd->port->flags)) {
cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
......
......@@ -635,7 +635,7 @@ static void recv_work(struct work_struct *work)
break;
}
-blk_mq_complete_request(blk_mq_rq_from_pdu(cmd), 0);
+blk_mq_complete_request(blk_mq_rq_from_pdu(cmd));
}
atomic_dec(&config->recv_threads);
wake_up(&config->recv_wq);
......@@ -651,7 +651,7 @@ static void nbd_clear_req(struct request *req, void *data, bool reserved)
return;
cmd = blk_mq_rq_to_pdu(req);
cmd->status = -EIO;
-blk_mq_complete_request(req, 0);
+blk_mq_complete_request(req);
}
static void nbd_clear_que(struct nbd_device *nbd)
......
......@@ -281,7 +281,7 @@ static inline void null_handle_cmd(struct nullb_cmd *cmd)
case NULL_IRQ_SOFTIRQ:
switch (queue_mode) {
case NULL_Q_MQ:
-blk_mq_complete_request(cmd->rq, 0);
+blk_mq_complete_request(cmd->rq);
break;
case NULL_Q_RQ:
blk_complete_request(cmd->rq);
......
......@@ -201,7 +201,7 @@ static void virtblk_done(struct virtqueue *vq)
while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
struct request *req = blk_mq_rq_from_pdu(vbr);
-blk_mq_complete_request(req, 0);
+blk_mq_complete_request(req);
req_done = true;
}
if (unlikely(virtqueue_is_broken(vq)))
......
......@@ -1647,7 +1647,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
BUG();
}
-blk_mq_complete_request(req, 0);
+blk_mq_complete_request(req);
}
rinfo->ring.rsp_cons = i;
......
......@@ -363,7 +363,7 @@ static void dm_complete_request(struct request *rq, int error)
if (!rq->q->mq_ops)
blk_complete_request(rq);
else
-blk_mq_complete_request(rq, 0);
+blk_mq_complete_request(rq);
}
/*
......
......@@ -117,7 +117,7 @@ void nvme_cancel_request(struct request *req, void *data, bool reserved)
if (blk_queue_dying(req->q))
status |= NVME_SC_DNR;
nvme_req(req)->status = status;
-blk_mq_complete_request(req, 0);
+blk_mq_complete_request(req);
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);
......
......@@ -251,7 +251,7 @@ static inline void nvme_end_request(struct request *req, __le16 status,
rq->status = le16_to_cpu(status) >> 1;
rq->result = result;
-blk_mq_complete_request(req, 0);
+blk_mq_complete_request(req);
}
void nvme_complete_rq(struct request *req);
......
......@@ -1904,7 +1904,7 @@ static int scsi_mq_prep_fn(struct request *req)
static void scsi_mq_done(struct scsi_cmnd *cmd)
{
trace_scsi_dispatch_cmd_done(cmd);
-blk_mq_complete_request(cmd->request, 0);
+blk_mq_complete_request(cmd->request);
}
static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
......
......@@ -228,7 +228,7 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_abort_requeue_list(struct request_queue *q);
-void blk_mq_complete_request(struct request *rq, int error);
+void blk_mq_complete_request(struct request *rq);
bool blk_mq_queue_stopped(struct request_queue *q);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment