Commit db8c48e4 authored by Christoph Hellwig, committed by Jens Axboe

nvme: return BLK_EH_DONE from ->timeout

NVMe always completes the request before returning from ->timeout, either
by polling for it, or by disabling the controller.  Return BLK_EH_DONE so
that the block layer doesn't even try to complete it again.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 6600593c
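For readers unfamiliar with the blk-mq timeout contract this change relies on: a ->timeout handler either completes the timed-out request itself and returns BLK_EH_DONE, or returns BLK_EH_RESET_TIMER to re-arm the timer and keep waiting. Below is a minimal sketch of a conforming handler, using the same handler signature as nvme_timeout() in this diff; mydrv_hw_completed() and mydrv_timeout() are hypothetical driver names, not real block layer or nvme APIs.

#include <linux/blk-mq.h>

/* Hypothetical helper: a real driver would poll its hardware completion queue. */
static bool mydrv_hw_completed(struct request *req)
{
        return false;   /* stand-in for device-specific completion polling */
}

static enum blk_eh_timer_return mydrv_timeout(struct request *req, bool reserved)
{
        if (mydrv_hw_completed(req)) {
                /*
                 * The driver finishes the request itself, so the block
                 * layer must not complete it again: report BLK_EH_DONE.
                 */
                blk_mq_complete_request(req);
                return BLK_EH_DONE;
        }

        /* Still outstanding: ask the block layer to restart the timer. */
        return BLK_EH_RESET_TIMER;
}

This is the split the nvme drivers adopt below: every timeout path that has already polled the completion or disabled the controller now reports BLK_EH_DONE instead of the removed BLK_EH_HANDLED.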
drivers/nvme/host/pci.c
@@ -1205,7 +1205,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 		nvme_warn_reset(dev, csts);
 		nvme_dev_disable(dev, false);
 		nvme_reset_ctrl(&dev->ctrl);
-		return BLK_EH_HANDLED;
+		return BLK_EH_DONE;
 	}
 
 	/*
@@ -1215,14 +1215,14 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 		dev_warn(dev->ctrl.device,
 			 "I/O %d QID %d timeout, completion polled\n",
 			 req->tag, nvmeq->qid);
-		return BLK_EH_HANDLED;
+		return BLK_EH_DONE;
 	}
 
 	/*
 	 * Shutdown immediately if controller times out while starting. The
 	 * reset work will see the pci device disabled when it gets the forced
 	 * cancellation error. All outstanding requests are completed on
-	 * shutdown, so we return BLK_EH_HANDLED.
+	 * shutdown, so we return BLK_EH_DONE.
 	 */
 	switch (dev->ctrl.state) {
 	case NVME_CTRL_CONNECTING:
@@ -1232,7 +1232,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 			 req->tag, nvmeq->qid);
 		nvme_dev_disable(dev, false);
 		nvme_req(req)->flags |= NVME_REQ_CANCELLED;
-		return BLK_EH_HANDLED;
+		return BLK_EH_DONE;
 	default:
 		break;
 	}
@@ -1249,12 +1249,8 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 		nvme_dev_disable(dev, false);
 		nvme_reset_ctrl(&dev->ctrl);
 
-		/*
-		 * Mark the request as handled, since the inline shutdown
-		 * forces all outstanding requests to complete.
-		 */
 		nvme_req(req)->flags |= NVME_REQ_CANCELLED;
-		return BLK_EH_HANDLED;
+		return BLK_EH_DONE;
 	}
 
 	if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) {
...
drivers/nvme/host/rdma.c
@@ -1598,7 +1598,7 @@ nvme_rdma_timeout(struct request *rq, bool reserved)
 	/* fail with DNR on cmd timeout */
 	nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;
 
-	return BLK_EH_HANDLED;
+	return BLK_EH_DONE;
 }
 
 static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
...
drivers/nvme/target/loop.c
@@ -146,7 +146,7 @@ nvme_loop_timeout(struct request *rq, bool reserved)
 	/* fail with DNR on admin cmd timeout */
 	nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;
 
-	return BLK_EH_HANDLED;
+	return BLK_EH_DONE;
 }
 
 static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
...