Commit 7bf58533 authored by Christoph Hellwig, committed by Jens Axboe

nvme: don't pass the full CQE to nvme_complete_async_event

We only need the status and result fields, and passing them explicitly
makes life a lot easier for the Fibre Channel transport which doesn't
have a full CQE for the fast path case.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent d49187e9
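
For context, the new calling convention passes the 16-bit completion status and the result union directly instead of a pointer to a full struct nvme_completion. A minimal sketch of how a transport without a full CQE on its fast path might call it (the example_fc_* names below are hypothetical and not part of this commit):

	/* Hypothetical fast-path response that only carries status and result. */
	struct example_fc_ersp {
		__le16			status;	/* NVMe status, phase bit still in bit 0 */
		union nvme_result	result;	/* command-specific result dword(s) */
	};

	static void example_fc_handle_aen(struct nvme_ctrl *ctrl,
			struct example_fc_ersp *ersp)
	{
		/* No need to build a full struct nvme_completion any more. */
		nvme_complete_async_event(ctrl, ersp->status, &ersp->result);
	}

nvme_complete_async_event() still does the le16_to_cpu() conversion and phase-bit shift itself, as the first hunk below shows.
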
@@ -1895,18 +1895,25 @@ static void nvme_async_event_work(struct work_struct *work)
 	spin_unlock_irq(&ctrl->lock);
 }
 
-void nvme_complete_async_event(struct nvme_ctrl *ctrl,
-		struct nvme_completion *cqe)
+void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
+		union nvme_result *res)
 {
-	u16 status = le16_to_cpu(cqe->status) >> 1;
-	u32 result = le32_to_cpu(cqe->result.u32);
+	u32 result = le32_to_cpu(res->u32);
+	bool done = true;
 
-	if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ) {
+	switch (le16_to_cpu(status) >> 1) {
+	case NVME_SC_SUCCESS:
+		done = false;
+		/*FALLTHRU*/
+	case NVME_SC_ABORT_REQ:
 		++ctrl->event_limit;
 		schedule_work(&ctrl->async_event_work);
+		break;
+	default:
+		break;
 	}
 
-	if (status != NVME_SC_SUCCESS)
+	if (done)
 		return;
 
 	switch (result & 0xff07) {

@@ -275,8 +275,8 @@ void nvme_queue_scan(struct nvme_ctrl *ctrl);
 void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
 
 #define NVME_NR_AERS	1
-void nvme_complete_async_event(struct nvme_ctrl *ctrl,
-		struct nvme_completion *cqe);
+void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
+		union nvme_result *res);
 void nvme_queue_async_events(struct nvme_ctrl *ctrl);
 
 void nvme_stop_queues(struct nvme_ctrl *ctrl);

@@ -703,7 +703,8 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
 		 */
 		if (unlikely(nvmeq->qid == 0 &&
 				cqe.command_id >= NVME_AQ_BLKMQ_DEPTH)) {
-			nvme_complete_async_event(&nvmeq->dev->ctrl, &cqe);
+			nvme_complete_async_event(&nvmeq->dev->ctrl,
+					cqe.status, &cqe.result);
 			continue;
 		}

@@ -1168,7 +1168,8 @@ static int __nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc, int tag)
 	 */
 	if (unlikely(nvme_rdma_queue_idx(queue) == 0 &&
 			cqe->command_id >= NVME_RDMA_AQ_BLKMQ_DEPTH))
-		nvme_complete_async_event(&queue->ctrl->ctrl, cqe);
+		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
+				&cqe->result);
 	else
 		ret = nvme_rdma_process_nvme_rsp(queue, cqe, wc, tag);
 	ib_dma_sync_single_for_device(ibdev, qe->dma, len, DMA_FROM_DEVICE);

@@ -127,7 +127,8 @@ static void nvme_loop_queue_response(struct nvmet_req *req)
 	 */
 	if (unlikely(nvme_loop_queue_idx(iod->queue) == 0 &&
 			cqe->command_id >= NVME_LOOP_AQ_BLKMQ_DEPTH)) {
-		nvme_complete_async_event(&iod->queue->ctrl->ctrl, cqe);
+		nvme_complete_async_event(&iod->queue->ctrl->ctrl, cqe->status,
+				&cqe->result);
 	} else {
 		struct request *rq = blk_mq_rq_from_pdu(iod);