Commit f361e5a0 authored by Steve Wise's avatar Steve Wise Committed by Sagi Grimberg

nvme-rdma: destroy nvme queue rdma resources on connect failure

After address resolution, the nvme_rdma_queue rdma resources are
allocated.  If rdma route resolution or the connect fails, or the
controller reconnect times out and gives up, then the rdma resources
need to be freed.  Otherwise, rdma resources are leaked.
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
parent cdbecc8d
...@@ -82,6 +82,7 @@ struct nvme_rdma_request { ...@@ -82,6 +82,7 @@ struct nvme_rdma_request {
enum nvme_rdma_queue_flags { enum nvme_rdma_queue_flags {
NVME_RDMA_Q_CONNECTED = (1 << 0), NVME_RDMA_Q_CONNECTED = (1 << 0),
NVME_RDMA_IB_QUEUE_ALLOCATED = (1 << 1),
}; };
struct nvme_rdma_queue { struct nvme_rdma_queue {
...@@ -480,9 +481,14 @@ nvme_rdma_find_get_device(struct rdma_cm_id *cm_id) ...@@ -480,9 +481,14 @@ nvme_rdma_find_get_device(struct rdma_cm_id *cm_id)
static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue) static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
{ {
struct nvme_rdma_device *dev = queue->device; struct nvme_rdma_device *dev;
struct ib_device *ibdev = dev->dev; struct ib_device *ibdev;
if (!test_and_clear_bit(NVME_RDMA_IB_QUEUE_ALLOCATED, &queue->flags))
return;
dev = queue->device;
ibdev = dev->dev;
rdma_destroy_qp(queue->cm_id); rdma_destroy_qp(queue->cm_id);
ib_free_cq(queue->ib_cq); ib_free_cq(queue->ib_cq);
...@@ -533,6 +539,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue, ...@@ -533,6 +539,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue,
ret = -ENOMEM; ret = -ENOMEM;
goto out_destroy_qp; goto out_destroy_qp;
} }
set_bit(NVME_RDMA_IB_QUEUE_ALLOCATED, &queue->flags);
return 0; return 0;
...@@ -590,6 +597,7 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl, ...@@ -590,6 +597,7 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl,
return 0; return 0;
out_destroy_cm_id: out_destroy_cm_id:
nvme_rdma_destroy_queue_ib(queue);
rdma_destroy_id(queue->cm_id); rdma_destroy_id(queue->cm_id);
return ret; return ret;
} }
...@@ -652,7 +660,7 @@ static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl) ...@@ -652,7 +660,7 @@ static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl)
return 0; return 0;
out_free_queues: out_free_queues:
for (; i >= 1; i--) for (i--; i >= 1; i--)
nvme_rdma_stop_and_free_queue(&ctrl->queues[i]); nvme_rdma_stop_and_free_queue(&ctrl->queues[i]);
return ret; return ret;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment