Commit 9817d763 authored by Ruozhu Li's avatar Ruozhu Li Committed by Christoph Hellwig

nvme-rdma: destroy cm id before destroy qp to avoid use after free

We should always destroy the cm_id before destroying the qp, to avoid
receiving a CMA event after the qp has been destroyed, which may lead to a
use-after-free.
In the RDMA connection establishment error flow, don't destroy the qp in
the cm event handler. Just report cm_error to the upper level; the qp will
be destroyed in nvme_rdma_alloc_queue() after the cm_id is destroyed.
Signed-off-by: default avatarRuozhu Li <liruozhu@huawei.com>
Reviewed-by: default avatarMax Gurtovoy <mgurtovoy@nvidia.com>
Signed-off-by: default avatarChristoph Hellwig <hch@lst.de>
parent 79f528af
...@@ -656,8 +656,8 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue) ...@@ -656,8 +656,8 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags)) if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
return; return;
nvme_rdma_destroy_queue_ib(queue);
rdma_destroy_id(queue->cm_id); rdma_destroy_id(queue->cm_id);
nvme_rdma_destroy_queue_ib(queue);
mutex_destroy(&queue->queue_lock); mutex_destroy(&queue->queue_lock);
} }
...@@ -1815,14 +1815,10 @@ static int nvme_rdma_conn_established(struct nvme_rdma_queue *queue) ...@@ -1815,14 +1815,10 @@ static int nvme_rdma_conn_established(struct nvme_rdma_queue *queue)
for (i = 0; i < queue->queue_size; i++) { for (i = 0; i < queue->queue_size; i++) {
ret = nvme_rdma_post_recv(queue, &queue->rsp_ring[i]); ret = nvme_rdma_post_recv(queue, &queue->rsp_ring[i]);
if (ret) if (ret)
goto out_destroy_queue_ib; return ret;
} }
return 0; return 0;
out_destroy_queue_ib:
nvme_rdma_destroy_queue_ib(queue);
return ret;
} }
static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue, static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue,
...@@ -1916,14 +1912,10 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue) ...@@ -1916,14 +1912,10 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
if (ret) { if (ret) {
dev_err(ctrl->ctrl.device, dev_err(ctrl->ctrl.device,
"rdma_connect_locked failed (%d).\n", ret); "rdma_connect_locked failed (%d).\n", ret);
goto out_destroy_queue_ib; return ret;
} }
return 0; return 0;
out_destroy_queue_ib:
nvme_rdma_destroy_queue_ib(queue);
return ret;
} }
static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id, static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
...@@ -1954,8 +1946,6 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id, ...@@ -1954,8 +1946,6 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
case RDMA_CM_EVENT_ROUTE_ERROR: case RDMA_CM_EVENT_ROUTE_ERROR:
case RDMA_CM_EVENT_CONNECT_ERROR: case RDMA_CM_EVENT_CONNECT_ERROR:
case RDMA_CM_EVENT_UNREACHABLE: case RDMA_CM_EVENT_UNREACHABLE:
nvme_rdma_destroy_queue_ib(queue);
fallthrough;
case RDMA_CM_EVENT_ADDR_ERROR: case RDMA_CM_EVENT_ADDR_ERROR:
dev_dbg(queue->ctrl->ctrl.device, dev_dbg(queue->ctrl->ctrl.device,
"CM error event %d\n", ev->event); "CM error event %d\n", ev->event);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment