Commit ceee1953 authored by Christoph Hellwig

nvme-loop: use the tagset alloc/free helpers

Use the common helpers to allocate and free the tagsets.  To make this
work the generic nvme_ctrl now needs to be stored in the hctx private
data instead of the nvme_loop_ctrl.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
parent 2ade8221
...@@ -266,9 +266,7 @@ static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl) ...@@ -266,9 +266,7 @@ static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags)) if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
return; return;
nvmet_sq_destroy(&ctrl->queues[0].nvme_sq); nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
blk_mq_destroy_queue(ctrl->ctrl.admin_q); nvme_remove_admin_tag_set(&ctrl->ctrl);
blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
blk_mq_free_tag_set(&ctrl->admin_tag_set);
} }
static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl) static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
...@@ -282,10 +280,8 @@ static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl) ...@@ -282,10 +280,8 @@ static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
list_del(&ctrl->list); list_del(&ctrl->list);
mutex_unlock(&nvme_loop_ctrl_mutex); mutex_unlock(&nvme_loop_ctrl_mutex);
if (nctrl->tagset) { if (nctrl->tagset)
blk_mq_destroy_queue(ctrl->ctrl.connect_q); nvme_remove_io_tag_set(nctrl);
blk_mq_free_tag_set(&ctrl->tag_set);
}
kfree(ctrl->queues); kfree(ctrl->queues);
nvmf_free_options(nctrl->opts); nvmf_free_options(nctrl->opts);
free_ctrl: free_ctrl:
...@@ -350,52 +346,31 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl) ...@@ -350,52 +346,31 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{ {
int error; int error;
memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
ctrl->admin_tag_set.reserved_tags = NVMF_RESERVED_TAGS;
ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
ctrl->admin_tag_set.driver_data = &ctrl->ctrl;
ctrl->admin_tag_set.nr_hw_queues = 1;
ctrl->admin_tag_set.timeout = NVME_ADMIN_TIMEOUT;
ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;
ctrl->queues[0].ctrl = ctrl; ctrl->queues[0].ctrl = ctrl;
error = nvmet_sq_init(&ctrl->queues[0].nvme_sq); error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
if (error) if (error)
return error; return error;
ctrl->ctrl.queue_count = 1; ctrl->ctrl.queue_count = 1;
error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set); error = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
&nvme_loop_admin_mq_ops, BLK_MQ_F_NO_SCHED,
sizeof(struct nvme_loop_iod) +
NVME_INLINE_SG_CNT * sizeof(struct scatterlist));
if (error) if (error)
goto out_free_sq; goto out_free_sq;
ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;
ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
if (IS_ERR(ctrl->ctrl.fabrics_q)) {
error = PTR_ERR(ctrl->ctrl.fabrics_q);
goto out_free_tagset;
}
ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
if (IS_ERR(ctrl->ctrl.admin_q)) {
error = PTR_ERR(ctrl->ctrl.admin_q);
goto out_cleanup_fabrics_q;
}
/* reset stopped state for the fresh admin queue */ /* reset stopped state for the fresh admin queue */
clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->ctrl.flags); clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->ctrl.flags);
error = nvmf_connect_admin_queue(&ctrl->ctrl); error = nvmf_connect_admin_queue(&ctrl->ctrl);
if (error) if (error)
goto out_cleanup_queue; goto out_cleanup_tagset;
set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags); set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
error = nvme_enable_ctrl(&ctrl->ctrl); error = nvme_enable_ctrl(&ctrl->ctrl);
if (error) if (error)
goto out_cleanup_queue; goto out_cleanup_tagset;
ctrl->ctrl.max_hw_sectors = ctrl->ctrl.max_hw_sectors =
(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9); (NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);
...@@ -404,17 +379,13 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl) ...@@ -404,17 +379,13 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
error = nvme_init_ctrl_finish(&ctrl->ctrl); error = nvme_init_ctrl_finish(&ctrl->ctrl);
if (error) if (error)
goto out_cleanup_queue; goto out_cleanup_tagset;
return 0; return 0;
out_cleanup_queue: out_cleanup_tagset:
clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags); clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
blk_mq_destroy_queue(ctrl->ctrl.admin_q); nvme_remove_admin_tag_set(&ctrl->ctrl);
out_cleanup_fabrics_q:
blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
out_free_tagset:
blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_sq: out_free_sq:
nvmet_sq_destroy(&ctrl->queues[0].nvme_sq); nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
return error; return error;
...@@ -522,37 +493,21 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl) ...@@ -522,37 +493,21 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
if (ret) if (ret)
return ret; return ret;
memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
ctrl->tag_set.ops = &nvme_loop_mq_ops; &nvme_loop_mq_ops, BLK_MQ_F_SHOULD_MERGE,
ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size; sizeof(struct nvme_loop_iod) +
ctrl->tag_set.reserved_tags = NVMF_RESERVED_TAGS; NVME_INLINE_SG_CNT * sizeof(struct scatterlist));
ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
ctrl->tag_set.driver_data = &ctrl->ctrl;
ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
ctrl->ctrl.tagset = &ctrl->tag_set;
ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
if (ret) if (ret)
goto out_destroy_queues; goto out_destroy_queues;
ret = nvme_ctrl_init_connect_q(&(ctrl->ctrl));
if (ret)
goto out_free_tagset;
ret = nvme_loop_connect_io_queues(ctrl); ret = nvme_loop_connect_io_queues(ctrl);
if (ret) if (ret)
goto out_cleanup_connect_q; goto out_cleanup_tagset;
return 0; return 0;
out_cleanup_connect_q: out_cleanup_tagset:
blk_mq_destroy_queue(ctrl->ctrl.connect_q); nvme_remove_io_tag_set(&ctrl->ctrl);
out_free_tagset:
blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues: out_destroy_queues:
nvme_loop_destroy_io_queues(ctrl); nvme_loop_destroy_io_queues(ctrl);
return ret; return ret;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment