Commit a7f7b711 authored by Christoph Hellwig, committed by Jens Axboe

nvme-rdma: split nvme_rdma_alloc_tagset

Split nvme_rdma_alloc_tagset into one helper for the admin tag_set and
one for the I/O tag set.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 2455a4b7
...@@ -787,15 +787,12 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl) ...@@ -787,15 +787,12 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
return ret; return ret;
} }
static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl, static int nvme_rdma_alloc_admin_tag_set(struct nvme_ctrl *nctrl)
bool admin)
{ {
struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl); struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
struct blk_mq_tag_set *set; struct blk_mq_tag_set *set = &ctrl->admin_tag_set;
int ret; int ret;
if (admin) {
set = &ctrl->admin_tag_set;
memset(set, 0, sizeof(*set)); memset(set, 0, sizeof(*set));
set->ops = &nvme_rdma_admin_mq_ops; set->ops = &nvme_rdma_admin_mq_ops;
set->queue_depth = NVME_AQ_MQ_TAG_DEPTH; set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
...@@ -807,8 +804,18 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl, ...@@ -807,8 +804,18 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
set->nr_hw_queues = 1; set->nr_hw_queues = 1;
set->timeout = NVME_ADMIN_TIMEOUT; set->timeout = NVME_ADMIN_TIMEOUT;
set->flags = BLK_MQ_F_NO_SCHED; set->flags = BLK_MQ_F_NO_SCHED;
} else { ret = blk_mq_alloc_tag_set(set);
set = &ctrl->tag_set; if (!ret)
ctrl->ctrl.admin_tagset = set;
return ret;
}
static int nvme_rdma_alloc_tag_set(struct nvme_ctrl *nctrl)
{
struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
struct blk_mq_tag_set *set = &ctrl->tag_set;
int ret;
memset(set, 0, sizeof(*set)); memset(set, 0, sizeof(*set));
set->ops = &nvme_rdma_mq_ops; set->ops = &nvme_rdma_mq_ops;
set->queue_depth = nctrl->sqsize + 1; set->queue_depth = nctrl->sqsize + 1;
...@@ -824,13 +831,10 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl, ...@@ -824,13 +831,10 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
set->nr_hw_queues = nctrl->queue_count - 1; set->nr_hw_queues = nctrl->queue_count - 1;
set->timeout = NVME_IO_TIMEOUT; set->timeout = NVME_IO_TIMEOUT;
set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2; set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
}
ret = blk_mq_alloc_tag_set(set); ret = blk_mq_alloc_tag_set(set);
if (ret) if (!ret)
return ERR_PTR(ret); ctrl->ctrl.tagset = set;
return ret;
return set;
} }
static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl, static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
...@@ -882,11 +886,9 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl, ...@@ -882,11 +886,9 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
goto out_free_queue; goto out_free_queue;
if (new) { if (new) {
ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true); error = nvme_rdma_alloc_admin_tag_set(&ctrl->ctrl);
if (IS_ERR(ctrl->ctrl.admin_tagset)) { if (error)
error = PTR_ERR(ctrl->ctrl.admin_tagset);
goto out_free_async_qe; goto out_free_async_qe;
}
ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set); ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
if (IS_ERR(ctrl->ctrl.fabrics_q)) { if (IS_ERR(ctrl->ctrl.fabrics_q)) {
...@@ -969,11 +971,9 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new) ...@@ -969,11 +971,9 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
return ret; return ret;
if (new) { if (new) {
ctrl->ctrl.tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, false); ret = nvme_rdma_alloc_tag_set(&ctrl->ctrl);
if (IS_ERR(ctrl->ctrl.tagset)) { if (ret)
ret = PTR_ERR(ctrl->ctrl.tagset);
goto out_free_io_queues; goto out_free_io_queues;
}
ret = nvme_ctrl_init_connect_q(&(ctrl->ctrl)); ret = nvme_ctrl_init_connect_q(&(ctrl->ctrl));
if (ret) if (ret)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment