Commit da35825d authored by Christoph Hellwig, committed by Jens Axboe

nvme: set queue limits for the admin queue

Factor out a helper to set all the device-specific queue limits and apply
them to the admin queue in addition to the I/O queues.  Without this the
command size on the admin queue is arbitrarily low, and the other missing
limitations are just minefields waiting for victims.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reported-by: Jeff Lien <Jeff.Lien@hgst.com>
Tested-by: Jeff Lien <Jeff.Lien@hgst.com>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent a1a0e23e
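
For context on the segment-count arithmetic in the new nvme_set_queue_limits() helper below, here is a standalone sketch of how the max_segments limit falls out of max_hw_sectors and the controller page size. The concrete values are assumptions for illustration, not taken from the commit:

#include <stdio.h>

int main(void)
{
	unsigned int page_size = 4096;      /* assumed ctrl->page_size: 4 KiB */
	unsigned int max_hw_sectors = 2048; /* assumed: 2048 * 512 B = 1 MiB max transfer */

	/* page_size >> 9 converts the page size to 512-byte sectors: 4096 >> 9 == 8 */
	unsigned int sectors_per_page = page_size >> 9;

	/* A maximal 1 MiB transfer spans 2048 / 8 = 256 pages; the "+ 1"
	 * leaves room for a buffer that starts mid-page and therefore
	 * touches one extra page. */
	unsigned int max_segments = (max_hw_sectors / sectors_per_page) + 1;

	printf("max_segments = %u\n", max_segments); /* prints 257 */
	return 0;
}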
@@ -840,6 +840,21 @@ int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
 	return ret;
 }
 
+static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
+		struct request_queue *q)
+{
+	if (ctrl->max_hw_sectors) {
+		blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
+		blk_queue_max_segments(q,
+			(ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1);
+	}
+	if (ctrl->stripe_size)
+		blk_queue_chunk_sectors(q, ctrl->stripe_size >> 9);
+	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
+		blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
+	blk_queue_virt_boundary(q, ctrl->page_size - 1);
+}
+
 /*
  * Initialize the cached copies of the Identify data and various controller
  * register in our nvme_ctrl structure.  This should be called as soon as
@@ -897,6 +912,8 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 		}
 	}
 
+	nvme_set_queue_limits(ctrl, ctrl->admin_q);
+
 	kfree(id);
 	return 0;
 }
@@ -1147,17 +1164,9 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 	ns->disk = disk;
 	ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */
 
 	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
-	if (ctrl->max_hw_sectors) {
-		blk_queue_max_hw_sectors(ns->queue, ctrl->max_hw_sectors);
-		blk_queue_max_segments(ns->queue,
-			(ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1);
-	}
-	if (ctrl->stripe_size)
-		blk_queue_chunk_sectors(ns->queue, ctrl->stripe_size >> 9);
-	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
-		blk_queue_flush(ns->queue, REQ_FLUSH | REQ_FUA);
-	blk_queue_virt_boundary(ns->queue, ctrl->page_size - 1);
+	nvme_set_queue_limits(ctrl, ns->queue);
 
 	disk->major = nvme_major;
 	disk->first_minor = 0;
...
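
A note on the virt_boundary limit that the helper now also applies to the admin queue: it encodes NVMe's PRP rule that data segments may not leave a gap within a controller page, i.e. every segment except the first must start on a page boundary and every segment except the last must end on one. The sketch below is a simplified illustration of the gap test such a mask enables; the function name and values are hypothetical, not the kernel's actual bvec-merging code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative sketch: with virt_boundary set to page_size - 1,
 * adjacent segments may only be merged into one request when the
 * first ends on a controller-page boundary and the second starts on
 * one; anything else is a gap that a PRP list cannot describe. */
static bool prp_gap_between(uint64_t prev_seg_end, uint64_t next_seg_start,
			    uint64_t boundary_mask)
{
	/* Any low bits set at the junction mean an illegal gap. */
	return ((prev_seg_end | next_seg_start) & boundary_mask) != 0;
}

int main(void)
{
	uint64_t mask = 4096 - 1; /* assumed 4 KiB controller page size */

	printf("%d\n", prp_gap_between(0x1000, 0x2000, mask)); /* 0: mergeable */
	printf("%d\n", prp_gap_between(0x1000, 0x2200, mask)); /* 1: gap, split */
	return 0;
}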