Commit c85c9ab9 authored by Christoph Hellwig, committed by Keith Busch

nvme: split nvme_update_zone_info

nvme_update_zone_info does (admin queue) I/O to the device and can fail.
We fail to abort the queue limits update if that happens, and we really
should avoid doing that I/O while the I/O queue is frozen as much as
possible anyway.

Split the logic into a helper to query the information that can be
called on an unfrozen queue and one to apply it to the queue limits.

Fixes: 9b130d681443 ("nvme: use the atomic queue limits update API")
Reported-by: Kanchan Joshi <joshi.k@samsung.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Kanchan Joshi <joshi.k@samsung.com>
Signed-off-by: Keith Busch <kbusch@kernel.org>
parent ac229a2d
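As an illustration only (not part of the commit), here is a condensed sketch of how the two helpers end up being called from nvme_update_ns_info_block() after the split. The wrapper name example_update_zoned_limits and the stripped-down body are hypothetical; the zone-info helpers are the ones declared in the diff below, and queue_limits_start_update()/queue_limits_commit_update() are the atomic queue limits API referenced by the Fixes tag.

static int example_update_zoned_limits(struct nvme_ns *ns, unsigned lbaf)
{
	struct nvme_zone_info zi = {};
	struct queue_limits lim;
	int ret;

	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
	    ns->head->ids.csi == NVME_CSI_ZNS) {
		/* Admin-queue I/O: may sleep and may fail, so do it before freezing. */
		ret = nvme_query_zone_info(ns, lbaf, &zi);
		if (ret < 0)
			return ret;
	}

	blk_mq_freeze_queue(ns->disk->queue);
	lim = queue_limits_start_update(ns->disk->queue);
	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
	    ns->head->ids.csi == NVME_CSI_ZNS)
		/* Pure in-memory copy of the cached zone info; cannot fail. */
		nvme_update_zone_info(ns, &lim, &zi);
	ret = queue_limits_commit_update(ns->disk->queue, &lim);
	blk_mq_unfreeze_queue(ns->disk->queue);
	return ret;
}

The point of the split is that the only step that can fail, the zoned-namespace identify on the admin queue, now runs before the queue is frozen; the apply step under the freeze has no error path that would need to unfreeze and unwind.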
@@ -2076,6 +2076,7 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
 	bool vwc = ns->ctrl->vwc & NVME_CTRL_VWC_PRESENT;
 	struct queue_limits lim;
 	struct nvme_id_ns_nvm *nvm = NULL;
+	struct nvme_zone_info zi = {};
 	struct nvme_id_ns *id;
 	sector_t capacity;
 	unsigned lbaf;
@@ -2091,6 +2092,7 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
 		ret = -ENODEV;
 		goto out;
 	}
+	lbaf = nvme_lbaf_index(id->flbas);
 
 	if (ns->ctrl->ctratt & NVME_CTRL_ATTR_ELBAS) {
 		ret = nvme_identify_ns_nvm(ns->ctrl, info->nsid, &nvm);
@@ -2098,8 +2100,14 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
 			goto out;
 	}
 
+	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
+	    ns->head->ids.csi == NVME_CSI_ZNS) {
+		ret = nvme_query_zone_info(ns, lbaf, &zi);
+		if (ret < 0)
+			goto out;
+	}
+
 	blk_mq_freeze_queue(ns->disk->queue);
-	lbaf = nvme_lbaf_index(id->flbas);
 	ns->head->lba_shift = id->lbaf[lbaf].ds;
 	ns->head->nuse = le64_to_cpu(id->nuse);
 	capacity = nvme_lba_to_sect(ns->head, le64_to_cpu(id->nsze));
@@ -2112,13 +2120,8 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
 		capacity = 0;
 	nvme_config_discard(ns, &lim);
 	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
-	    ns->head->ids.csi == NVME_CSI_ZNS) {
-		ret = nvme_update_zone_info(ns, lbaf, &lim);
-		if (ret) {
-			blk_mq_unfreeze_queue(ns->disk->queue);
-			goto out;
-		}
-	}
+	    ns->head->ids.csi == NVME_CSI_ZNS)
+		nvme_update_zone_info(ns, &lim, &zi);
 	ret = queue_limits_commit_update(ns->disk->queue, &lim);
 	if (ret) {
 		blk_mq_unfreeze_queue(ns->disk->queue);
......
@@ -1036,10 +1036,18 @@ static inline bool nvme_disk_is_ns_head(struct gendisk *disk)
 }
 #endif /* CONFIG_NVME_MULTIPATH */
 
+struct nvme_zone_info {
+	u64 zone_size;
+	unsigned int max_open_zones;
+	unsigned int max_active_zones;
+};
+
 int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
 		unsigned int nr_zones, report_zones_cb cb, void *data);
-int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf,
-		struct queue_limits *lim);
+int nvme_query_zone_info(struct nvme_ns *ns, unsigned lbaf,
+		struct nvme_zone_info *zi);
+void nvme_update_zone_info(struct nvme_ns *ns, struct queue_limits *lim,
+		struct nvme_zone_info *zi);
 #ifdef CONFIG_BLK_DEV_ZONED
 blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
 				       struct nvme_command *cmnd,
......
@@ -35,8 +35,8 @@ static int nvme_set_max_append(struct nvme_ctrl *ctrl)
 	return 0;
 }
 
-int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf,
-		struct queue_limits *lim)
+int nvme_query_zone_info(struct nvme_ns *ns, unsigned lbaf,
+		struct nvme_zone_info *zi)
 {
 	struct nvme_effects_log *log = ns->head->effects;
 	struct nvme_command c = { };
@@ -89,27 +89,34 @@ int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf,
 		goto free_data;
 	}
 
-	ns->head->zsze =
-		nvme_lba_to_sect(ns->head, le64_to_cpu(id->lbafe[lbaf].zsze));
-	if (!is_power_of_2(ns->head->zsze)) {
+	zi->zone_size = le64_to_cpu(id->lbafe[lbaf].zsze);
+	if (!is_power_of_2(zi->zone_size)) {
 		dev_warn(ns->ctrl->device,
-			"invalid zone size:%llu for namespace:%u\n",
-			ns->head->zsze, ns->head->ns_id);
+			"invalid zone size: %llu for namespace: %u\n",
+			zi->zone_size, ns->head->ns_id);
 		status = -ENODEV;
 		goto free_data;
 	}
+	zi->max_open_zones = le32_to_cpu(id->mor) + 1;
+	zi->max_active_zones = le32_to_cpu(id->mar) + 1;
 
-	blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ns->queue);
-	lim->zoned = 1;
-	lim->max_open_zones = le32_to_cpu(id->mor) + 1;
-	lim->max_active_zones = le32_to_cpu(id->mar) + 1;
-	lim->chunk_sectors = ns->head->zsze;
-	lim->max_zone_append_sectors = ns->ctrl->max_zone_append;
 free_data:
 	kfree(id);
 	return status;
 }
 
+void nvme_update_zone_info(struct nvme_ns *ns, struct queue_limits *lim,
+		struct nvme_zone_info *zi)
+{
+	lim->zoned = 1;
+	lim->max_open_zones = zi->max_open_zones;
+	lim->max_active_zones = zi->max_active_zones;
+	lim->max_zone_append_sectors = ns->ctrl->max_zone_append;
+	lim->chunk_sectors = ns->head->zsze =
+		nvme_lba_to_sect(ns->head, zi->zone_size);
+	blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ns->queue);
+}
+
 static void *nvme_zns_alloc_report_buffer(struct nvme_ns *ns,
 		unsigned int nr_zones, size_t *buflen)
 {
......