Commit d386aedc authored by Daniel Wagner's avatar Daniel Wagner Committed by Keith Busch

nvme: refactor ns info setup function

Use nvme_ns_head instead of nvme_ns where possible. This reduces the
coupling between the different data structures.
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Daniel Wagner <dwagner@suse.de>
Signed-off-by: Keith Busch <kbusch@kernel.org>
parent 0372dd4e
...@@ -1665,14 +1665,14 @@ int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo) ...@@ -1665,14 +1665,14 @@ int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
} }
#ifdef CONFIG_BLK_DEV_INTEGRITY #ifdef CONFIG_BLK_DEV_INTEGRITY
static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns, static void nvme_init_integrity(struct gendisk *disk,
u32 max_integrity_segments) struct nvme_ns_head *head, u32 max_integrity_segments)
{ {
struct blk_integrity integrity = { }; struct blk_integrity integrity = { };
switch (ns->head->pi_type) { switch (head->pi_type) {
case NVME_NS_DPS_PI_TYPE3: case NVME_NS_DPS_PI_TYPE3:
switch (ns->head->guard_type) { switch (head->guard_type) {
case NVME_NVM_NS_16B_GUARD: case NVME_NVM_NS_16B_GUARD:
integrity.profile = &t10_pi_type3_crc; integrity.profile = &t10_pi_type3_crc;
integrity.tag_size = sizeof(u16) + sizeof(u32); integrity.tag_size = sizeof(u16) + sizeof(u32);
...@@ -1690,7 +1690,7 @@ static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns, ...@@ -1690,7 +1690,7 @@ static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns,
break; break;
case NVME_NS_DPS_PI_TYPE1: case NVME_NS_DPS_PI_TYPE1:
case NVME_NS_DPS_PI_TYPE2: case NVME_NS_DPS_PI_TYPE2:
switch (ns->head->guard_type) { switch (head->guard_type) {
case NVME_NVM_NS_16B_GUARD: case NVME_NVM_NS_16B_GUARD:
integrity.profile = &t10_pi_type1_crc; integrity.profile = &t10_pi_type1_crc;
integrity.tag_size = sizeof(u16); integrity.tag_size = sizeof(u16);
...@@ -1711,26 +1711,26 @@ static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns, ...@@ -1711,26 +1711,26 @@ static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns,
break; break;
} }
integrity.tuple_size = ns->head->ms; integrity.tuple_size = head->ms;
blk_integrity_register(disk, &integrity); blk_integrity_register(disk, &integrity);
blk_queue_max_integrity_segments(disk->queue, max_integrity_segments); blk_queue_max_integrity_segments(disk->queue, max_integrity_segments);
} }
#else #else
static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns, static void nvme_init_integrity(struct gendisk *disk,
u32 max_integrity_segments) struct nvme_ns_head *head, u32 max_integrity_segments)
{ {
} }
#endif /* CONFIG_BLK_DEV_INTEGRITY */ #endif /* CONFIG_BLK_DEV_INTEGRITY */
static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns) static void nvme_config_discard(struct nvme_ctrl *ctrl, struct gendisk *disk,
struct nvme_ns_head *head)
{ {
struct nvme_ctrl *ctrl = ns->ctrl;
struct request_queue *queue = disk->queue; struct request_queue *queue = disk->queue;
u32 size = queue_logical_block_size(queue); u32 size = queue_logical_block_size(queue);
if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns->head, UINT_MAX)) if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(head, UINT_MAX))
ctrl->max_discard_sectors = ctrl->max_discard_sectors =
nvme_lba_to_sect(ns->head, ctrl->dmrsl); nvme_lba_to_sect(head, ctrl->dmrsl);
if (ctrl->max_discard_sectors == 0) { if (ctrl->max_discard_sectors == 0) {
blk_queue_max_discard_sectors(queue, 0); blk_queue_max_discard_sectors(queue, 0);
...@@ -1761,21 +1761,21 @@ static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b) ...@@ -1761,21 +1761,21 @@ static bool nvme_ns_ids_equal(struct nvme_ns_ids *a, struct nvme_ns_ids *b)
a->csi == b->csi; a->csi == b->csi;
} }
static int nvme_init_ms(struct nvme_ns *ns, struct nvme_id_ns *id) static int nvme_init_ms(struct nvme_ctrl *ctrl, struct nvme_ns_head *head,
struct nvme_id_ns *id)
{ {
bool first = id->dps & NVME_NS_DPS_PI_FIRST; bool first = id->dps & NVME_NS_DPS_PI_FIRST;
unsigned lbaf = nvme_lbaf_index(id->flbas); unsigned lbaf = nvme_lbaf_index(id->flbas);
struct nvme_ctrl *ctrl = ns->ctrl;
struct nvme_command c = { }; struct nvme_command c = { };
struct nvme_id_ns_nvm *nvm; struct nvme_id_ns_nvm *nvm;
int ret = 0; int ret = 0;
u32 elbaf; u32 elbaf;
ns->head->pi_size = 0; head->pi_size = 0;
ns->head->ms = le16_to_cpu(id->lbaf[lbaf].ms); head->ms = le16_to_cpu(id->lbaf[lbaf].ms);
if (!(ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)) { if (!(ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)) {
ns->head->pi_size = sizeof(struct t10_pi_tuple); head->pi_size = sizeof(struct t10_pi_tuple);
ns->head->guard_type = NVME_NVM_NS_16B_GUARD; head->guard_type = NVME_NVM_NS_16B_GUARD;
goto set_pi; goto set_pi;
} }
...@@ -1784,11 +1784,11 @@ static int nvme_init_ms(struct nvme_ns *ns, struct nvme_id_ns *id) ...@@ -1784,11 +1784,11 @@ static int nvme_init_ms(struct nvme_ns *ns, struct nvme_id_ns *id)
return -ENOMEM; return -ENOMEM;
c.identify.opcode = nvme_admin_identify; c.identify.opcode = nvme_admin_identify;
c.identify.nsid = cpu_to_le32(ns->head->ns_id); c.identify.nsid = cpu_to_le32(head->ns_id);
c.identify.cns = NVME_ID_CNS_CS_NS; c.identify.cns = NVME_ID_CNS_CS_NS;
c.identify.csi = NVME_CSI_NVM; c.identify.csi = NVME_CSI_NVM;
ret = nvme_submit_sync_cmd(ns->ctrl->admin_q, &c, nvm, sizeof(*nvm)); ret = nvme_submit_sync_cmd(ctrl->admin_q, &c, nvm, sizeof(*nvm));
if (ret) if (ret)
goto free_data; goto free_data;
...@@ -1798,13 +1798,13 @@ static int nvme_init_ms(struct nvme_ns *ns, struct nvme_id_ns *id) ...@@ -1798,13 +1798,13 @@ static int nvme_init_ms(struct nvme_ns *ns, struct nvme_id_ns *id)
if (nvme_elbaf_sts(elbaf)) if (nvme_elbaf_sts(elbaf))
goto free_data; goto free_data;
ns->head->guard_type = nvme_elbaf_guard_type(elbaf); head->guard_type = nvme_elbaf_guard_type(elbaf);
switch (ns->head->guard_type) { switch (head->guard_type) {
case NVME_NVM_NS_64B_GUARD: case NVME_NVM_NS_64B_GUARD:
ns->head->pi_size = sizeof(struct crc64_pi_tuple); head->pi_size = sizeof(struct crc64_pi_tuple);
break; break;
case NVME_NVM_NS_16B_GUARD: case NVME_NVM_NS_16B_GUARD:
ns->head->pi_size = sizeof(struct t10_pi_tuple); head->pi_size = sizeof(struct t10_pi_tuple);
break; break;
default: default:
break; break;
...@@ -1813,25 +1813,25 @@ static int nvme_init_ms(struct nvme_ns *ns, struct nvme_id_ns *id) ...@@ -1813,25 +1813,25 @@ static int nvme_init_ms(struct nvme_ns *ns, struct nvme_id_ns *id)
free_data: free_data:
kfree(nvm); kfree(nvm);
set_pi: set_pi:
if (ns->head->pi_size && (first || ns->head->ms == ns->head->pi_size)) if (head->pi_size && (first || head->ms == head->pi_size))
ns->head->pi_type = id->dps & NVME_NS_DPS_PI_MASK; head->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
else else
ns->head->pi_type = 0; head->pi_type = 0;
return ret; return ret;
} }
static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id) static int nvme_configure_metadata(struct nvme_ctrl *ctrl,
struct nvme_ns_head *head, struct nvme_id_ns *id)
{ {
struct nvme_ctrl *ctrl = ns->ctrl;
int ret; int ret;
ret = nvme_init_ms(ns, id); ret = nvme_init_ms(ctrl, head, id);
if (ret) if (ret)
return ret; return ret;
ns->head->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS); head->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
if (!ns->head->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)) if (!head->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
return 0; return 0;
if (ctrl->ops->flags & NVME_F_FABRICS) { if (ctrl->ops->flags & NVME_F_FABRICS) {
...@@ -1843,7 +1843,7 @@ static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id) ...@@ -1843,7 +1843,7 @@ static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT))) if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
return 0; return 0;
ns->head->features |= NVME_NS_EXT_LBAS; head->features |= NVME_NS_EXT_LBAS;
/* /*
* The current fabrics transport drivers support namespace * The current fabrics transport drivers support namespace
...@@ -1854,8 +1854,8 @@ static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id) ...@@ -1854,8 +1854,8 @@ static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
* Note, this check will need to be modified if any drivers * Note, this check will need to be modified if any drivers
* gain the ability to use other metadata formats. * gain the ability to use other metadata formats.
*/ */
if (ctrl->max_integrity_segments && nvme_ns_has_pi(ns->head)) if (ctrl->max_integrity_segments && nvme_ns_has_pi(head))
ns->head->features |= NVME_NS_METADATA_SUPPORTED; head->features |= NVME_NS_METADATA_SUPPORTED;
} else { } else {
/* /*
* For PCIe controllers, we can't easily remap the separate * For PCIe controllers, we can't easily remap the separate
...@@ -1864,9 +1864,9 @@ static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id) ...@@ -1864,9 +1864,9 @@ static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
* We allow extended LBAs for the passthrough interface, though. * We allow extended LBAs for the passthrough interface, though.
*/ */
if (id->flbas & NVME_NS_FLBAS_META_EXT) if (id->flbas & NVME_NS_FLBAS_META_EXT)
ns->head->features |= NVME_NS_EXT_LBAS; head->features |= NVME_NS_EXT_LBAS;
else else
ns->head->features |= NVME_NS_METADATA_SUPPORTED; head->features |= NVME_NS_METADATA_SUPPORTED;
} }
return 0; return 0;
} }
...@@ -1889,18 +1889,18 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl, ...@@ -1889,18 +1889,18 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
blk_queue_write_cache(q, vwc, vwc); blk_queue_write_cache(q, vwc, vwc);
} }
static void nvme_update_disk_info(struct gendisk *disk, static void nvme_update_disk_info(struct nvme_ctrl *ctrl, struct gendisk *disk,
struct nvme_ns *ns, struct nvme_id_ns *id) struct nvme_ns_head *head, struct nvme_id_ns *id)
{ {
sector_t capacity = nvme_lba_to_sect(ns->head, le64_to_cpu(id->nsze)); sector_t capacity = nvme_lba_to_sect(head, le64_to_cpu(id->nsze));
u32 bs = 1U << ns->head->lba_shift; u32 bs = 1U << head->lba_shift;
u32 atomic_bs, phys_bs, io_opt = 0; u32 atomic_bs, phys_bs, io_opt = 0;
/* /*
* The block layer can't support LBA sizes larger than the page size * The block layer can't support LBA sizes larger than the page size
* yet, so catch this early and don't allow block I/O. * yet, so catch this early and don't allow block I/O.
*/ */
if (ns->head->lba_shift > PAGE_SHIFT) { if (head->lba_shift > PAGE_SHIFT) {
capacity = 0; capacity = 0;
bs = (1 << 9); bs = (1 << 9);
} }
...@@ -1917,7 +1917,7 @@ static void nvme_update_disk_info(struct gendisk *disk, ...@@ -1917,7 +1917,7 @@ static void nvme_update_disk_info(struct gendisk *disk,
if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf) if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf)
atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs; atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs;
else else
atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs; atomic_bs = (1 + ctrl->subsys->awupf) * bs;
} }
if (id->nsfeat & NVME_NS_FEAT_IO_OPT) { if (id->nsfeat & NVME_NS_FEAT_IO_OPT) {
...@@ -1943,20 +1943,20 @@ static void nvme_update_disk_info(struct gendisk *disk, ...@@ -1943,20 +1943,20 @@ static void nvme_update_disk_info(struct gendisk *disk,
* I/O to namespaces with metadata except when the namespace supports * I/O to namespaces with metadata except when the namespace supports
* PI, as it can strip/insert in that case. * PI, as it can strip/insert in that case.
*/ */
if (ns->head->ms) { if (head->ms) {
if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) && if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
(ns->head->features & NVME_NS_METADATA_SUPPORTED)) (head->features & NVME_NS_METADATA_SUPPORTED))
nvme_init_integrity(disk, ns, nvme_init_integrity(disk, head,
ns->ctrl->max_integrity_segments); ctrl->max_integrity_segments);
else if (!nvme_ns_has_pi(ns->head)) else if (!nvme_ns_has_pi(head))
capacity = 0; capacity = 0;
} }
set_capacity_and_notify(disk, capacity); set_capacity_and_notify(disk, capacity);
nvme_config_discard(disk, ns); nvme_config_discard(ctrl, disk, head);
blk_queue_max_write_zeroes_sectors(disk->queue, blk_queue_max_write_zeroes_sectors(disk->queue,
ns->ctrl->max_zeroes_sectors); ctrl->max_zeroes_sectors);
} }
static bool nvme_ns_is_readonly(struct nvme_ns *ns, struct nvme_ns_info *info) static bool nvme_ns_is_readonly(struct nvme_ns *ns, struct nvme_ns_info *info)
...@@ -2042,13 +2042,13 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns, ...@@ -2042,13 +2042,13 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
ns->head->lba_shift = id->lbaf[lbaf].ds; ns->head->lba_shift = id->lbaf[lbaf].ds;
nvme_set_queue_limits(ns->ctrl, ns->queue); nvme_set_queue_limits(ns->ctrl, ns->queue);
ret = nvme_configure_metadata(ns, id); ret = nvme_configure_metadata(ns->ctrl, ns->head, id);
if (ret < 0) { if (ret < 0) {
blk_mq_unfreeze_queue(ns->disk->queue); blk_mq_unfreeze_queue(ns->disk->queue);
goto out; goto out;
} }
nvme_set_chunk_sectors(ns, id); nvme_set_chunk_sectors(ns, id);
nvme_update_disk_info(ns->disk, ns, id); nvme_update_disk_info(ns->ctrl, ns->disk, ns->head, id);
if (ns->head->ids.csi == NVME_CSI_ZNS) { if (ns->head->ids.csi == NVME_CSI_ZNS) {
ret = nvme_update_zone_info(ns, lbaf); ret = nvme_update_zone_info(ns, lbaf);
...@@ -2078,7 +2078,7 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns, ...@@ -2078,7 +2078,7 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
if (nvme_ns_head_multipath(ns->head)) { if (nvme_ns_head_multipath(ns->head)) {
blk_mq_freeze_queue(ns->head->disk->queue); blk_mq_freeze_queue(ns->head->disk->queue);
nvme_update_disk_info(ns->head->disk, ns, id); nvme_update_disk_info(ns->ctrl, ns->head->disk, ns->head, id);
set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info)); set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info));
nvme_mpath_revalidate_paths(ns); nvme_mpath_revalidate_paths(ns);
blk_stack_limits(&ns->head->disk->queue->limits, blk_stack_limits(&ns->head->disk->queue->limits,
......
...@@ -148,7 +148,8 @@ static void *nvme_zns_alloc_report_buffer(struct nvme_ns *ns, ...@@ -148,7 +148,8 @@ static void *nvme_zns_alloc_report_buffer(struct nvme_ns *ns,
return NULL; return NULL;
} }
static int nvme_zone_parse_entry(struct nvme_ns *ns, static int nvme_zone_parse_entry(struct nvme_ctrl *ctrl,
struct nvme_ns_head *head,
struct nvme_zone_descriptor *entry, struct nvme_zone_descriptor *entry,
unsigned int idx, report_zones_cb cb, unsigned int idx, report_zones_cb cb,
void *data) void *data)
...@@ -156,20 +157,20 @@ static int nvme_zone_parse_entry(struct nvme_ns *ns, ...@@ -156,20 +157,20 @@ static int nvme_zone_parse_entry(struct nvme_ns *ns,
struct blk_zone zone = { }; struct blk_zone zone = { };
if ((entry->zt & 0xf) != NVME_ZONE_TYPE_SEQWRITE_REQ) { if ((entry->zt & 0xf) != NVME_ZONE_TYPE_SEQWRITE_REQ) {
dev_err(ns->ctrl->device, "invalid zone type %#x\n", dev_err(ctrl->device, "invalid zone type %#x\n",
entry->zt); entry->zt);
return -EINVAL; return -EINVAL;
} }
zone.type = BLK_ZONE_TYPE_SEQWRITE_REQ; zone.type = BLK_ZONE_TYPE_SEQWRITE_REQ;
zone.cond = entry->zs >> 4; zone.cond = entry->zs >> 4;
zone.len = ns->head->zsze; zone.len = head->zsze;
zone.capacity = nvme_lba_to_sect(ns->head, le64_to_cpu(entry->zcap)); zone.capacity = nvme_lba_to_sect(head, le64_to_cpu(entry->zcap));
zone.start = nvme_lba_to_sect(ns->head, le64_to_cpu(entry->zslba)); zone.start = nvme_lba_to_sect(head, le64_to_cpu(entry->zslba));
if (zone.cond == BLK_ZONE_COND_FULL) if (zone.cond == BLK_ZONE_COND_FULL)
zone.wp = zone.start + zone.len; zone.wp = zone.start + zone.len;
else else
zone.wp = nvme_lba_to_sect(ns->head, le64_to_cpu(entry->wp)); zone.wp = nvme_lba_to_sect(head, le64_to_cpu(entry->wp));
return cb(&zone, idx, data); return cb(&zone, idx, data);
} }
...@@ -214,7 +215,8 @@ int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector, ...@@ -214,7 +215,8 @@ int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
break; break;
for (i = 0; i < nz && zone_idx < nr_zones; i++) { for (i = 0; i < nz && zone_idx < nr_zones; i++) {
ret = nvme_zone_parse_entry(ns, &report->entries[i], ret = nvme_zone_parse_entry(ns->ctrl, ns->head,
&report->entries[i],
zone_idx, cb, data); zone_idx, cb, data);
if (ret) if (ret)
goto out_free; goto out_free;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment