Commit 9419e71b authored by Daniel Wagner, committed by Keith Busch

nvme: move ns id info to struct nvme_ns_head

Move the namespace info to struct nvme_ns_head, because it is the same
for all associated namespaces.

Note: with multipathing enabled, the PI information is shared between
all paths. If a path uses a different PI configuration, it will
overwrite the previous settings. This is obviously not correct, and
such configurations will be rejected in the future. For the time being
we expect correctly configured storage.
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Daniel Wagner <dwagner@suse.de>
Signed-off-by: Keith Busch <kbusch@kernel.org>
parent 4ba8b3f7
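
The net effect: the namespace identification and format fields move out of the path-private struct nvme_ns into the struct nvme_ns_head shared by all paths, and every accessor gains a ->head hop. A condensed sketch of the change (field subset only; the full diff follows):

	/* before: every controller path carried its own copy */
	struct nvme_ns {
		struct nvme_ns_head *head;
		int lba_shift;
		u16 ms;		/* metadata bytes per LBA */
		u8 pi_type;	/* protection information type */
		/* ... */
	};

	/* after: one copy in the head shared by all paths */
	struct nvme_ns_head {
		int lba_shift;
		u16 ms;
		u8 pi_type;
		/* ... */
	};

	/* callers change from ns->lba_shift to ns->head->lba_shift */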
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -312,12 +312,12 @@ static void nvme_log_error(struct request *req)
 	struct nvme_request *nr = nvme_req(req);
 
 	if (ns) {
-		pr_err_ratelimited("%s: %s(0x%x) @ LBA %llu, %llu blocks, %s (sct 0x%x / sc 0x%x) %s%s\n",
+		pr_err_ratelimited("%s: %s(0x%x) @ LBA %llu, %u blocks, %s (sct 0x%x / sc 0x%x) %s%s\n",
 		       ns->disk ? ns->disk->disk_name : "?",
 		       nvme_get_opcode_str(nr->cmd->common.opcode),
 		       nr->cmd->common.opcode,
-		       (unsigned long long)nvme_sect_to_lba(ns, blk_rq_pos(req)),
-		       (unsigned long long)blk_rq_bytes(req) >> ns->lba_shift,
+		       nvme_sect_to_lba(ns, blk_rq_pos(req)),
+		       blk_rq_bytes(req) >> ns->head->lba_shift,
 		       nvme_get_error_status_str(nr->status),
 		       nr->status >> 8 & 7,	/* Status Code Type */
 		       nr->status & 0xff,	/* Status Code */
@@ -792,7 +792,7 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
 	if (queue_max_discard_segments(req->q) == 1) {
 		u64 slba = nvme_sect_to_lba(ns, blk_rq_pos(req));
-		u32 nlb = blk_rq_sectors(req) >> (ns->lba_shift - 9);
+		u32 nlb = blk_rq_sectors(req) >> (ns->head->lba_shift - 9);
 
 		range[0].cattr = cpu_to_le32(0);
 		range[0].nlb = cpu_to_le32(nlb);
@@ -801,7 +801,7 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
 	} else {
 		__rq_for_each_bio(bio, req) {
 			u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector);
-			u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
+			u32 nlb = bio->bi_iter.bi_size >> ns->head->lba_shift;
 
 			if (n < segments) {
 				range[n].cattr = cpu_to_le32(0);
@@ -839,7 +839,7 @@ static void nvme_set_ref_tag(struct nvme_ns *ns, struct nvme_command *cmnd,
 	u64 ref48;
 
 	/* both rw and write zeroes share the same reftag format */
-	switch (ns->guard_type) {
+	switch (ns->head->guard_type) {
 	case NVME_NVM_NS_16B_GUARD:
 		cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req));
 		break;
@@ -869,15 +869,16 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
 	cmnd->write_zeroes.slba =
 		cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
 	cmnd->write_zeroes.length =
-		cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
+		cpu_to_le16((blk_rq_bytes(req) >> ns->head->lba_shift) - 1);
 
-	if (!(req->cmd_flags & REQ_NOUNMAP) && (ns->features & NVME_NS_DEAC))
+	if (!(req->cmd_flags & REQ_NOUNMAP) &&
+	    (ns->head->features & NVME_NS_DEAC))
 		cmnd->write_zeroes.control |= cpu_to_le16(NVME_WZ_DEAC);
 
 	if (nvme_ns_has_pi(ns)) {
 		cmnd->write_zeroes.control |= cpu_to_le16(NVME_RW_PRINFO_PRACT);
 
-		switch (ns->pi_type) {
+		switch (ns->head->pi_type) {
 		case NVME_NS_DPS_PI_TYPE1:
 		case NVME_NS_DPS_PI_TYPE2:
 			nvme_set_ref_tag(ns, cmnd, req);
@@ -910,12 +911,13 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
 	cmnd->rw.cdw3 = 0;
 	cmnd->rw.metadata = 0;
 	cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
-	cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
+	cmnd->rw.length =
+		cpu_to_le16((blk_rq_bytes(req) >> ns->head->lba_shift) - 1);
 	cmnd->rw.reftag = 0;
 	cmnd->rw.apptag = 0;
 	cmnd->rw.appmask = 0;
 
-	if (ns->ms) {
+	if (ns->head->ms) {
 		/*
 		 * If formated with metadata, the block layer always provides a
 		 * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled.  Else
@@ -928,7 +930,7 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
 			control |= NVME_RW_PRINFO_PRACT;
 	}
 
-	switch (ns->pi_type) {
+	switch (ns->head->pi_type) {
 	case NVME_NS_DPS_PI_TYPE3:
 		control |= NVME_RW_PRINFO_PRCHK_GUARD;
 		break;
@@ -1663,9 +1665,9 @@ static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns,
 {
 	struct blk_integrity integrity = { };
 
-	switch (ns->pi_type) {
+	switch (ns->head->pi_type) {
 	case NVME_NS_DPS_PI_TYPE3:
-		switch (ns->guard_type) {
+		switch (ns->head->guard_type) {
 		case NVME_NVM_NS_16B_GUARD:
 			integrity.profile = &t10_pi_type3_crc;
 			integrity.tag_size = sizeof(u16) + sizeof(u32);
@@ -1683,7 +1685,7 @@ static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns,
 		break;
 	case NVME_NS_DPS_PI_TYPE1:
 	case NVME_NS_DPS_PI_TYPE2:
-		switch (ns->guard_type) {
+		switch (ns->head->guard_type) {
 		case NVME_NVM_NS_16B_GUARD:
 			integrity.profile = &t10_pi_type1_crc;
 			integrity.tag_size = sizeof(u16);
@@ -1704,7 +1706,7 @@ static void nvme_init_integrity(struct gendisk *disk, struct nvme_ns *ns,
 		break;
 	}
 
-	integrity.tuple_size = ns->ms;
+	integrity.tuple_size = ns->head->ms;
 	blk_integrity_register(disk, &integrity);
 	blk_queue_max_integrity_segments(disk->queue, max_integrity_segments);
 }
@@ -1763,11 +1765,11 @@ static int nvme_init_ms(struct nvme_ns *ns, struct nvme_id_ns *id)
 	int ret = 0;
 	u32 elbaf;
 
-	ns->pi_size = 0;
-	ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
+	ns->head->pi_size = 0;
+	ns->head->ms = le16_to_cpu(id->lbaf[lbaf].ms);
 	if (!(ctrl->ctratt & NVME_CTRL_ATTR_ELBAS)) {
-		ns->pi_size = sizeof(struct t10_pi_tuple);
-		ns->guard_type = NVME_NVM_NS_16B_GUARD;
+		ns->head->pi_size = sizeof(struct t10_pi_tuple);
+		ns->head->guard_type = NVME_NVM_NS_16B_GUARD;
 		goto set_pi;
 	}
@@ -1790,13 +1792,13 @@ static int nvme_init_ms(struct nvme_ns *ns, struct nvme_id_ns *id)
 	if (nvme_elbaf_sts(elbaf))
 		goto free_data;
 
-	ns->guard_type = nvme_elbaf_guard_type(elbaf);
-	switch (ns->guard_type) {
+	ns->head->guard_type = nvme_elbaf_guard_type(elbaf);
+	switch (ns->head->guard_type) {
 	case NVME_NVM_NS_64B_GUARD:
-		ns->pi_size = sizeof(struct crc64_pi_tuple);
+		ns->head->pi_size = sizeof(struct crc64_pi_tuple);
 		break;
 	case NVME_NVM_NS_16B_GUARD:
-		ns->pi_size = sizeof(struct t10_pi_tuple);
+		ns->head->pi_size = sizeof(struct t10_pi_tuple);
 		break;
 	default:
 		break;
@@ -1805,10 +1807,10 @@ static int nvme_init_ms(struct nvme_ns *ns, struct nvme_id_ns *id)
 free_data:
 	kfree(nvm);
 set_pi:
-	if (ns->pi_size && (first || ns->ms == ns->pi_size))
-		ns->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
+	if (ns->head->pi_size && (first || ns->head->ms == ns->head->pi_size))
+		ns->head->pi_type = id->dps & NVME_NS_DPS_PI_MASK;
 	else
-		ns->pi_type = 0;
+		ns->head->pi_type = 0;
 
 	return ret;
 }
@@ -1822,8 +1824,8 @@ static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
 	if (ret)
 		return ret;
 
-	ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
-	if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
+	ns->head->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS);
+	if (!ns->head->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
 		return 0;
 
 	if (ctrl->ops->flags & NVME_F_FABRICS) {
@@ -1835,7 +1837,7 @@ static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
 		if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT)))
 			return 0;
 
-		ns->features |= NVME_NS_EXT_LBAS;
+		ns->head->features |= NVME_NS_EXT_LBAS;
 
 		/*
 		 * The current fabrics transport drivers support namespace
@@ -1847,7 +1849,7 @@ static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
 		 * gain the ability to use other metadata formats.
 		 */
 		if (ctrl->max_integrity_segments && nvme_ns_has_pi(ns))
-			ns->features |= NVME_NS_METADATA_SUPPORTED;
+			ns->head->features |= NVME_NS_METADATA_SUPPORTED;
 	} else {
 		/*
 		 * For PCIe controllers, we can't easily remap the separate
@@ -1856,9 +1858,9 @@ static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id)
 		 * We allow extended LBAs for the passthrough interface, though.
 		 */
 		if (id->flbas & NVME_NS_FLBAS_META_EXT)
-			ns->features |= NVME_NS_EXT_LBAS;
+			ns->head->features |= NVME_NS_EXT_LBAS;
 		else
-			ns->features |= NVME_NS_METADATA_SUPPORTED;
+			ns->head->features |= NVME_NS_METADATA_SUPPORTED;
 	}
 
 	return 0;
 }
@@ -1885,14 +1887,14 @@ static void nvme_update_disk_info(struct gendisk *disk,
 		struct nvme_ns *ns, struct nvme_id_ns *id)
 {
 	sector_t capacity = nvme_lba_to_sect(ns, le64_to_cpu(id->nsze));
-	u32 bs = 1U << ns->lba_shift;
+	u32 bs = 1U << ns->head->lba_shift;
 	u32 atomic_bs, phys_bs, io_opt = 0;
 
 	/*
 	 * The block layer can't support LBA sizes larger than the page size
 	 * yet, so catch this early and don't allow block I/O.
 	 */
-	if (ns->lba_shift > PAGE_SHIFT) {
+	if (ns->head->lba_shift > PAGE_SHIFT) {
 		capacity = 0;
 		bs = (1 << 9);
 	}
@@ -1935,9 +1937,9 @@ static void nvme_update_disk_info(struct gendisk *disk,
 	 * I/O to namespaces with metadata except when the namespace supports
 	 * PI, as it can strip/insert in that case.
 	 */
-	if (ns->ms) {
+	if (ns->head->ms) {
 		if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
-		    (ns->features & NVME_NS_METADATA_SUPPORTED))
+		    (ns->head->features & NVME_NS_METADATA_SUPPORTED))
 			nvme_init_integrity(disk, ns,
 					    ns->ctrl->max_integrity_segments);
 		else if (!nvme_ns_has_pi(ns))
@@ -2031,7 +2033,7 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
 
 	blk_mq_freeze_queue(ns->disk->queue);
 	lbaf = nvme_lbaf_index(id->flbas);
-	ns->lba_shift = id->lbaf[lbaf].ds;
+	ns->head->lba_shift = id->lbaf[lbaf].ds;
 	nvme_set_queue_limits(ns->ctrl, ns->queue);
 
 	ret = nvme_configure_metadata(ns, id);
@@ -2057,7 +2059,7 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
 	 * do not return zeroes.
 	 */
 	if ((id->dlfeat & 0x7) == 0x1 && (id->dlfeat & (1 << 3)))
-		ns->features |= NVME_NS_DEAC;
+		ns->head->features |= NVME_NS_DEAC;
 	set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
 	set_bit(NVME_NS_READY, &ns->flags);
 	blk_mq_unfreeze_queue(ns->disk->queue);
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -224,10 +224,10 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 		return -EINVAL;
 	}
 
-	length = (io.nblocks + 1) << ns->lba_shift;
+	length = (io.nblocks + 1) << ns->head->lba_shift;
 
 	if ((io.control & NVME_RW_PRINFO_PRACT) &&
-	    ns->ms == sizeof(struct t10_pi_tuple)) {
+	    ns->head->ms == sizeof(struct t10_pi_tuple)) {
 		/*
 		 * Protection information is stripped/inserted by the
 		 * controller.
meta_len = 0; meta_len = 0;
metadata = NULL; metadata = NULL;
} else { } else {
meta_len = (io.nblocks + 1) * ns->ms; meta_len = (io.nblocks + 1) * ns->head->ms;
metadata = nvme_to_user_ptr(io.metadata); metadata = nvme_to_user_ptr(io.metadata);
} }
if (ns->features & NVME_NS_EXT_LBAS) { if (ns->head->features & NVME_NS_EXT_LBAS) {
length += meta_len; length += meta_len;
meta_len = 0; meta_len = 0;
} else if (meta_len) { } else if (meta_len) {
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -446,6 +446,17 @@ struct nvme_ns_head {
 	bool shared;
 	int instance;
 	struct nvme_effects_log *effects;
+	int lba_shift;
+	u16 ms;
+	u16 pi_size;
+	u16 sgs;
+	u32 sws;
+	u8 pi_type;
+	u8 guard_type;
+#ifdef CONFIG_BLK_DEV_ZONED
+	u64 zsze;
+#endif
+	unsigned long features;
 
 	struct cdev cdev;
 	struct device cdev_device;
@@ -487,17 +498,6 @@ struct nvme_ns {
 	struct kref kref;
 	struct nvme_ns_head *head;
 
-	int lba_shift;
-	u16 ms;
-	u16 pi_size;
-	u16 sgs;
-	u32 sws;
-	u8 pi_type;
-	u8 guard_type;
-#ifdef CONFIG_BLK_DEV_ZONED
-	u64 zsze;
-#endif
-	unsigned long features;
 	unsigned long flags;
 #define NVME_NS_REMOVING	0
 #define NVME_NS_ANA_PENDING	2
@@ -514,7 +514,7 @@ struct nvme_ns {
 /* NVMe ns supports metadata actions by the controller (generate/strip) */
 static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
 {
-	return ns->pi_type && ns->ms == ns->pi_size;
+	return ns->head->pi_type && ns->head->ms == ns->head->pi_size;
 }
 
 struct nvme_ctrl_ops {
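
For context on the nvme_ns_has_pi() condition above: the controller can only generate/strip protection information when the per-LBA metadata region holds nothing but the PI tuple. A sketch with illustrative values (not taken from the patch):

	/* 16B guard format: the 8-byte T10 PI tuple fills the metadata exactly */
	ns->head->ms = 8;
	ns->head->pi_size = sizeof(struct t10_pi_tuple);	/* 8 bytes */
	ns->head->pi_type = NVME_NS_DPS_PI_TYPE1;
	/* nvme_ns_has_pi(ns) -> true */

	/* 8 bytes of PI plus 8 bytes of app metadata: no strip/insert */
	ns->head->ms = 16;
	/* nvme_ns_has_pi(ns) -> false */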
@@ -648,7 +648,7 @@ static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
  */
 static inline u64 nvme_sect_to_lba(struct nvme_ns *ns, sector_t sector)
 {
-	return sector >> (ns->lba_shift - SECTOR_SHIFT);
+	return sector >> (ns->head->lba_shift - SECTOR_SHIFT);
 }
 
 /*
@@ -656,7 +656,7 @@ static inline u64 nvme_sect_to_lba(struct nvme_ns *ns, sector_t sector)
  */
 static inline sector_t nvme_lba_to_sect(struct nvme_ns *ns, u64 lba)
 {
-	return lba << (ns->lba_shift - SECTOR_SHIFT);
+	return lba << (ns->head->lba_shift - SECTOR_SHIFT);
 }
 
 /*
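
A worked example of the two converters above, assuming a 4KiB LBA format (ns->head->lba_shift == 12) and the block layer's 512-byte sectors (SECTOR_SHIFT == 9):

	u64 lba = nvme_sect_to_lba(ns, 8);		/* 8 >> (12 - 9) == LBA 1 */
	sector_t sect = nvme_lba_to_sect(ns, 1);	/* 1 << (12 - 9) == sector 8 */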
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1418,7 +1418,7 @@ static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue,
 		goto mr_put;
 
 	nvme_rdma_set_sig_attrs(blk_get_integrity(bio->bi_bdev->bd_disk), c,
-				req->mr->sig_attrs, ns->pi_type);
+				req->mr->sig_attrs, ns->head->pi_type);
 	nvme_rdma_set_prot_checks(c, &req->mr->sig_attrs->check_mask);
 	ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
--- a/drivers/nvme/host/zns.c
+++ b/drivers/nvme/host/zns.c
@@ -11,7 +11,7 @@ int nvme_revalidate_zones(struct nvme_ns *ns)
 {
 	struct request_queue *q = ns->queue;
 
-	blk_queue_chunk_sectors(q, ns->zsze);
+	blk_queue_chunk_sectors(q, ns->head->zsze);
 	blk_queue_max_zone_append_sectors(q, ns->ctrl->max_zone_append);
 
 	return blk_revalidate_disk_zones(ns->disk, NULL);
@@ -99,11 +99,12 @@ int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
 		goto free_data;
 	}
 
-	ns->zsze = nvme_lba_to_sect(ns, le64_to_cpu(id->lbafe[lbaf].zsze));
-	if (!is_power_of_2(ns->zsze)) {
+	ns->head->zsze =
+		nvme_lba_to_sect(ns, le64_to_cpu(id->lbafe[lbaf].zsze));
+	if (!is_power_of_2(ns->head->zsze)) {
 		dev_warn(ns->ctrl->device,
 			"invalid zone size:%llu for namespace:%u\n",
-			ns->zsze, ns->head->ns_id);
+			ns->head->zsze, ns->head->ns_id);
 		status = -ENODEV;
 		goto free_data;
 	}
@@ -128,7 +129,7 @@ static void *nvme_zns_alloc_report_buffer(struct nvme_ns *ns,
 				   sizeof(struct nvme_zone_descriptor);
 
 	nr_zones = min_t(unsigned int, nr_zones,
-			 get_capacity(ns->disk) >> ilog2(ns->zsze));
+			 get_capacity(ns->disk) >> ilog2(ns->head->zsze));
 
 	bufsize = sizeof(struct nvme_zone_report) +
 		nr_zones * sizeof(struct nvme_zone_descriptor);
@@ -162,7 +163,7 @@ static int nvme_zone_parse_entry(struct nvme_ns *ns,
 
 	zone.type = BLK_ZONE_TYPE_SEQWRITE_REQ;
 	zone.cond = entry->zs >> 4;
-	zone.len = ns->zsze;
+	zone.len = ns->head->zsze;
 	zone.capacity = nvme_lba_to_sect(ns, le64_to_cpu(entry->zcap));
 	zone.start = nvme_lba_to_sect(ns, le64_to_cpu(entry->zslba));
 	if (zone.cond == BLK_ZONE_COND_FULL)
@@ -196,7 +197,7 @@ int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
 	c.zmr.zrasf = NVME_ZRASF_ZONE_REPORT_ALL;
 	c.zmr.pr = NVME_REPORT_ZONE_PARTIAL;
 
-	sector &= ~(ns->zsze - 1);
+	sector &= ~(ns->head->zsze - 1);
 	while (zone_idx < nr_zones && sector < get_capacity(ns->disk)) {
 		memset(report, 0, buflen);
@@ -220,7 +221,7 @@ int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
 			zone_idx++;
 		}
 
-		sector += ns->zsze * nz;
+		sector += ns->head->zsze * nz;
 	}
 
 	if (zone_idx > 0)