Commit 47ca23c1 authored by Jens Axboe's avatar Jens Axboe

Merge branch 'nvme-5.2' of git://git.infradead.org/nvme into for-5.2/block-post

Pull NVMe fixes from Christoph

* 'nvme-5.2' of git://git.infradead.org/nvme:
  nvme: validate cntlid during controller initialisation
  nvme: change locking for the per-subsystem controller list
  nvme: trace all async notice events
  nvme: fix typos in nvme status code values
  nvme-fabrics: remove unused argument
  nvme-multipath: avoid crash on invalid subsystem cntlid enumeration
  nvme-fc: use separate work queue to avoid warning
  nvme-rdma: remove redundant reference between ib_device and tagset
  nvme-pci: mark expected switch fall-through
  nvme-pci: add known admin effects to augment admin effects log page
  nvme-pci: init shadow doorbell after each reset
parents 936b33f7 1b1031ca
...@@ -1257,10 +1257,9 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, ...@@ -1257,10 +1257,9 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
return 0; return 0;
} }
effects |= nvme_known_admin_effects(opcode);
if (ctrl->effects) if (ctrl->effects)
effects = le32_to_cpu(ctrl->effects->acs[opcode]); effects = le32_to_cpu(ctrl->effects->acs[opcode]);
else
effects = nvme_known_admin_effects(opcode);
/* /*
* For simplicity, IO to all namespaces is quiesced even if the command * For simplicity, IO to all namespaces is quiesced even if the command
...@@ -2342,20 +2341,35 @@ static const struct attribute_group *nvme_subsys_attrs_groups[] = { ...@@ -2342,20 +2341,35 @@ static const struct attribute_group *nvme_subsys_attrs_groups[] = {
NULL, NULL,
}; };
static int nvme_active_ctrls(struct nvme_subsystem *subsys) static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{ {
int count = 0; struct nvme_ctrl *tmp;
struct nvme_ctrl *ctrl;
lockdep_assert_held(&nvme_subsystems_lock);
list_for_each_entry(tmp, &subsys->ctrls, subsys_entry) {
if (ctrl->state == NVME_CTRL_DELETING ||
ctrl->state == NVME_CTRL_DEAD)
continue;
if (tmp->cntlid == ctrl->cntlid) {
dev_err(ctrl->device,
"Duplicate cntlid %u with %s, rejecting\n",
ctrl->cntlid, dev_name(tmp->device));
return false;
}
mutex_lock(&subsys->lock); if ((id->cmic & (1 << 1)) ||
list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) { (ctrl->opts && ctrl->opts->discovery_nqn))
if (ctrl->state != NVME_CTRL_DELETING && continue;
ctrl->state != NVME_CTRL_DEAD)
count++; dev_err(ctrl->device,
"Subsystem does not support multiple controllers\n");
return false;
} }
mutex_unlock(&subsys->lock);
return count; return true;
} }
static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
...@@ -2395,22 +2409,13 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) ...@@ -2395,22 +2409,13 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
mutex_lock(&nvme_subsystems_lock); mutex_lock(&nvme_subsystems_lock);
found = __nvme_find_get_subsystem(subsys->subnqn); found = __nvme_find_get_subsystem(subsys->subnqn);
if (found) { if (found) {
/*
* Verify that the subsystem actually supports multiple
* controllers, else bail out.
*/
if (!(ctrl->opts && ctrl->opts->discovery_nqn) &&
nvme_active_ctrls(found) && !(id->cmic & (1 << 1))) {
dev_err(ctrl->device,
"ignoring ctrl due to duplicate subnqn (%s).\n",
found->subnqn);
nvme_put_subsystem(found);
ret = -EINVAL;
goto out_unlock;
}
__nvme_release_subsystem(subsys); __nvme_release_subsystem(subsys);
subsys = found; subsys = found;
if (!nvme_validate_cntlid(subsys, ctrl, id)) {
ret = -EINVAL;
goto out_put_subsystem;
}
} else { } else {
ret = device_add(&subsys->dev); ret = device_add(&subsys->dev);
if (ret) { if (ret) {
...@@ -2422,23 +2427,20 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) ...@@ -2422,23 +2427,20 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
list_add_tail(&subsys->entry, &nvme_subsystems); list_add_tail(&subsys->entry, &nvme_subsystems);
} }
ctrl->subsys = subsys;
mutex_unlock(&nvme_subsystems_lock);
if (sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj, if (sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
dev_name(ctrl->device))) { dev_name(ctrl->device))) {
dev_err(ctrl->device, dev_err(ctrl->device,
"failed to create sysfs link from subsystem.\n"); "failed to create sysfs link from subsystem.\n");
/* the transport driver will eventually put the subsystem */ goto out_put_subsystem;
return -EINVAL;
} }
mutex_lock(&subsys->lock); ctrl->subsys = subsys;
list_add_tail(&ctrl->subsys_entry, &subsys->ctrls); list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
mutex_unlock(&subsys->lock); mutex_unlock(&nvme_subsystems_lock);
return 0; return 0;
out_put_subsystem:
nvme_put_subsystem(subsys);
out_unlock: out_unlock:
mutex_unlock(&nvme_subsystems_lock); mutex_unlock(&nvme_subsystems_lock);
put_device(&subsys->dev); put_device(&subsys->dev);
...@@ -3605,19 +3607,18 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result) ...@@ -3605,19 +3607,18 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
{ {
u32 aer_notice_type = (result & 0xff00) >> 8; u32 aer_notice_type = (result & 0xff00) >> 8;
trace_nvme_async_event(ctrl, aer_notice_type);
switch (aer_notice_type) { switch (aer_notice_type) {
case NVME_AER_NOTICE_NS_CHANGED: case NVME_AER_NOTICE_NS_CHANGED:
trace_nvme_async_event(ctrl, aer_notice_type);
set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events); set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events);
nvme_queue_scan(ctrl); nvme_queue_scan(ctrl);
break; break;
case NVME_AER_NOTICE_FW_ACT_STARTING: case NVME_AER_NOTICE_FW_ACT_STARTING:
trace_nvme_async_event(ctrl, aer_notice_type);
queue_work(nvme_wq, &ctrl->fw_act_work); queue_work(nvme_wq, &ctrl->fw_act_work);
break; break;
#ifdef CONFIG_NVME_MULTIPATH #ifdef CONFIG_NVME_MULTIPATH
case NVME_AER_NOTICE_ANA: case NVME_AER_NOTICE_ANA:
trace_nvme_async_event(ctrl, aer_notice_type);
if (!ctrl->ana_log_buf) if (!ctrl->ana_log_buf)
break; break;
queue_work(nvme_wq, &ctrl->ana_work); queue_work(nvme_wq, &ctrl->ana_work);
...@@ -3696,10 +3697,10 @@ static void nvme_free_ctrl(struct device *dev) ...@@ -3696,10 +3697,10 @@ static void nvme_free_ctrl(struct device *dev)
__free_page(ctrl->discard_page); __free_page(ctrl->discard_page);
if (subsys) { if (subsys) {
mutex_lock(&subsys->lock); mutex_lock(&nvme_subsystems_lock);
list_del(&ctrl->subsys_entry); list_del(&ctrl->subsys_entry);
mutex_unlock(&subsys->lock);
sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device)); sysfs_remove_link(&subsys->dev.kobj, dev_name(ctrl->device));
mutex_unlock(&nvme_subsystems_lock);
} }
ctrl->ops->free_ctrl(ctrl); ctrl->ops->free_ctrl(ctrl);
......
...@@ -978,7 +978,7 @@ EXPORT_SYMBOL_GPL(nvmf_free_options); ...@@ -978,7 +978,7 @@ EXPORT_SYMBOL_GPL(nvmf_free_options);
NVMF_OPT_DISABLE_SQFLOW) NVMF_OPT_DISABLE_SQFLOW)
static struct nvme_ctrl * static struct nvme_ctrl *
nvmf_create_ctrl(struct device *dev, const char *buf, size_t count) nvmf_create_ctrl(struct device *dev, const char *buf)
{ {
struct nvmf_ctrl_options *opts; struct nvmf_ctrl_options *opts;
struct nvmf_transport_ops *ops; struct nvmf_transport_ops *ops;
...@@ -1073,7 +1073,7 @@ static ssize_t nvmf_dev_write(struct file *file, const char __user *ubuf, ...@@ -1073,7 +1073,7 @@ static ssize_t nvmf_dev_write(struct file *file, const char __user *ubuf,
goto out_unlock; goto out_unlock;
} }
ctrl = nvmf_create_ctrl(nvmf_device, buf, count); ctrl = nvmf_create_ctrl(nvmf_device, buf);
if (IS_ERR(ctrl)) { if (IS_ERR(ctrl)) {
ret = PTR_ERR(ctrl); ret = PTR_ERR(ctrl);
goto out_unlock; goto out_unlock;
......
...@@ -202,7 +202,7 @@ static LIST_HEAD(nvme_fc_lport_list); ...@@ -202,7 +202,7 @@ static LIST_HEAD(nvme_fc_lport_list);
static DEFINE_IDA(nvme_fc_local_port_cnt); static DEFINE_IDA(nvme_fc_local_port_cnt);
static DEFINE_IDA(nvme_fc_ctrl_cnt); static DEFINE_IDA(nvme_fc_ctrl_cnt);
static struct workqueue_struct *nvme_fc_wq;
/* /*
* These items are short-term. They will eventually be moved into * These items are short-term. They will eventually be moved into
...@@ -2054,7 +2054,7 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg) ...@@ -2054,7 +2054,7 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
*/ */
if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) { if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
active = atomic_xchg(&ctrl->err_work_active, 1); active = atomic_xchg(&ctrl->err_work_active, 1);
if (!active && !schedule_work(&ctrl->err_work)) { if (!active && !queue_work(nvme_fc_wq, &ctrl->err_work)) {
atomic_set(&ctrl->err_work_active, 0); atomic_set(&ctrl->err_work_active, 0);
WARN_ON(1); WARN_ON(1);
} }
...@@ -3399,6 +3399,10 @@ static int __init nvme_fc_init_module(void) ...@@ -3399,6 +3399,10 @@ static int __init nvme_fc_init_module(void)
{ {
int ret; int ret;
nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0);
if (!nvme_fc_wq)
return -ENOMEM;
/* /*
* NOTE: * NOTE:
* It is expected that in the future the kernel will combine * It is expected that in the future the kernel will combine
...@@ -3416,7 +3420,7 @@ static int __init nvme_fc_init_module(void) ...@@ -3416,7 +3420,7 @@ static int __init nvme_fc_init_module(void)
ret = class_register(&fc_class); ret = class_register(&fc_class);
if (ret) { if (ret) {
pr_err("couldn't register class fc\n"); pr_err("couldn't register class fc\n");
return ret; goto out_destroy_wq;
} }
/* /*
...@@ -3440,6 +3444,9 @@ static int __init nvme_fc_init_module(void) ...@@ -3440,6 +3444,9 @@ static int __init nvme_fc_init_module(void)
device_destroy(&fc_class, MKDEV(0, 0)); device_destroy(&fc_class, MKDEV(0, 0));
out_destroy_class: out_destroy_class:
class_unregister(&fc_class); class_unregister(&fc_class);
out_destroy_wq:
destroy_workqueue(nvme_fc_wq);
return ret; return ret;
} }
...@@ -3456,6 +3463,7 @@ static void __exit nvme_fc_exit_module(void) ...@@ -3456,6 +3463,7 @@ static void __exit nvme_fc_exit_module(void)
device_destroy(&fc_class, MKDEV(0, 0)); device_destroy(&fc_class, MKDEV(0, 0));
class_unregister(&fc_class); class_unregister(&fc_class);
destroy_workqueue(nvme_fc_wq);
} }
module_init(nvme_fc_init_module); module_init(nvme_fc_init_module);
......
...@@ -31,7 +31,7 @@ void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns, ...@@ -31,7 +31,7 @@ void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance); sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
} else if (ns->head->disk) { } else if (ns->head->disk) {
sprintf(disk_name, "nvme%dc%dn%d", ctrl->subsys->instance, sprintf(disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
ctrl->cntlid, ns->head->instance); ctrl->instance, ns->head->instance);
*flags = GENHD_FL_HIDDEN; *flags = GENHD_FL_HIDDEN;
} else { } else {
sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance, sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance,
......
...@@ -1296,6 +1296,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) ...@@ -1296,6 +1296,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
switch (dev->ctrl.state) { switch (dev->ctrl.state) {
case NVME_CTRL_DELETING: case NVME_CTRL_DELETING:
shutdown = true; shutdown = true;
/* fall through */
case NVME_CTRL_CONNECTING: case NVME_CTRL_CONNECTING:
case NVME_CTRL_RESETTING: case NVME_CTRL_RESETTING:
dev_warn_ratelimited(dev->ctrl.device, dev_warn_ratelimited(dev->ctrl.device,
...@@ -2280,8 +2281,6 @@ static int nvme_dev_add(struct nvme_dev *dev) ...@@ -2280,8 +2281,6 @@ static int nvme_dev_add(struct nvme_dev *dev)
return ret; return ret;
} }
dev->ctrl.tagset = &dev->tagset; dev->ctrl.tagset = &dev->tagset;
nvme_dbbuf_set(dev);
} else { } else {
blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1); blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
...@@ -2289,6 +2288,7 @@ static int nvme_dev_add(struct nvme_dev *dev) ...@@ -2289,6 +2288,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
nvme_free_queues(dev, dev->online_queues); nvme_free_queues(dev, dev->online_queues);
} }
nvme_dbbuf_set(dev);
return 0; return 0;
} }
......
...@@ -697,15 +697,6 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl) ...@@ -697,15 +697,6 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
return ret; return ret;
} }
static void nvme_rdma_free_tagset(struct nvme_ctrl *nctrl,
struct blk_mq_tag_set *set)
{
struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
blk_mq_free_tag_set(set);
nvme_rdma_dev_put(ctrl->device);
}
static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl, static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
bool admin) bool admin)
{ {
...@@ -744,24 +735,9 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl, ...@@ -744,24 +735,9 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
ret = blk_mq_alloc_tag_set(set); ret = blk_mq_alloc_tag_set(set);
if (ret) if (ret)
goto out; return ERR_PTR(ret);
/*
* We need a reference on the device as long as the tag_set is alive,
* as the MRs in the request structures need a valid ib_device.
*/
ret = nvme_rdma_dev_get(ctrl->device);
if (!ret) {
ret = -EINVAL;
goto out_free_tagset;
}
return set; return set;
out_free_tagset:
blk_mq_free_tag_set(set);
out:
return ERR_PTR(ret);
} }
static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl, static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
...@@ -769,7 +745,7 @@ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl, ...@@ -769,7 +745,7 @@ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
{ {
if (remove) { if (remove) {
blk_cleanup_queue(ctrl->ctrl.admin_q); blk_cleanup_queue(ctrl->ctrl.admin_q);
nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset); blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
} }
if (ctrl->async_event_sqe.data) { if (ctrl->async_event_sqe.data) {
nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe, nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
...@@ -847,7 +823,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl, ...@@ -847,7 +823,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
blk_cleanup_queue(ctrl->ctrl.admin_q); blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_tagset: out_free_tagset:
if (new) if (new)
nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset); blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
out_free_async_qe: out_free_async_qe:
nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe, nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
sizeof(struct nvme_command), DMA_TO_DEVICE); sizeof(struct nvme_command), DMA_TO_DEVICE);
...@@ -862,7 +838,7 @@ static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl, ...@@ -862,7 +838,7 @@ static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl,
{ {
if (remove) { if (remove) {
blk_cleanup_queue(ctrl->ctrl.connect_q); blk_cleanup_queue(ctrl->ctrl.connect_q);
nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.tagset); blk_mq_free_tag_set(ctrl->ctrl.tagset);
} }
nvme_rdma_free_io_queues(ctrl); nvme_rdma_free_io_queues(ctrl);
} }
...@@ -903,7 +879,7 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new) ...@@ -903,7 +879,7 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
blk_cleanup_queue(ctrl->ctrl.connect_q); blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tag_set: out_free_tag_set:
if (new) if (new)
nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.tagset); blk_mq_free_tag_set(ctrl->ctrl.tagset);
out_free_io_queues: out_free_io_queues:
nvme_rdma_free_io_queues(ctrl); nvme_rdma_free_io_queues(ctrl);
return ret; return ret;
......
...@@ -167,6 +167,7 @@ TRACE_EVENT(nvme_async_event, ...@@ -167,6 +167,7 @@ TRACE_EVENT(nvme_async_event,
aer_name(NVME_AER_NOTICE_NS_CHANGED), aer_name(NVME_AER_NOTICE_NS_CHANGED),
aer_name(NVME_AER_NOTICE_ANA), aer_name(NVME_AER_NOTICE_ANA),
aer_name(NVME_AER_NOTICE_FW_ACT_STARTING), aer_name(NVME_AER_NOTICE_FW_ACT_STARTING),
aer_name(NVME_AER_NOTICE_DISC_CHANGED),
aer_name(NVME_AER_ERROR), aer_name(NVME_AER_ERROR),
aer_name(NVME_AER_SMART), aer_name(NVME_AER_SMART),
aer_name(NVME_AER_CSS), aer_name(NVME_AER_CSS),
......
...@@ -1246,9 +1246,9 @@ enum { ...@@ -1246,9 +1246,9 @@ enum {
NVME_SC_FW_NEEDS_SUBSYS_RESET = 0x110, NVME_SC_FW_NEEDS_SUBSYS_RESET = 0x110,
NVME_SC_FW_NEEDS_RESET = 0x111, NVME_SC_FW_NEEDS_RESET = 0x111,
NVME_SC_FW_NEEDS_MAX_TIME = 0x112, NVME_SC_FW_NEEDS_MAX_TIME = 0x112,
NVME_SC_FW_ACIVATE_PROHIBITED = 0x113, NVME_SC_FW_ACTIVATE_PROHIBITED = 0x113,
NVME_SC_OVERLAPPING_RANGE = 0x114, NVME_SC_OVERLAPPING_RANGE = 0x114,
NVME_SC_NS_INSUFFICENT_CAP = 0x115, NVME_SC_NS_INSUFFICIENT_CAP = 0x115,
NVME_SC_NS_ID_UNAVAILABLE = 0x116, NVME_SC_NS_ID_UNAVAILABLE = 0x116,
NVME_SC_NS_ALREADY_ATTACHED = 0x118, NVME_SC_NS_ALREADY_ATTACHED = 0x118,
NVME_SC_NS_IS_PRIVATE = 0x119, NVME_SC_NS_IS_PRIVATE = 0x119,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment