Commit 74413084 authored by Linus Torvalds

Merge tag 'nvme-for-4.18' of git://git.infradead.org/nvme

Pull NVMe fixes from Christoph Hellwig:

 - fix a regression in 4.18 that causes a memory leak on probe failure
   (Keith Busch)

 - fix a deadlock in the passthrough ioctl code (Scott Bauer)

 - don't enable AENs if not supported (Weiping Zhang)

 - fix an old regression in metadata handling in the passthrough ioctl
   code (Roland Dreier)

* tag 'nvme-for-4.18' of git://git.infradead.org/nvme:
  nvme: fix handling of metadata_len for NVME_IOCTL_IO_CMD
  nvme: don't enable AEN if not supported
  nvme: ensure forward progress during Admin passthru
  nvme-pci: fix memory leak on probe failure
parents 165ea0d1 9b382768
drivers/nvme/host/core.c
@@ -100,6 +100,22 @@ static struct class *nvme_subsys_class;
 static void nvme_ns_remove(struct nvme_ns *ns);
 static int nvme_revalidate_disk(struct gendisk *disk);
 static void nvme_put_subsystem(struct nvme_subsystem *subsys);
+static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
+                                           unsigned nsid);
+
+static void nvme_set_queue_dying(struct nvme_ns *ns)
+{
+        /*
+         * Revalidating a dead namespace sets capacity to 0. This will end
+         * buffered writers dirtying pages that can't be synced.
+         */
+        if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
+                return;
+        revalidate_disk(ns->disk);
+        blk_set_queue_dying(ns->queue);
+        /* Forcibly unquiesce queues to avoid blocking dispatch */
+        blk_mq_unquiesce_queue(ns->queue);
+}
 
 static void nvme_queue_scan(struct nvme_ctrl *ctrl)
 {
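A note on the new helper: test_and_set_bit() acts as an atomic one-shot latch, so nvme_set_queue_dying() can be reached concurrently from both the passthru error path and nvme_kill_queues() below, and only the first caller does the work. A minimal userland sketch of that latch semantics, using C11 atomics and hypothetical names rather than kernel code:

#include <stdatomic.h>
#include <stdbool.h>

#define NS_DEAD_BIT 0   /* hypothetical bit index standing in for NVME_NS_DEAD */

/*
 * Userland analogue of test_and_set_bit(): atomically set the bit and
 * report whether this caller was the one that flipped it. Later (or
 * concurrent) callers see the bit already set and can return early.
 */
static bool mark_dead_once(atomic_ulong *flags)
{
        unsigned long bit = 1UL << NS_DEAD_BIT;

        return !(atomic_fetch_or(flags, bit) & bit);
}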
@@ -1044,14 +1060,17 @@ EXPORT_SYMBOL_GPL(nvme_set_queue_count);
 
 static void nvme_enable_aen(struct nvme_ctrl *ctrl)
 {
-        u32 result;
+        u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
         int status;
 
-        status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT,
-                        ctrl->oaes & NVME_AEN_SUPPORTED, NULL, 0, &result);
+        if (!supported_aens)
+                return;
+
+        status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
+                        NULL, 0, &result);
         if (status)
                 dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
-                         ctrl->oaes & NVME_AEN_SUPPORTED);
+                         supported_aens);
 }
 
 static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
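The new early return avoids issuing a Set Features (Asynchronous Event Configuration) command with an empty event mask on controllers whose OAES field advertises nothing the driver handles. For reference, the 4.18-era driver mask covers only namespace-attribute and firmware-activation notices; a sketch of that definition from memory, not part of this patch:

/* Only these AEN bits are consumed by the driver, so everything else
 * reported in the controller's OAES field is masked off first.
 */
#define NVME_AEN_SUPPORTED (NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT)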
@@ -1151,19 +1170,15 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 
 static void nvme_update_formats(struct nvme_ctrl *ctrl)
 {
-        struct nvme_ns *ns, *next;
-        LIST_HEAD(rm_list);
+        struct nvme_ns *ns;
 
-        down_write(&ctrl->namespaces_rwsem);
-        list_for_each_entry(ns, &ctrl->namespaces, list) {
-                if (ns->disk && nvme_revalidate_disk(ns->disk)) {
-                        list_move_tail(&ns->list, &rm_list);
-                }
-        }
-        up_write(&ctrl->namespaces_rwsem);
+        down_read(&ctrl->namespaces_rwsem);
+        list_for_each_entry(ns, &ctrl->namespaces, list)
+                if (ns->disk && nvme_revalidate_disk(ns->disk))
+                        nvme_set_queue_dying(ns);
+        up_read(&ctrl->namespaces_rwsem);
 
-        list_for_each_entry_safe(ns, next, &rm_list, list)
-                nvme_ns_remove(ns);
+        nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
 }
 
 static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
@@ -1218,7 +1233,7 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 
         effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
         status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
                         (void __user *)(uintptr_t)cmd.addr, cmd.data_len,
-                        (void __user *)(uintptr_t)cmd.metadata, cmd.metadata,
+                        (void __user *)(uintptr_t)cmd.metadata, cmd.metadata_len,
                         0, &cmd.result, timeout);
         nvme_passthru_end(ctrl, effects);
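The one-word change above is the entire metadata fix: nvme_submit_user_cmd() was being handed cmd.metadata, the user buffer pointer, in the position of the metadata length, so any NVME_IOCTL_IO_CMD carrying metadata was mis-sized. A hedged userspace sketch of the call this path serves (the helper and its LBA choice are hypothetical; the struct and ioctl come from linux/nvme_ioctl.h):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/nvme_ioctl.h>

/* Hypothetical helper: read one block plus its metadata via passthru. */
static int nvme_read_lba0(int fd, uint32_t nsid,
                          void *data, uint32_t data_len,
                          void *meta, uint32_t meta_len)
{
        struct nvme_passthru_cmd cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode = 0x02;                 /* NVMe I/O Read */
        cmd.nsid = nsid;
        cmd.addr = (uintptr_t)data;        /* data buffer */
        cmd.data_len = data_len;
        cmd.metadata = (uintptr_t)meta;    /* metadata buffer pointer... */
        cmd.metadata_len = meta_len;       /* ...and its length, now honored */
        cmd.cdw10 = 0;                     /* starting LBA, bits 31:0 */
        cmd.cdw11 = 0;                     /* starting LBA, bits 63:32 */
        cmd.cdw12 = 0;                     /* NLB - 1: read a single block */

        return ioctl(fd, NVME_IOCTL_IO_CMD, &cmd);
}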
@@ -3138,7 +3153,7 @@ static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
 
         down_write(&ctrl->namespaces_rwsem);
         list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
-                if (ns->head->ns_id > nsid)
+                if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags))
                         list_move_tail(&ns->list, &rm_list);
         }
         up_write(&ctrl->namespaces_rwsem);
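The widened condition is what lets the reworked nvme_update_formats() hand off removal: NVME_NSID_ALL is 0xffffffff, the largest 32-bit namespace ID, so the ns_id > nsid half can never fire and the call removes exactly the namespaces flagged NVME_NS_DEAD. A standalone sketch of that reasoning (should_remove() is a hypothetical mirror of the condition, not kernel code):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define NVME_NSID_ALL 0xffffffff   /* matches include/linux/nvme.h */

/* Mirrors the rewritten check in nvme_remove_invalid_namespaces(). */
static bool should_remove(uint32_t ns_id, bool dead, uint32_t nsid)
{
        return ns_id > nsid || dead;
}

int main(void)
{
        /* With nsid == NVME_NSID_ALL, only DEAD namespaces are culled. */
        assert(!should_remove(1, false, NVME_NSID_ALL));
        assert(should_remove(1, true, NVME_NSID_ALL));
        return 0;
}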
@@ -3542,19 +3557,9 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
         if (ctrl->admin_q)
                 blk_mq_unquiesce_queue(ctrl->admin_q);
 
-        list_for_each_entry(ns, &ctrl->namespaces, list) {
-                /*
-                 * Revalidating a dead namespace sets capacity to 0. This will
-                 * end buffered writers dirtying pages that can't be synced.
-                 */
-                if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
-                        continue;
-                revalidate_disk(ns->disk);
-                blk_set_queue_dying(ns->queue);
-                /* Forcibly unquiesce queues to avoid blocking dispatch */
-                blk_mq_unquiesce_queue(ns->queue);
-        }
+        list_for_each_entry(ns, &ctrl->namespaces, list)
+                nvme_set_queue_dying(ns);
+
         up_read(&ctrl->namespaces_rwsem);
 }
 EXPORT_SYMBOL_GPL(nvme_kill_queues);
drivers/nvme/host/pci.c
@@ -2556,11 +2556,6 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
         quirks |= check_vendor_combination_bug(pdev);
 
-        result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
-                        quirks);
-        if (result)
-                goto release_pools;
-
         /*
          * Double check that our mempool alloc size will cover the biggest
          * command we support.
@@ -2578,6 +2573,11 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                 goto release_pools;
         }
 
+        result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
+                        quirks);
+        if (result)
+                goto release_mempool;
+
         dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
 
         nvme_get_ctrl(&dev->ctrl);
@@ -2585,6 +2585,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
         return 0;
 
+ release_mempool:
+        mempool_destroy(dev->iod_mempool);
  release_pools:
         nvme_release_prp_pools(dev);
  unmap:
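The probe reordering fixes the leak by restoring symmetric unwinding: previously nvme_init_ctrl() ran before the iod mempool was allocated, so a mempool allocation failure jumped to release_pools without undoing the controller initialization. Initializing the controller last, with the new release_mempool label, keeps every goto label releasing exactly what was set up before the failing step. A self-contained sketch of that goto-unwind discipline (alloc_a() and friends are hypothetical stand-ins, not driver functions):

#include <stdio.h>

/* Hypothetical stand-ins for probe-time setup steps. */
static int alloc_a(void) { return 0; }
static int alloc_b(void) { return 0; }   /* the mempool in the real probe */
static int alloc_c(void) { return 0; }   /* nvme_init_ctrl(), now last */
static void free_b(void) { puts("free b"); }
static void free_a(void) { puts("free a"); }

static int setup(void)
{
        int err;

        err = alloc_a();
        if (err)
                return err;
        err = alloc_b();
        if (err)
                goto out_free_a;
        err = alloc_c();
        if (err)
                goto out_free_b;        /* new label: nothing leaks */
        return 0;

out_free_b:
        free_b();
out_free_a:
        free_a();
        return err;
}

int main(void) { return setup(); }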