Commit a347c153 authored by Jens Axboe's avatar Jens Axboe

Merge tag 'nvme-5.14-2021-07-15' of git://git.infradead.org/nvme into block-5.14

Pull NVMe fixes from Christoph:

"nvme fixes for Linux 5.14

 - fix various races in nvme-pci when shutting down just after probing
   (Casey Chen)
 - fix a net_device leak in nvme-tcp (Prabhakar Kushwaha)"

* tag 'nvme-5.14-2021-07-15' of git://git.infradead.org/nvme:
  nvme-pci: do not call nvme_dev_remove_admin from nvme_remove
  nvme-pci: fix multiple races in nvme_setup_io_queues
  nvme-tcp: use __dev_get_by_name instead dev_get_by_name for OPT_HOST_IFACE
parents 16ad3db3 251ef6f7
...@@ -1554,6 +1554,28 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid) ...@@ -1554,6 +1554,28 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
wmb(); /* ensure the first interrupt sees the initialization */ wmb(); /* ensure the first interrupt sees the initialization */
} }
/*
 * Try to take shutdown_lock before touching IO queue resources.
 * Returns 0 with the lock held on success, or -ENODEV when setup
 * must be abandoned.
 */
static int nvme_setup_io_queues_trylock(struct nvme_dev *dev)
{
	/* nvme_dev_disable holds the lock during teardown; bail out. */
	if (!mutex_trylock(&dev->shutdown_lock))
		return -ENODEV;

	/* Queue setup is only valid while the controller is connecting. */
	if (dev->ctrl.state == NVME_CTRL_CONNECTING)
		return 0;

	/* Wrong controller state: drop the lock and fail early. */
	mutex_unlock(&dev->shutdown_lock);
	return -ENODEV;
}
static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled) static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
{ {
struct nvme_dev *dev = nvmeq->dev; struct nvme_dev *dev = nvmeq->dev;
...@@ -1582,8 +1604,11 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled) ...@@ -1582,8 +1604,11 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
goto release_cq; goto release_cq;
nvmeq->cq_vector = vector; nvmeq->cq_vector = vector;
nvme_init_queue(nvmeq, qid);
result = nvme_setup_io_queues_trylock(dev);
if (result)
return result;
nvme_init_queue(nvmeq, qid);
if (!polled) { if (!polled) {
result = queue_request_irq(nvmeq); result = queue_request_irq(nvmeq);
if (result < 0) if (result < 0)
...@@ -1591,10 +1616,12 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled) ...@@ -1591,10 +1616,12 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
} }
set_bit(NVMEQ_ENABLED, &nvmeq->flags); set_bit(NVMEQ_ENABLED, &nvmeq->flags);
mutex_unlock(&dev->shutdown_lock);
return result; return result;
release_sq: release_sq:
dev->online_queues--; dev->online_queues--;
mutex_unlock(&dev->shutdown_lock);
adapter_delete_sq(dev, qid); adapter_delete_sq(dev, qid);
release_cq: release_cq:
adapter_delete_cq(dev, qid); adapter_delete_cq(dev, qid);
...@@ -2167,7 +2194,18 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) ...@@ -2167,7 +2194,18 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
if (nr_io_queues == 0) if (nr_io_queues == 0)
return 0; return 0;
clear_bit(NVMEQ_ENABLED, &adminq->flags); /*
* Free IRQ resources as soon as NVMEQ_ENABLED bit transitions
* from set to unset. If there is a window before it is truly freed,
* pci_free_irq_vectors() jumping into this window will crash.
* And take lock to avoid racing with pci_free_irq_vectors() in
* nvme_dev_disable() path.
*/
result = nvme_setup_io_queues_trylock(dev);
if (result)
return result;
if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags))
pci_free_irq(pdev, 0, adminq);
if (dev->cmb_use_sqes) { if (dev->cmb_use_sqes) {
result = nvme_cmb_qdepth(dev, nr_io_queues, result = nvme_cmb_qdepth(dev, nr_io_queues,
...@@ -2183,14 +2221,17 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) ...@@ -2183,14 +2221,17 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
result = nvme_remap_bar(dev, size); result = nvme_remap_bar(dev, size);
if (!result) if (!result)
break; break;
if (!--nr_io_queues) if (!--nr_io_queues) {
return -ENOMEM; result = -ENOMEM;
goto out_unlock;
}
} while (1); } while (1);
adminq->q_db = dev->dbs; adminq->q_db = dev->dbs;
retry: retry:
/* Deregister the admin queue's interrupt */ /* Deregister the admin queue's interrupt */
pci_free_irq(pdev, 0, adminq); if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags))
pci_free_irq(pdev, 0, adminq);
/* /*
* If we enable msix early due to not intx, disable it again before * If we enable msix early due to not intx, disable it again before
...@@ -2199,8 +2240,10 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) ...@@ -2199,8 +2240,10 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
pci_free_irq_vectors(pdev); pci_free_irq_vectors(pdev);
result = nvme_setup_irqs(dev, nr_io_queues); result = nvme_setup_irqs(dev, nr_io_queues);
if (result <= 0) if (result <= 0) {
return -EIO; result = -EIO;
goto out_unlock;
}
dev->num_vecs = result; dev->num_vecs = result;
result = max(result - 1, 1); result = max(result - 1, 1);
...@@ -2214,8 +2257,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) ...@@ -2214,8 +2257,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
*/ */
result = queue_request_irq(adminq); result = queue_request_irq(adminq);
if (result) if (result)
return result; goto out_unlock;
set_bit(NVMEQ_ENABLED, &adminq->flags); set_bit(NVMEQ_ENABLED, &adminq->flags);
mutex_unlock(&dev->shutdown_lock);
result = nvme_create_io_queues(dev); result = nvme_create_io_queues(dev);
if (result || dev->online_queues < 2) if (result || dev->online_queues < 2)
...@@ -2224,6 +2268,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) ...@@ -2224,6 +2268,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
if (dev->online_queues - 1 < dev->max_qid) { if (dev->online_queues - 1 < dev->max_qid) {
nr_io_queues = dev->online_queues - 1; nr_io_queues = dev->online_queues - 1;
nvme_disable_io_queues(dev); nvme_disable_io_queues(dev);
result = nvme_setup_io_queues_trylock(dev);
if (result)
return result;
nvme_suspend_io_queues(dev); nvme_suspend_io_queues(dev);
goto retry; goto retry;
} }
...@@ -2232,6 +2279,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) ...@@ -2232,6 +2279,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
dev->io_queues[HCTX_TYPE_READ], dev->io_queues[HCTX_TYPE_READ],
dev->io_queues[HCTX_TYPE_POLL]); dev->io_queues[HCTX_TYPE_POLL]);
return 0; return 0;
out_unlock:
mutex_unlock(&dev->shutdown_lock);
return result;
} }
static void nvme_del_queue_end(struct request *req, blk_status_t error) static void nvme_del_queue_end(struct request *req, blk_status_t error)
...@@ -2962,7 +3012,6 @@ static void nvme_remove(struct pci_dev *pdev) ...@@ -2962,7 +3012,6 @@ static void nvme_remove(struct pci_dev *pdev)
if (!pci_device_is_present(pdev)) { if (!pci_device_is_present(pdev)) {
nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD); nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
nvme_dev_disable(dev, true); nvme_dev_disable(dev, true);
nvme_dev_remove_admin(dev);
} }
flush_work(&dev->ctrl.reset_work); flush_work(&dev->ctrl.reset_work);
......
...@@ -123,7 +123,6 @@ struct nvme_tcp_ctrl { ...@@ -123,7 +123,6 @@ struct nvme_tcp_ctrl {
struct blk_mq_tag_set admin_tag_set; struct blk_mq_tag_set admin_tag_set;
struct sockaddr_storage addr; struct sockaddr_storage addr;
struct sockaddr_storage src_addr; struct sockaddr_storage src_addr;
struct net_device *ndev;
struct nvme_ctrl ctrl; struct nvme_ctrl ctrl;
struct work_struct err_work; struct work_struct err_work;
...@@ -2533,8 +2532,7 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev, ...@@ -2533,8 +2532,7 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
} }
if (opts->mask & NVMF_OPT_HOST_IFACE) { if (opts->mask & NVMF_OPT_HOST_IFACE) {
ctrl->ndev = dev_get_by_name(&init_net, opts->host_iface); if (!__dev_get_by_name(&init_net, opts->host_iface)) {
if (!ctrl->ndev) {
pr_err("invalid interface passed: %s\n", pr_err("invalid interface passed: %s\n",
opts->host_iface); opts->host_iface);
ret = -ENODEV; ret = -ENODEV;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment