Commit f4345f05 authored by Linus Torvalds

Merge tag 'block-6.9-20240510' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:

 - NVMe pull request via Keith:
     - nvme target fixes (Sagi, Dan, Maurizio)
     - new vendor quirk for broken MSI (Sean)

 - Virtual boundary fix for a regression in this merge window (Ming)

* tag 'block-6.9-20240510' of git://git.kernel.dk/linux:
  nvmet-rdma: fix possible bad dereference when freeing rsps
  nvmet: prevent sprintf() overflow in nvmet_subsys_nsid_exists()
  nvmet: make nvmet_wq unbound
  nvmet-auth: return the error code to the nvmet_auth_ctrl_hash() callers
  nvme-pci: Add quirk for broken MSIs
  block: set default max segment size in case of virt_boundary
parents ed44935c a7721784
block/blk-settings.c
@@ -188,7 +188,10 @@ static int blk_validate_limits(struct queue_limits *lim)
 	 * bvec and lower layer bio splitting is supposed to handle the two
 	 * correctly.
 	 */
-	if (!lim->virt_boundary_mask) {
+	if (lim->virt_boundary_mask) {
+		if (!lim->max_segment_size)
+			lim->max_segment_size = UINT_MAX;
+	} else {
 		/*
 		 * The maximum segment size has an odd historic 64k default that
 		 * drivers probably should override. Just like the I/O size we
...
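The hunk above changes only the defaulting logic: when a driver sets a virtual boundary but leaves max_segment_size unset (zero), validation now fills in UINT_MAX instead of leaving it at zero, while devices without a virtual boundary keep the historic 64k default mentioned in the comment. Below is a minimal userspace sketch of that defaulting; the struct and the 64k constant are stand-ins, and only the field names and the UINT_MAX default come from the hunk.

/*
 * Illustrative userspace model of the defaulting above; "fake_limits" and
 * FAKE_BLK_MAX_SEGMENT_SIZE are stand-ins, not the kernel's structures.
 */
#include <limits.h>
#include <stdio.h>

#define FAKE_BLK_MAX_SEGMENT_SIZE	65536	/* the "odd historic 64k default" */

struct fake_limits {
	unsigned long virt_boundary_mask;
	unsigned int max_segment_size;	/* 0 means the driver did not set it */
};

static void apply_segment_default(struct fake_limits *lim)
{
	if (lim->virt_boundary_mask) {
		/* virt_boundary devices now get an effectively unlimited segment size */
		if (!lim->max_segment_size)
			lim->max_segment_size = UINT_MAX;
	} else if (!lim->max_segment_size) {
		lim->max_segment_size = FAKE_BLK_MAX_SEGMENT_SIZE;
	}
}

int main(void)
{
	struct fake_limits lim = { .virt_boundary_mask = 4095 };

	apply_segment_default(&lim);
	printf("max_segment_size = %u\n", lim.max_segment_size);	/* 4294967295 */
	return 0;
}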
drivers/nvme/host/nvme.h
@@ -162,6 +162,11 @@ enum nvme_quirks {
 	 * Disables simple suspend/resume path.
 	 */
 	NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND	= (1 << 20),
+
+	/*
+	 * MSI (but not MSI-X) interrupts are broken and never fire.
+	 */
+	NVME_QUIRK_BROKEN_MSI			= (1 << 21),
 };
 
 /*
...
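The new quirk bit only takes effect once it reaches dev->ctrl.quirks and is tested with a bitwise AND, which is exactly what the pci.c hunks below do. As orientation, a compressed sketch of that plumbing follows; apart from NVME_QUIRK_BROKEN_MSI, the names here are simplified stand-ins, not the driver's own code.

/* Simplified stand-in for the quirk plumbing; not the actual nvme-pci code. */
#include <stdio.h>

enum fake_nvme_quirks {
	NVME_QUIRK_BROKEN_MSI = (1 << 21),	/* the bit added above */
};

struct fake_ctrl {
	unsigned long quirks;
};

/* At probe time, the matched PCI id table entry's .driver_data ends up as the
 * controller's quirks mask. */
static void fake_probe(struct fake_ctrl *ctrl, unsigned long id_driver_data)
{
	ctrl->quirks = id_driver_data;
}

int main(void)
{
	struct fake_ctrl ctrl;

	fake_probe(&ctrl, NVME_QUIRK_BROKEN_MSI);	/* e.g. the SN530 entry below */
	if (ctrl.quirks & NVME_QUIRK_BROKEN_MSI)
		printf("MSI will be avoided for this controller\n");
	return 0;
}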
drivers/nvme/host/pci.c
@@ -2224,6 +2224,7 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
 		.priv		= dev,
 	};
 	unsigned int irq_queues, poll_queues;
+	unsigned int flags = PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY;
 
 	/*
 	 * Poll queues don't need interrupts, but we need at least one I/O queue
@@ -2247,8 +2248,10 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
 	irq_queues = 1;
 	if (!(dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR))
 		irq_queues += (nr_io_queues - poll_queues);
-	return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues,
-			      PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
+	if (dev->ctrl.quirks & NVME_QUIRK_BROKEN_MSI)
+		flags &= ~PCI_IRQ_MSI;
+	return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues, flags,
+					      &affd);
 }
 
 static unsigned int nvme_max_io_queues(struct nvme_dev *dev)
@@ -2477,6 +2480,7 @@ static int nvme_pci_enable(struct nvme_dev *dev)
 {
 	int result = -ENOMEM;
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
+	unsigned int flags = PCI_IRQ_ALL_TYPES;
 
 	if (pci_enable_device_mem(pdev))
 		return result;
@@ -2493,7 +2497,9 @@ static int nvme_pci_enable(struct nvme_dev *dev)
 	 * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll
 	 * adjust this later.
 	 */
-	result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+	if (dev->ctrl.quirks & NVME_QUIRK_BROKEN_MSI)
+		flags &= ~PCI_IRQ_MSI;
+	result = pci_alloc_irq_vectors(pdev, 1, 1, flags);
 	if (result < 0)
 		goto disable;
@@ -3390,6 +3396,8 @@ static const struct pci_device_id nvme_id_table[] = {
 		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
 				NVME_QUIRK_DISABLE_WRITE_ZEROES|
 				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+	{ PCI_DEVICE(0x15b7, 0x5008),	/* Sandisk SN530 */
+		.driver_data = NVME_QUIRK_BROKEN_MSI },
 	{ PCI_DEVICE(0x1987, 0x5012),	/* Phison E12 */
 		.driver_data = NVME_QUIRK_BOGUS_NID, },
 	{ PCI_DEVICE(0x1987, 0x5016),	/* Phison E16 */
...
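Both pci.c call sites now build the allowed-interrupt-type mask in a local variable and clear PCI_IRQ_MSI when the quirk is set, so vector allocation can still use MSI-X or fall back to a legacy line interrupt on affected devices. A small self-contained sketch of that flag arithmetic follows; the flag values are defined locally for illustration only (the real constants live in <linux/pci.h>).

/* Illustrative flag arithmetic only; the FAKE_ values are local stand-ins for
 * the PCI_IRQ_* constants in <linux/pci.h>. */
#include <stdio.h>

#define FAKE_PCI_IRQ_INTX	(1u << 0)	/* legacy line interrupt */
#define FAKE_PCI_IRQ_MSI	(1u << 1)
#define FAKE_PCI_IRQ_MSIX	(1u << 2)
#define FAKE_PCI_IRQ_ALL_TYPES	(FAKE_PCI_IRQ_INTX | FAKE_PCI_IRQ_MSI | FAKE_PCI_IRQ_MSIX)

int main(void)
{
	unsigned int flags = FAKE_PCI_IRQ_ALL_TYPES;
	int broken_msi = 1;	/* stands in for dev->ctrl.quirks & NVME_QUIRK_BROKEN_MSI */

	if (broken_msi)
		flags &= ~FAKE_PCI_IRQ_MSI;	/* MSI-X and INTx remain as fallbacks */

	printf("MSI-X allowed: %d, MSI allowed: %d, INTx allowed: %d\n",
	       !!(flags & FAKE_PCI_IRQ_MSIX), !!(flags & FAKE_PCI_IRQ_MSI),
	       !!(flags & FAKE_PCI_IRQ_INTX));
	return 0;
}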
drivers/nvme/target/auth.c
@@ -480,7 +480,7 @@ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
 	nvme_auth_free_key(transformed_key);
 out_free_tfm:
 	crypto_free_shash(shash_tfm);
-	return 0;
+	return ret;
 }
 
 int nvmet_auth_ctrl_exponential(struct nvmet_req *req,
...
drivers/nvme/target/configfs.c
@@ -757,10 +757,9 @@ static struct configfs_attribute *nvmet_ns_attrs[] = {
 bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid)
 {
 	struct config_item *ns_item;
-	char name[4] = {};
+	char name[12];
 
-	if (sprintf(name, "%u", nsid) <= 0)
-		return false;
+	snprintf(name, sizeof(name), "%u", nsid);
 	mutex_lock(&subsys->namespaces_group.cg_subsys->su_mutex);
 	ns_item = config_group_find_item(&subsys->namespaces_group, name);
 	mutex_unlock(&subsys->namespaces_group.cg_subsys->su_mutex);
...
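The buffer change above is about worst-case length: a u32 nsid can be as large as 4294967295, which takes 10 characters plus the NUL terminator, so the old char name[4] overflowed for any nsid of four or more digits; 12 bytes covers the worst case with room to spare, and snprintf() bounds the write and always terminates. A tiny standalone illustration:

/* Standalone illustration of the worst-case decimal length of a u32. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t nsid = UINT32_MAX;	/* 4294967295: the 10-digit worst case */
	char name[12];

	int len = snprintf(name, sizeof(name), "%" PRIu32, nsid);
	printf("\"%s\" uses %d characters plus the terminator\n", name, len);
	return 0;
}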
drivers/nvme/target/core.c
@@ -1686,7 +1686,8 @@ static int __init nvmet_init(void)
 	if (!buffered_io_wq)
 		goto out_free_zbd_work_queue;
 
-	nvmet_wq = alloc_workqueue("nvmet-wq", WQ_MEM_RECLAIM, 0);
+	nvmet_wq = alloc_workqueue("nvmet-wq",
+			WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
 	if (!nvmet_wq)
 		goto out_free_buffered_work_queue;
...
drivers/nvme/target/rdma.c
@@ -474,12 +474,8 @@ nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
 	return 0;
 
 out_free:
-	while (--i >= 0) {
-		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
-
-		list_del(&rsp->free_list);
-		nvmet_rdma_free_rsp(ndev, rsp);
-	}
+	while (--i >= 0)
+		nvmet_rdma_free_rsp(ndev, &queue->rsps[i]);
 	kfree(queue->rsps);
 out:
 	return ret;
@@ -490,12 +486,8 @@ static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
 	struct nvmet_rdma_device *ndev = queue->dev;
 	int i, nr_rsps = queue->recv_queue_size * 2;
 
-	for (i = 0; i < nr_rsps; i++) {
-		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
-
-		list_del(&rsp->free_list);
-		nvmet_rdma_free_rsp(ndev, rsp);
-	}
+	for (i = 0; i < nr_rsps; i++)
+		nvmet_rdma_free_rsp(ndev, &queue->rsps[i]);
 	kfree(queue->rsps);
 }