Commit 2e572599 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "A few fixes for the current series. This contains:

   - Two fixes for NVMe:

     One fixes a reset race that can be triggered by repeated
     insert/removal of the module.

     The other fixes an issue on some platforms, where we get probe
     timeouts since legacy interrupts aren't working.  This used not to
     be a problem since we had the worker thread poll for completions,
     but since that was killed off, it means those poor souls can't
     successfully probe their NVMe device.  Use a proper IRQ check and
     probe (msi-x -> msi -> legacy), like most other drivers, to work
     around this.  Both from Keith.

   - A loop corruption issue with offset in iters, from Ming Lei.

   - A fix for not having the partition stat per-cpu ref count
     initialized before sending out the KOBJ_ADD, which could cause user
     space to access the counter prior to initialization.  Also from
     Ming Lei.

   - A fix for using the wrong congestion state, from Kaixu Xia"

* 'for-linus' of git://git.kernel.dk/linux-block:
  block: loop: fix filesystem corruption in case of aio/dio
  NVMe: Always use MSI/MSI-x interrupts
  NVMe: Fix reset/remove race
  writeback: fix the wrong congested state variable definition
  block: partition: initialize percpuref before sending out KOBJ_ADD
parents f3c9a1ab a7297a6a
...@@ -361,15 +361,20 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno, ...@@ -361,15 +361,20 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
goto out_del; goto out_del;
} }
err = hd_ref_init(p);
if (err) {
if (flags & ADDPART_FLAG_WHOLEDISK)
goto out_remove_file;
goto out_del;
}
/* everything is up and running, commence */ /* everything is up and running, commence */
rcu_assign_pointer(ptbl->part[partno], p); rcu_assign_pointer(ptbl->part[partno], p);
/* suppress uevent if the disk suppresses it */ /* suppress uevent if the disk suppresses it */
if (!dev_get_uevent_suppress(ddev)) if (!dev_get_uevent_suppress(ddev))
kobject_uevent(&pdev->kobj, KOBJ_ADD); kobject_uevent(&pdev->kobj, KOBJ_ADD);
return p;
if (!hd_ref_init(p))
return p;
out_free_info: out_free_info:
free_part_info(p); free_part_info(p);
...@@ -378,6 +383,8 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno, ...@@ -378,6 +383,8 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
out_free: out_free:
kfree(p); kfree(p);
return ERR_PTR(err); return ERR_PTR(err);
out_remove_file:
device_remove_file(pdev, &dev_attr_whole_disk);
out_del: out_del:
kobject_put(p->holder_dir); kobject_put(p->holder_dir);
device_del(pdev); device_del(pdev);
......
...@@ -488,6 +488,12 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd, ...@@ -488,6 +488,12 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter); bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
iov_iter_bvec(&iter, ITER_BVEC | rw, bvec, iov_iter_bvec(&iter, ITER_BVEC | rw, bvec,
bio_segments(bio), blk_rq_bytes(cmd->rq)); bio_segments(bio), blk_rq_bytes(cmd->rq));
/*
* This bio may be started from the middle of the 'bvec'
* because of bio splitting, so offset from the bvec must
* be passed to iov iterator
*/
iter.iov_offset = bio->bi_iter.bi_bvec_done;
cmd->iocb.ki_pos = pos; cmd->iocb.ki_pos = pos;
cmd->iocb.ki_filp = file; cmd->iocb.ki_filp = file;
......
...@@ -1478,8 +1478,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) ...@@ -1478,8 +1478,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
if (result > 0) { if (result > 0) {
dev_err(dev->ctrl.device, dev_err(dev->ctrl.device,
"Could not set queue count (%d)\n", result); "Could not set queue count (%d)\n", result);
nr_io_queues = 0; return 0;
result = 0;
} }
if (dev->cmb && NVME_CMB_SQS(dev->cmbsz)) { if (dev->cmb && NVME_CMB_SQS(dev->cmbsz)) {
...@@ -1513,7 +1512,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) ...@@ -1513,7 +1512,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
* If we enable msix early due to not intx, disable it again before * If we enable msix early due to not intx, disable it again before
* setting up the full range we need. * setting up the full range we need.
*/ */
if (!pdev->irq) if (pdev->msi_enabled)
pci_disable_msi(pdev);
else if (pdev->msix_enabled)
pci_disable_msix(pdev); pci_disable_msix(pdev);
for (i = 0; i < nr_io_queues; i++) for (i = 0; i < nr_io_queues; i++)
...@@ -1696,7 +1697,6 @@ static int nvme_pci_enable(struct nvme_dev *dev) ...@@ -1696,7 +1697,6 @@ static int nvme_pci_enable(struct nvme_dev *dev)
if (pci_enable_device_mem(pdev)) if (pci_enable_device_mem(pdev))
return result; return result;
dev->entry[0].vector = pdev->irq;
pci_set_master(pdev); pci_set_master(pdev);
if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)) && if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)) &&
...@@ -1709,13 +1709,18 @@ static int nvme_pci_enable(struct nvme_dev *dev) ...@@ -1709,13 +1709,18 @@ static int nvme_pci_enable(struct nvme_dev *dev)
} }
/* /*
* Some devices don't advertse INTx interrupts, pre-enable a single * Some devices and/or platforms don't advertise or work with INTx
* MSIX vec for setup. We'll adjust this later. * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll
* adjust this later.
*/ */
if (!pdev->irq) { if (pci_enable_msix(pdev, dev->entry, 1)) {
result = pci_enable_msix(pdev, dev->entry, 1); pci_enable_msi(pdev);
if (result < 0) dev->entry[0].vector = pdev->irq;
goto disable; }
if (!dev->entry[0].vector) {
result = -ENODEV;
goto disable;
} }
cap = lo_hi_readq(dev->bar + NVME_REG_CAP); cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
...@@ -1859,6 +1864,9 @@ static void nvme_reset_work(struct work_struct *work) ...@@ -1859,6 +1864,9 @@ static void nvme_reset_work(struct work_struct *work)
if (dev->ctrl.ctrl_config & NVME_CC_ENABLE) if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
nvme_dev_disable(dev, false); nvme_dev_disable(dev, false);
if (test_bit(NVME_CTRL_REMOVING, &dev->flags))
goto out;
set_bit(NVME_CTRL_RESETTING, &dev->flags); set_bit(NVME_CTRL_RESETTING, &dev->flags);
result = nvme_pci_enable(dev); result = nvme_pci_enable(dev);
...@@ -2078,11 +2086,10 @@ static void nvme_remove(struct pci_dev *pdev) ...@@ -2078,11 +2086,10 @@ static void nvme_remove(struct pci_dev *pdev)
{ {
struct nvme_dev *dev = pci_get_drvdata(pdev); struct nvme_dev *dev = pci_get_drvdata(pdev);
del_timer_sync(&dev->watchdog_timer);
set_bit(NVME_CTRL_REMOVING, &dev->flags); set_bit(NVME_CTRL_REMOVING, &dev->flags);
pci_set_drvdata(pdev, NULL); pci_set_drvdata(pdev, NULL);
flush_work(&dev->async_work); flush_work(&dev->async_work);
flush_work(&dev->reset_work);
flush_work(&dev->scan_work); flush_work(&dev->scan_work);
nvme_remove_namespaces(&dev->ctrl); nvme_remove_namespaces(&dev->ctrl);
nvme_uninit_ctrl(&dev->ctrl); nvme_uninit_ctrl(&dev->ctrl);
......
...@@ -898,7 +898,7 @@ static atomic_t nr_wb_congested[2]; ...@@ -898,7 +898,7 @@ static atomic_t nr_wb_congested[2];
void clear_wb_congested(struct bdi_writeback_congested *congested, int sync) void clear_wb_congested(struct bdi_writeback_congested *congested, int sync)
{ {
wait_queue_head_t *wqh = &congestion_wqh[sync]; wait_queue_head_t *wqh = &congestion_wqh[sync];
enum wb_state bit; enum wb_congested_state bit;
bit = sync ? WB_sync_congested : WB_async_congested; bit = sync ? WB_sync_congested : WB_async_congested;
if (test_and_clear_bit(bit, &congested->state)) if (test_and_clear_bit(bit, &congested->state))
...@@ -911,7 +911,7 @@ EXPORT_SYMBOL(clear_wb_congested); ...@@ -911,7 +911,7 @@ EXPORT_SYMBOL(clear_wb_congested);
void set_wb_congested(struct bdi_writeback_congested *congested, int sync) void set_wb_congested(struct bdi_writeback_congested *congested, int sync)
{ {
enum wb_state bit; enum wb_congested_state bit;
bit = sync ? WB_sync_congested : WB_async_congested; bit = sync ? WB_sync_congested : WB_async_congested;
if (!test_and_set_bit(bit, &congested->state)) if (!test_and_set_bit(bit, &congested->state))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment