Commit bf453a0a authored by Logan Gunthorpe's avatar Logan Gunthorpe Committed by Vinod Koul

dmaengine: ioat: Support in-use unbind

Don't allocate memory using the devm infrastructure and instead call
kfree with the new dmaengine device_release call back. This ensures
the structures are available until the last reference is dropped.

We also need to ensure we call ioat_shutdown() in ioat_remove() so
that all the channels are quiesced and further transactions fail.
Signed-off-by: default avatarLogan Gunthorpe <logang@deltatee.com>
Acked-by: default avatarDave Jiang <dave.jiang@intel.com>
Link: https://lore.kernel.org/r/20191216190120.21374-6-logang@deltatee.com
Signed-off-by: default avatarVinod Koul <vkoul@kernel.org>
parent 8ad342a8
...@@ -556,10 +556,6 @@ static void ioat_dma_remove(struct ioatdma_device *ioat_dma) ...@@ -556,10 +556,6 @@ static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
ioat_kobject_del(ioat_dma); ioat_kobject_del(ioat_dma);
dma_async_device_unregister(dma); dma_async_device_unregister(dma);
dma_pool_destroy(ioat_dma->completion_pool);
INIT_LIST_HEAD(&dma->channels);
} }
/** /**
...@@ -589,7 +585,7 @@ static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma) ...@@ -589,7 +585,7 @@ static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log); dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);
for (i = 0; i < dma->chancnt; i++) { for (i = 0; i < dma->chancnt; i++) {
ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL); ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
if (!ioat_chan) if (!ioat_chan)
break; break;
...@@ -624,12 +620,16 @@ static void ioat_free_chan_resources(struct dma_chan *c) ...@@ -624,12 +620,16 @@ static void ioat_free_chan_resources(struct dma_chan *c)
return; return;
ioat_stop(ioat_chan); ioat_stop(ioat_chan);
ioat_reset_hw(ioat_chan);
/* Put LTR to idle */ if (!test_bit(IOAT_CHAN_DOWN, &ioat_chan->state)) {
if (ioat_dma->version >= IOAT_VER_3_4) ioat_reset_hw(ioat_chan);
writeb(IOAT_CHAN_LTR_SWSEL_IDLE,
ioat_chan->reg_base + IOAT_CHAN_LTR_SWSEL_OFFSET); /* Put LTR to idle */
if (ioat_dma->version >= IOAT_VER_3_4)
writeb(IOAT_CHAN_LTR_SWSEL_IDLE,
ioat_chan->reg_base +
IOAT_CHAN_LTR_SWSEL_OFFSET);
}
spin_lock_bh(&ioat_chan->cleanup_lock); spin_lock_bh(&ioat_chan->cleanup_lock);
spin_lock_bh(&ioat_chan->prep_lock); spin_lock_bh(&ioat_chan->prep_lock);
...@@ -1322,16 +1322,28 @@ static struct pci_driver ioat_pci_driver = { ...@@ -1322,16 +1322,28 @@ static struct pci_driver ioat_pci_driver = {
.err_handler = &ioat_err_handler, .err_handler = &ioat_err_handler,
}; };
/*
 * release_ioatdma() - dmaengine device_release callback for the ioat driver.
 *
 * Installed as dma_dev.device_release so it runs only when the last
 * reference to the dma_device is dropped (replacing the old devm-managed
 * lifetime, per this commit's intent).  Frees every per-channel structure
 * slot in d->idx[], destroys the completion DMA pool, then frees the
 * ioatdma_device itself.
 *
 * NOTE(review): kfree() tolerates NULL, so unused idx[] slots are safe;
 * assumes idx[] holds at most IOAT_MAX_CHANS kzalloc'd channels — confirm
 * against ioat_enumerate_channels().
 */
static void release_ioatdma(struct dma_device *device)
{
struct ioatdma_device *d = to_ioatdma_device(device);
int i;
/* Free each channel allocated with kzalloc() in ioat_enumerate_channels(). */
for (i = 0; i < IOAT_MAX_CHANS; i++)
kfree(d->idx[i]);
/* Pool teardown moved here from ioat_dma_remove() by this patch. */
dma_pool_destroy(d->completion_pool);
kfree(d);
}
static struct ioatdma_device * static struct ioatdma_device *
alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase) alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase)
{ {
struct device *dev = &pdev->dev; struct ioatdma_device *d = kzalloc(sizeof(*d), GFP_KERNEL);
struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
if (!d) if (!d)
return NULL; return NULL;
d->pdev = pdev; d->pdev = pdev;
d->reg_base = iobase; d->reg_base = iobase;
d->dma_dev.device_release = release_ioatdma;
return d; return d;
} }
...@@ -1400,6 +1412,8 @@ static void ioat_remove(struct pci_dev *pdev) ...@@ -1400,6 +1412,8 @@ static void ioat_remove(struct pci_dev *pdev)
if (!device) if (!device)
return; return;
ioat_shutdown(pdev);
dev_err(&pdev->dev, "Removing dma and dca services\n"); dev_err(&pdev->dev, "Removing dma and dca services\n");
if (device->dca) { if (device->dca) {
unregister_dca_provider(device->dca, &pdev->dev); unregister_dca_provider(device->dca, &pdev->dev);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment