Commit c997e30e authored by Dave Jiang's avatar Dave Jiang Committed by Vinod Koul

dmaengine: IOATDMA: revise channel reset workaround on CB3.3 platforms

Previously we unloaded the interrupts and reloaded in order to work around
a channel reset bug that cleared the MSIX table. This approach just isn't
practical when a reset needs to happen in the error handler that just
happens to be running in interrupt context (bottom half). It looks like we
can work around the hardware issue by just storing a shadow copy of the
MSIX table and restoring it after reset.
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
parent dd4645eb
...@@ -804,40 +804,6 @@ ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie, ...@@ -804,40 +804,6 @@ ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
return dma_cookie_status(c, cookie, txstate); return dma_cookie_status(c, cookie, txstate);
} }
/*
 * ioat_irq_reinit - tear down and re-establish the device's interrupts.
 * @ioat_dma: ioatdma device whose interrupt resources are recycled.
 *
 * Workaround for a CB3.x (BWD) erratum: a channel reset clears the MSIX
 * table (see commit log), so every interrupt resource is released and
 * then re-registered from scratch via ioat_dma_setup_interrupts().
 *
 * Returns 0 on non-BWD hardware or when no IRQs were set up, otherwise
 * the result of ioat_dma_setup_interrupts().
 */
static int ioat_irq_reinit(struct ioatdma_device *ioat_dma)
{
	struct pci_dev *pdev = ioat_dma->pdev;
	int irq = pdev->irq, i;

	/* Only BWD (CB3.x) parts suffer the reset/MSIX erratum. */
	if (!is_bwd_ioat(pdev))
		return 0;

	switch (ioat_dma->irq_mode) {
	case IOAT_MSIX:
		/* Free each per-channel vector before disabling MSIX itself. */
		for (i = 0; i < ioat_dma->dma_dev.chancnt; i++) {
			struct msix_entry *msix = &ioat_dma->msix_entries[i];
			struct ioatdma_chan *ioat_chan;

			ioat_chan = ioat_chan_by_index(ioat_dma, i);
			devm_free_irq(&pdev->dev, msix->vector, ioat_chan);
		}

		pci_disable_msix(pdev);
		break;

	case IOAT_MSI:
		pci_disable_msi(pdev);
		/* fall through */

	case IOAT_INTX:
		/* MSI and INTX share a single device-level IRQ line. */
		devm_free_irq(&pdev->dev, irq, ioat_dma);
		break;

	default:
		/* IOAT_NOIRQ: nothing allocated, nothing to redo. */
		return 0;
	}

	/* Mark interrupts gone, then rebuild them from a clean slate. */
	ioat_dma->irq_mode = IOAT_NOIRQ;

	return ioat_dma_setup_interrupts(ioat_dma);
}
int ioat_reset_hw(struct ioatdma_chan *ioat_chan) int ioat_reset_hw(struct ioatdma_chan *ioat_chan)
{ {
/* throw away whatever the channel was doing and get it /* throw away whatever the channel was doing and get it
...@@ -877,9 +843,21 @@ int ioat_reset_hw(struct ioatdma_chan *ioat_chan) ...@@ -877,9 +843,21 @@ int ioat_reset_hw(struct ioatdma_chan *ioat_chan)
} }
} }
if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
ioat_dma->msixtba0 = readq(ioat_dma->reg_base + 0x1000);
ioat_dma->msixdata0 = readq(ioat_dma->reg_base + 0x1008);
ioat_dma->msixpba = readq(ioat_dma->reg_base + 0x1800);
}
err = ioat_reset_sync(ioat_chan, msecs_to_jiffies(200)); err = ioat_reset_sync(ioat_chan, msecs_to_jiffies(200));
if (!err) if (!err) {
err = ioat_irq_reinit(ioat_dma); if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
writeq(ioat_dma->msixtba0, ioat_dma->reg_base + 0x1000);
writeq(ioat_dma->msixdata0, ioat_dma->reg_base + 0x1008);
writeq(ioat_dma->msixpba, ioat_dma->reg_base + 0x1800);
}
}
if (err) if (err)
dev_err(&pdev->dev, "Failed to reset: %d\n", err); dev_err(&pdev->dev, "Failed to reset: %d\n", err);
......
...@@ -86,6 +86,11 @@ struct ioatdma_device { ...@@ -86,6 +86,11 @@ struct ioatdma_device {
struct dca_provider *dca; struct dca_provider *dca;
enum ioat_irq_mode irq_mode; enum ioat_irq_mode irq_mode;
u32 cap; u32 cap;
/* shadow version for CB3.3 chan reset errata workaround */
u64 msixtba0;
u64 msixdata0;
u32 msixpba;
}; };
struct ioat_descs { struct ioat_descs {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment