Commit 0665a4e9 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'dmaengine-fix-5.8-rc6' of...

Merge tag 'dmaengine-fix-5.8-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine into master

Pull dmaengine fixes from Vinod Koul:

 - update dmaengine tree location to kernel.org

 - dmatest fix for completing threads

 - driver fixes for k3dma, fsl-dma, idxd, tegra, and a few other drivers

* tag 'dmaengine-fix-5.8-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine: (21 commits)
  dmaengine: ioat setting ioat timeout as module parameter
  dmaengine: fsl-edma: fix wrong tcd endianness for big-endian cpu
  dmaengine: dmatest: stop completed threads when running without set channel
  dmaengine: fsl-edma-common: correct DSIZE_32BYTE
  dmaengine: dw: Initialize channel before each transfer
  dmaengine: idxd: fix misc interrupt handler thread unmasking
  dmaengine: idxd: cleanup workqueue config after disabling
  dmaengine: tegra210-adma: Fix runtime PM imbalance on error
  dmaengine: mcf-edma: Fix NULL pointer exception in mcf_edma_tx_handler
  dmaengine: fsl-edma: Fix NULL pointer exception in fsl_edma_tx_handler
  dmaengine: fsl-edma: Add lockdep assert for exported function
  dmaengine: idxd: fix hw descriptor fields for delta record
  dmaengine: ti: k3-udma: add missing put_device() call in of_xudma_dev_get()
  dmaengine: sh: usb-dmac: set tx_result parameters
  dmaengine: ti: k3-udma: Fix delayed_work usage for tx drain workaround
  dmaengine: idxd: fix cdev locking for open and release
  dmaengine: imx-sdma: Fix: Remove 'always true' comparison
  MAINTAINERS: switch dmaengine tree to kernel.org
  dmaengine: ti: k3-udma: Fix the running channel handling in alloc_chan_resources
  dmaengine: ti: k3-udma: Fix cleanup code for alloc_chan_resources
  ...
parents 6cbba1f9 87730ccb
...@@ -5112,7 +5112,7 @@ M: Vinod Koul <vkoul@kernel.org> ...@@ -5112,7 +5112,7 @@ M: Vinod Koul <vkoul@kernel.org>
L: dmaengine@vger.kernel.org L: dmaengine@vger.kernel.org
S: Maintained S: Maintained
Q: https://patchwork.kernel.org/project/linux-dmaengine/list/ Q: https://patchwork.kernel.org/project/linux-dmaengine/list/
T: git git://git.infradead.org/users/vkoul/slave-dma.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine.git
F: Documentation/devicetree/bindings/dma/ F: Documentation/devicetree/bindings/dma/
F: Documentation/driver-api/dmaengine/ F: Documentation/driver-api/dmaengine/
F: drivers/dma/ F: drivers/dma/
......
...@@ -1176,6 +1176,8 @@ static int dmatest_run_set(const char *val, const struct kernel_param *kp) ...@@ -1176,6 +1176,8 @@ static int dmatest_run_set(const char *val, const struct kernel_param *kp)
} else if (dmatest_run) { } else if (dmatest_run) {
if (!is_threaded_test_pending(info)) { if (!is_threaded_test_pending(info)) {
pr_info("No channels configured, continue with any\n"); pr_info("No channels configured, continue with any\n");
if (!is_threaded_test_run(info))
stop_threaded_test(info);
add_threaded_test(info); add_threaded_test(info);
} }
start_threaded_tests(info); start_threaded_tests(info);
......
...@@ -118,16 +118,11 @@ static void dwc_initialize(struct dw_dma_chan *dwc) ...@@ -118,16 +118,11 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
{ {
struct dw_dma *dw = to_dw_dma(dwc->chan.device); struct dw_dma *dw = to_dw_dma(dwc->chan.device);
if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags))
return;
dw->initialize_chan(dwc); dw->initialize_chan(dwc);
/* Enable interrupts */ /* Enable interrupts */
channel_set_bit(dw, MASK.XFER, dwc->mask); channel_set_bit(dw, MASK.XFER, dwc->mask);
channel_set_bit(dw, MASK.ERROR, dwc->mask); channel_set_bit(dw, MASK.ERROR, dwc->mask);
set_bit(DW_DMA_IS_INITIALIZED, &dwc->flags);
} }
/*----------------------------------------------------------------------*/ /*----------------------------------------------------------------------*/
...@@ -954,8 +949,6 @@ static void dwc_issue_pending(struct dma_chan *chan) ...@@ -954,8 +949,6 @@ static void dwc_issue_pending(struct dma_chan *chan)
void do_dw_dma_off(struct dw_dma *dw) void do_dw_dma_off(struct dw_dma *dw)
{ {
unsigned int i;
dma_writel(dw, CFG, 0); dma_writel(dw, CFG, 0);
channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
...@@ -966,9 +959,6 @@ void do_dw_dma_off(struct dw_dma *dw) ...@@ -966,9 +959,6 @@ void do_dw_dma_off(struct dw_dma *dw)
while (dma_readl(dw, CFG) & DW_CFG_DMA_EN) while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
cpu_relax(); cpu_relax();
for (i = 0; i < dw->dma.chancnt; i++)
clear_bit(DW_DMA_IS_INITIALIZED, &dw->chan[i].flags);
} }
void do_dw_dma_on(struct dw_dma *dw) void do_dw_dma_on(struct dw_dma *dw)
...@@ -1032,8 +1022,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan) ...@@ -1032,8 +1022,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
/* Clear custom channel configuration */ /* Clear custom channel configuration */
memset(&dwc->dws, 0, sizeof(struct dw_dma_slave)); memset(&dwc->dws, 0, sizeof(struct dw_dma_slave));
clear_bit(DW_DMA_IS_INITIALIZED, &dwc->flags);
/* Disable interrupts */ /* Disable interrupts */
channel_clear_bit(dw, MASK.XFER, dwc->mask); channel_clear_bit(dw, MASK.XFER, dwc->mask);
channel_clear_bit(dw, MASK.BLOCK, dwc->mask); channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
......
...@@ -352,26 +352,28 @@ static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan, ...@@ -352,26 +352,28 @@ static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
/* /*
* TCD parameters are stored in struct fsl_edma_hw_tcd in little * TCD parameters are stored in struct fsl_edma_hw_tcd in little
* endian format. However, we need to load the TCD registers in * endian format. However, we need to load the TCD registers in
* big- or little-endian obeying the eDMA engine model endian. * big- or little-endian obeying the eDMA engine model endian,
* and this is performed from specific edma_write functions
*/ */
edma_writew(edma, 0, &regs->tcd[ch].csr); edma_writew(edma, 0, &regs->tcd[ch].csr);
edma_writel(edma, le32_to_cpu(tcd->saddr), &regs->tcd[ch].saddr);
edma_writel(edma, le32_to_cpu(tcd->daddr), &regs->tcd[ch].daddr);
edma_writew(edma, le16_to_cpu(tcd->attr), &regs->tcd[ch].attr); edma_writel(edma, (s32)tcd->saddr, &regs->tcd[ch].saddr);
edma_writew(edma, le16_to_cpu(tcd->soff), &regs->tcd[ch].soff); edma_writel(edma, (s32)tcd->daddr, &regs->tcd[ch].daddr);
edma_writel(edma, le32_to_cpu(tcd->nbytes), &regs->tcd[ch].nbytes); edma_writew(edma, (s16)tcd->attr, &regs->tcd[ch].attr);
edma_writel(edma, le32_to_cpu(tcd->slast), &regs->tcd[ch].slast); edma_writew(edma, tcd->soff, &regs->tcd[ch].soff);
edma_writew(edma, le16_to_cpu(tcd->citer), &regs->tcd[ch].citer); edma_writel(edma, (s32)tcd->nbytes, &regs->tcd[ch].nbytes);
edma_writew(edma, le16_to_cpu(tcd->biter), &regs->tcd[ch].biter); edma_writel(edma, (s32)tcd->slast, &regs->tcd[ch].slast);
edma_writew(edma, le16_to_cpu(tcd->doff), &regs->tcd[ch].doff);
edma_writel(edma, le32_to_cpu(tcd->dlast_sga), edma_writew(edma, (s16)tcd->citer, &regs->tcd[ch].citer);
edma_writew(edma, (s16)tcd->biter, &regs->tcd[ch].biter);
edma_writew(edma, (s16)tcd->doff, &regs->tcd[ch].doff);
edma_writel(edma, (s32)tcd->dlast_sga,
&regs->tcd[ch].dlast_sga); &regs->tcd[ch].dlast_sga);
edma_writew(edma, le16_to_cpu(tcd->csr), &regs->tcd[ch].csr); edma_writew(edma, (s16)tcd->csr, &regs->tcd[ch].csr);
} }
static inline static inline
...@@ -589,6 +591,8 @@ void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan) ...@@ -589,6 +591,8 @@ void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
{ {
struct virt_dma_desc *vdesc; struct virt_dma_desc *vdesc;
lockdep_assert_held(&fsl_chan->vchan.lock);
vdesc = vchan_next_desc(&fsl_chan->vchan); vdesc = vchan_next_desc(&fsl_chan->vchan);
if (!vdesc) if (!vdesc)
return; return;
......
...@@ -33,7 +33,7 @@ ...@@ -33,7 +33,7 @@
#define EDMA_TCD_ATTR_DSIZE_16BIT BIT(0) #define EDMA_TCD_ATTR_DSIZE_16BIT BIT(0)
#define EDMA_TCD_ATTR_DSIZE_32BIT BIT(1) #define EDMA_TCD_ATTR_DSIZE_32BIT BIT(1)
#define EDMA_TCD_ATTR_DSIZE_64BIT (BIT(0) | BIT(1)) #define EDMA_TCD_ATTR_DSIZE_64BIT (BIT(0) | BIT(1))
#define EDMA_TCD_ATTR_DSIZE_32BYTE (BIT(3) | BIT(0)) #define EDMA_TCD_ATTR_DSIZE_32BYTE (BIT(2) | BIT(0))
#define EDMA_TCD_ATTR_SSIZE_8BIT 0 #define EDMA_TCD_ATTR_SSIZE_8BIT 0
#define EDMA_TCD_ATTR_SSIZE_16BIT (EDMA_TCD_ATTR_DSIZE_16BIT << 8) #define EDMA_TCD_ATTR_SSIZE_16BIT (EDMA_TCD_ATTR_DSIZE_16BIT << 8)
#define EDMA_TCD_ATTR_SSIZE_32BIT (EDMA_TCD_ATTR_DSIZE_32BIT << 8) #define EDMA_TCD_ATTR_SSIZE_32BIT (EDMA_TCD_ATTR_DSIZE_32BIT << 8)
......
...@@ -45,6 +45,13 @@ static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id) ...@@ -45,6 +45,13 @@ static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
fsl_chan = &fsl_edma->chans[ch]; fsl_chan = &fsl_edma->chans[ch];
spin_lock(&fsl_chan->vchan.lock); spin_lock(&fsl_chan->vchan.lock);
if (!fsl_chan->edesc) {
/* terminate_all called before */
spin_unlock(&fsl_chan->vchan.lock);
continue;
}
if (!fsl_chan->edesc->iscyclic) { if (!fsl_chan->edesc->iscyclic) {
list_del(&fsl_chan->edesc->vdesc.node); list_del(&fsl_chan->edesc->vdesc.node);
vchan_cookie_complete(&fsl_chan->edesc->vdesc); vchan_cookie_complete(&fsl_chan->edesc->vdesc);
......
...@@ -74,6 +74,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp) ...@@ -74,6 +74,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
struct idxd_device *idxd; struct idxd_device *idxd;
struct idxd_wq *wq; struct idxd_wq *wq;
struct device *dev; struct device *dev;
int rc = 0;
wq = inode_wq(inode); wq = inode_wq(inode);
idxd = wq->idxd; idxd = wq->idxd;
...@@ -81,17 +82,27 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp) ...@@ -81,17 +82,27 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
dev_dbg(dev, "%s called: %d\n", __func__, idxd_wq_refcount(wq)); dev_dbg(dev, "%s called: %d\n", __func__, idxd_wq_refcount(wq));
if (idxd_wq_refcount(wq) > 0 && wq_dedicated(wq))
return -EBUSY;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx) if (!ctx)
return -ENOMEM; return -ENOMEM;
mutex_lock(&wq->wq_lock);
if (idxd_wq_refcount(wq) > 0 && wq_dedicated(wq)) {
rc = -EBUSY;
goto failed;
}
ctx->wq = wq; ctx->wq = wq;
filp->private_data = ctx; filp->private_data = ctx;
idxd_wq_get(wq); idxd_wq_get(wq);
mutex_unlock(&wq->wq_lock);
return 0; return 0;
failed:
mutex_unlock(&wq->wq_lock);
kfree(ctx);
return rc;
} }
static int idxd_cdev_release(struct inode *node, struct file *filep) static int idxd_cdev_release(struct inode *node, struct file *filep)
...@@ -105,7 +116,9 @@ static int idxd_cdev_release(struct inode *node, struct file *filep) ...@@ -105,7 +116,9 @@ static int idxd_cdev_release(struct inode *node, struct file *filep)
filep->private_data = NULL; filep->private_data = NULL;
kfree(ctx); kfree(ctx);
mutex_lock(&wq->wq_lock);
idxd_wq_put(wq); idxd_wq_put(wq);
mutex_unlock(&wq->wq_lock);
return 0; return 0;
} }
......
...@@ -320,6 +320,31 @@ void idxd_wq_unmap_portal(struct idxd_wq *wq) ...@@ -320,6 +320,31 @@ void idxd_wq_unmap_portal(struct idxd_wq *wq)
devm_iounmap(dev, wq->dportal); devm_iounmap(dev, wq->dportal);
} }
void idxd_wq_disable_cleanup(struct idxd_wq *wq)
{
struct idxd_device *idxd = wq->idxd;
struct device *dev = &idxd->pdev->dev;
int i, wq_offset;
lockdep_assert_held(&idxd->dev_lock);
memset(&wq->wqcfg, 0, sizeof(wq->wqcfg));
wq->type = IDXD_WQT_NONE;
wq->size = 0;
wq->group = NULL;
wq->threshold = 0;
wq->priority = 0;
clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
memset(wq->name, 0, WQ_NAME_SIZE);
for (i = 0; i < 8; i++) {
wq_offset = idxd->wqcfg_offset + wq->id * 32 + i * sizeof(u32);
iowrite32(0, idxd->reg_base + wq_offset);
dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
wq->id, i, wq_offset,
ioread32(idxd->reg_base + wq_offset));
}
}
/* Device control bits */ /* Device control bits */
static inline bool idxd_is_enabled(struct idxd_device *idxd) static inline bool idxd_is_enabled(struct idxd_device *idxd)
{ {
......
...@@ -290,6 +290,7 @@ int idxd_wq_enable(struct idxd_wq *wq); ...@@ -290,6 +290,7 @@ int idxd_wq_enable(struct idxd_wq *wq);
int idxd_wq_disable(struct idxd_wq *wq); int idxd_wq_disable(struct idxd_wq *wq);
int idxd_wq_map_portal(struct idxd_wq *wq); int idxd_wq_map_portal(struct idxd_wq *wq);
void idxd_wq_unmap_portal(struct idxd_wq *wq); void idxd_wq_unmap_portal(struct idxd_wq *wq);
void idxd_wq_disable_cleanup(struct idxd_wq *wq);
/* submission */ /* submission */
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc); int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
......
...@@ -141,7 +141,7 @@ irqreturn_t idxd_misc_thread(int vec, void *data) ...@@ -141,7 +141,7 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET); iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
if (!err) if (!err)
return IRQ_HANDLED; goto out;
gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET); gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
if (gensts.state == IDXD_DEVICE_STATE_HALT) { if (gensts.state == IDXD_DEVICE_STATE_HALT) {
...@@ -162,6 +162,7 @@ irqreturn_t idxd_misc_thread(int vec, void *data) ...@@ -162,6 +162,7 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
spin_unlock_bh(&idxd->dev_lock); spin_unlock_bh(&idxd->dev_lock);
} }
out:
idxd_unmask_msix_vector(idxd, irq_entry->id); idxd_unmask_msix_vector(idxd, irq_entry->id);
return IRQ_HANDLED; return IRQ_HANDLED;
} }
......
...@@ -315,6 +315,11 @@ static int idxd_config_bus_remove(struct device *dev) ...@@ -315,6 +315,11 @@ static int idxd_config_bus_remove(struct device *dev)
idxd_unregister_dma_device(idxd); idxd_unregister_dma_device(idxd);
spin_lock_irqsave(&idxd->dev_lock, flags); spin_lock_irqsave(&idxd->dev_lock, flags);
rc = idxd_device_disable(idxd); rc = idxd_device_disable(idxd);
for (i = 0; i < idxd->max_wqs; i++) {
struct idxd_wq *wq = &idxd->wqs[i];
idxd_wq_disable_cleanup(wq);
}
spin_unlock_irqrestore(&idxd->dev_lock, flags); spin_unlock_irqrestore(&idxd->dev_lock, flags);
module_put(THIS_MODULE); module_put(THIS_MODULE);
if (rc < 0) if (rc < 0)
......
...@@ -1331,8 +1331,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan) ...@@ -1331,8 +1331,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
sdma_channel_synchronize(chan); sdma_channel_synchronize(chan);
if (sdmac->event_id0 >= 0) sdma_event_disable(sdmac, sdmac->event_id0);
sdma_event_disable(sdmac, sdmac->event_id0);
if (sdmac->event_id1) if (sdmac->event_id1)
sdma_event_disable(sdmac, sdmac->event_id1); sdma_event_disable(sdmac, sdmac->event_id1);
...@@ -1632,11 +1631,9 @@ static int sdma_config(struct dma_chan *chan, ...@@ -1632,11 +1631,9 @@ static int sdma_config(struct dma_chan *chan,
memcpy(&sdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg)); memcpy(&sdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg));
/* Set ENBLn earlier to make sure dma request triggered after that */ /* Set ENBLn earlier to make sure dma request triggered after that */
if (sdmac->event_id0 >= 0) { if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events) return -EINVAL;
return -EINVAL; sdma_event_enable(sdmac, sdmac->event_id0);
sdma_event_enable(sdmac, sdmac->event_id0);
}
if (sdmac->event_id1) { if (sdmac->event_id1) {
if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events) if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events)
......
...@@ -26,6 +26,18 @@ ...@@ -26,6 +26,18 @@
#include "../dmaengine.h" #include "../dmaengine.h"
int completion_timeout = 200;
module_param(completion_timeout, int, 0644);
MODULE_PARM_DESC(completion_timeout,
"set ioat completion timeout [msec] (default 200 [msec])");
int idle_timeout = 2000;
module_param(idle_timeout, int, 0644);
MODULE_PARM_DESC(idle_timeout,
"set ioat idel timeout [msec] (default 2000 [msec])");
#define IDLE_TIMEOUT msecs_to_jiffies(idle_timeout)
#define COMPLETION_TIMEOUT msecs_to_jiffies(completion_timeout)
static char *chanerr_str[] = { static char *chanerr_str[] = {
"DMA Transfer Source Address Error", "DMA Transfer Source Address Error",
"DMA Transfer Destination Address Error", "DMA Transfer Destination Address Error",
......
...@@ -104,8 +104,6 @@ struct ioatdma_chan { ...@@ -104,8 +104,6 @@ struct ioatdma_chan {
#define IOAT_RUN 5 #define IOAT_RUN 5
#define IOAT_CHAN_ACTIVE 6 #define IOAT_CHAN_ACTIVE 6
struct timer_list timer; struct timer_list timer;
#define COMPLETION_TIMEOUT msecs_to_jiffies(100)
#define IDLE_TIMEOUT msecs_to_jiffies(2000)
#define RESET_DELAY msecs_to_jiffies(100) #define RESET_DELAY msecs_to_jiffies(100)
struct ioatdma_device *ioat_dma; struct ioatdma_device *ioat_dma;
dma_addr_t completion_dma; dma_addr_t completion_dma;
......
...@@ -35,6 +35,13 @@ static irqreturn_t mcf_edma_tx_handler(int irq, void *dev_id) ...@@ -35,6 +35,13 @@ static irqreturn_t mcf_edma_tx_handler(int irq, void *dev_id)
mcf_chan = &mcf_edma->chans[ch]; mcf_chan = &mcf_edma->chans[ch];
spin_lock(&mcf_chan->vchan.lock); spin_lock(&mcf_chan->vchan.lock);
if (!mcf_chan->edesc) {
/* terminate_all called before */
spin_unlock(&mcf_chan->vchan.lock);
continue;
}
if (!mcf_chan->edesc->iscyclic) { if (!mcf_chan->edesc->iscyclic) {
list_del(&mcf_chan->edesc->vdesc.node); list_del(&mcf_chan->edesc->vdesc.node);
vchan_cookie_complete(&mcf_chan->edesc->vdesc); vchan_cookie_complete(&mcf_chan->edesc->vdesc);
......
...@@ -586,6 +586,8 @@ static void usb_dmac_isr_transfer_end(struct usb_dmac_chan *chan) ...@@ -586,6 +586,8 @@ static void usb_dmac_isr_transfer_end(struct usb_dmac_chan *chan)
desc->residue = usb_dmac_get_current_residue(chan, desc, desc->residue = usb_dmac_get_current_residue(chan, desc,
desc->sg_index - 1); desc->sg_index - 1);
desc->done_cookie = desc->vd.tx.cookie; desc->done_cookie = desc->vd.tx.cookie;
desc->vd.tx_result.result = DMA_TRANS_NOERROR;
desc->vd.tx_result.residue = desc->residue;
vchan_cookie_complete(&desc->vd); vchan_cookie_complete(&desc->vd);
/* Restart the next transfer if this driver has a next desc */ /* Restart the next transfer if this driver has a next desc */
......
...@@ -658,6 +658,7 @@ static int tegra_adma_alloc_chan_resources(struct dma_chan *dc) ...@@ -658,6 +658,7 @@ static int tegra_adma_alloc_chan_resources(struct dma_chan *dc)
ret = pm_runtime_get_sync(tdc2dev(tdc)); ret = pm_runtime_get_sync(tdc2dev(tdc));
if (ret < 0) { if (ret < 0) {
pm_runtime_put_noidle(tdc2dev(tdc));
free_irq(tdc->irq, tdc); free_irq(tdc->irq, tdc);
return ret; return ret;
} }
...@@ -869,8 +870,10 @@ static int tegra_adma_probe(struct platform_device *pdev) ...@@ -869,8 +870,10 @@ static int tegra_adma_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev); pm_runtime_enable(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev); ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) if (ret < 0) {
pm_runtime_put_noidle(&pdev->dev);
goto rpm_disable; goto rpm_disable;
}
ret = tegra_adma_init(tdma); ret = tegra_adma_init(tdma);
if (ret) if (ret)
......
...@@ -42,6 +42,7 @@ struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property) ...@@ -42,6 +42,7 @@ struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property)
ud = platform_get_drvdata(pdev); ud = platform_get_drvdata(pdev);
if (!ud) { if (!ud) {
pr_debug("UDMA has not been probed\n"); pr_debug("UDMA has not been probed\n");
put_device(&pdev->dev);
return ERR_PTR(-EPROBE_DEFER); return ERR_PTR(-EPROBE_DEFER);
} }
......
...@@ -1753,7 +1753,8 @@ static int udma_alloc_chan_resources(struct dma_chan *chan) ...@@ -1753,7 +1753,8 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
dev_err(ud->ddev.dev, dev_err(ud->ddev.dev,
"Descriptor pool allocation failed\n"); "Descriptor pool allocation failed\n");
uc->use_dma_pool = false; uc->use_dma_pool = false;
return -ENOMEM; ret = -ENOMEM;
goto err_cleanup;
} }
} }
...@@ -1773,16 +1774,18 @@ static int udma_alloc_chan_resources(struct dma_chan *chan) ...@@ -1773,16 +1774,18 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
ret = udma_get_chan_pair(uc); ret = udma_get_chan_pair(uc);
if (ret) if (ret)
return ret; goto err_cleanup;
ret = udma_alloc_tx_resources(uc); ret = udma_alloc_tx_resources(uc);
if (ret) if (ret) {
return ret; udma_put_rchan(uc);
goto err_cleanup;
}
ret = udma_alloc_rx_resources(uc); ret = udma_alloc_rx_resources(uc);
if (ret) { if (ret) {
udma_free_tx_resources(uc); udma_free_tx_resources(uc);
return ret; goto err_cleanup;
} }
uc->config.src_thread = ud->psil_base + uc->tchan->id; uc->config.src_thread = ud->psil_base + uc->tchan->id;
...@@ -1800,10 +1803,8 @@ static int udma_alloc_chan_resources(struct dma_chan *chan) ...@@ -1800,10 +1803,8 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
uc->id); uc->id);
ret = udma_alloc_tx_resources(uc); ret = udma_alloc_tx_resources(uc);
if (ret) { if (ret)
uc->config.remote_thread_id = -1; goto err_cleanup;
return ret;
}
uc->config.src_thread = ud->psil_base + uc->tchan->id; uc->config.src_thread = ud->psil_base + uc->tchan->id;
uc->config.dst_thread = uc->config.remote_thread_id; uc->config.dst_thread = uc->config.remote_thread_id;
...@@ -1820,10 +1821,8 @@ static int udma_alloc_chan_resources(struct dma_chan *chan) ...@@ -1820,10 +1821,8 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
uc->id); uc->id);
ret = udma_alloc_rx_resources(uc); ret = udma_alloc_rx_resources(uc);
if (ret) { if (ret)
uc->config.remote_thread_id = -1; goto err_cleanup;
return ret;
}
uc->config.src_thread = uc->config.remote_thread_id; uc->config.src_thread = uc->config.remote_thread_id;
uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
...@@ -1838,7 +1837,9 @@ static int udma_alloc_chan_resources(struct dma_chan *chan) ...@@ -1838,7 +1837,9 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
/* Can not happen */ /* Can not happen */
dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n", dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
__func__, uc->id, uc->config.dir); __func__, uc->id, uc->config.dir);
return -EINVAL; ret = -EINVAL;
goto err_cleanup;
} }
/* check if the channel configuration was successful */ /* check if the channel configuration was successful */
...@@ -1847,7 +1848,7 @@ static int udma_alloc_chan_resources(struct dma_chan *chan) ...@@ -1847,7 +1848,7 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
if (udma_is_chan_running(uc)) { if (udma_is_chan_running(uc)) {
dev_warn(ud->dev, "chan%d: is running!\n", uc->id); dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
udma_stop(uc); udma_reset_chan(uc, false);
if (udma_is_chan_running(uc)) { if (udma_is_chan_running(uc)) {
dev_err(ud->dev, "chan%d: won't stop!\n", uc->id); dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
ret = -EBUSY; ret = -EBUSY;
...@@ -1906,8 +1907,6 @@ static int udma_alloc_chan_resources(struct dma_chan *chan) ...@@ -1906,8 +1907,6 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
udma_reset_rings(uc); udma_reset_rings(uc);
INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
udma_check_tx_completion);
return 0; return 0;
err_irq_free: err_irq_free:
...@@ -1919,7 +1918,7 @@ static int udma_alloc_chan_resources(struct dma_chan *chan) ...@@ -1919,7 +1918,7 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
err_res_free: err_res_free:
udma_free_tx_resources(uc); udma_free_tx_resources(uc);
udma_free_rx_resources(uc); udma_free_rx_resources(uc);
err_cleanup:
udma_reset_uchan(uc); udma_reset_uchan(uc);
if (uc->use_dma_pool) { if (uc->use_dma_pool) {
...@@ -3019,7 +3018,6 @@ static void udma_free_chan_resources(struct dma_chan *chan) ...@@ -3019,7 +3018,6 @@ static void udma_free_chan_resources(struct dma_chan *chan)
} }
cancel_delayed_work_sync(&uc->tx_drain.work); cancel_delayed_work_sync(&uc->tx_drain.work);
destroy_delayed_work_on_stack(&uc->tx_drain.work);
if (uc->irq_num_ring > 0) { if (uc->irq_num_ring > 0) {
free_irq(uc->irq_num_ring, uc); free_irq(uc->irq_num_ring, uc);
...@@ -3593,7 +3591,7 @@ static int udma_probe(struct platform_device *pdev) ...@@ -3593,7 +3591,7 @@ static int udma_probe(struct platform_device *pdev)
return ret; return ret;
} }
ret = of_property_read_u32(navss_node, "ti,udma-atype", &ud->atype); ret = of_property_read_u32(dev->of_node, "ti,udma-atype", &ud->atype);
if (!ret && ud->atype > 2) { if (!ret && ud->atype > 2) {
dev_err(dev, "Invalid atype: %u\n", ud->atype); dev_err(dev, "Invalid atype: %u\n", ud->atype);
return -EINVAL; return -EINVAL;
...@@ -3711,6 +3709,7 @@ static int udma_probe(struct platform_device *pdev) ...@@ -3711,6 +3709,7 @@ static int udma_probe(struct platform_device *pdev)
tasklet_init(&uc->vc.task, udma_vchan_complete, tasklet_init(&uc->vc.task, udma_vchan_complete,
(unsigned long)&uc->vc); (unsigned long)&uc->vc);
init_completion(&uc->teardown_completed); init_completion(&uc->teardown_completed);
INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion);
} }
ret = dma_async_device_register(&ud->ddev); ret = dma_async_device_register(&ud->ddev);
......
...@@ -110,9 +110,12 @@ struct dsa_hw_desc { ...@@ -110,9 +110,12 @@ struct dsa_hw_desc {
uint16_t rsvd1; uint16_t rsvd1;
union { union {
uint8_t expected_res; uint8_t expected_res;
/* create delta record */
struct { struct {
uint64_t delta_addr; uint64_t delta_addr;
uint32_t max_delta_size; uint32_t max_delta_size;
uint32_t delt_rsvd;
uint8_t expected_res_mask;
}; };
uint32_t delta_rec_size; uint32_t delta_rec_size;
uint64_t dest2; uint64_t dest2;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment