Commit aee4d1fa authored by Robert Baldyga, committed by Vinod Koul

dmaengine: pl330: improve pl330_tx_status() function

This patch adds the possibility to read the residue of a DMA transfer. It is useful when we want to know how many bytes have been transferred before we terminate the channel, for example on a timeout interrupt (a client-side usage sketch follows the commit metadata below).
Signed-off-by: Lukasz Czerwinski <l.czerwinski@samsung.com>
Signed-off-by: Robert Baldyga <r.baldyga@samsung.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
parent be6893e1
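The residue added by this patch is reported through the generic dmaengine device_tx_status path, so clients read it with the standard helpers rather than a pl330-specific call. Below is a minimal sketch of the timeout case the commit message describes, assuming a hypothetical client driver: struct my_dev and its fields are made up for illustration, while dmaengine_tx_status(), dmaengine_terminate_all() and struct dma_tx_state are the real kernel interfaces.

#include <linux/dmaengine.h>

/* Hypothetical client context; only the dmaengine calls below are real API. */
struct my_dev {
	struct dma_chan *chan;	/* channel obtained earlier from the dmaengine core */
	dma_cookie_t cookie;	/* cookie returned by dmaengine_submit() */
	size_t xfer_len;	/* total bytes queued for this transfer */
};

/* Called from a timeout handler: stop the channel and report progress. */
static size_t my_dev_bytes_done_on_timeout(struct my_dev *md)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(md->chan, md->cookie, &state);
	if (status == DMA_COMPLETE)
		return md->xfer_len;

	/* state.residue now holds the bytes still pending for this cookie. */
	dmaengine_terminate_all(md->chan);

	return md->xfer_len - state.residue;
}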
@@ -504,6 +504,9 @@ struct dma_pl330_desc {
 	enum desc_status status;
 
+	int bytes_requested;
+	bool last;
+
 	/* The channel which currently holds this desc */
 	struct dma_pl330_chan *pchan;
@@ -2173,11 +2176,74 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
 	pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
 }
 
+int pl330_get_current_xferred_count(struct dma_pl330_chan *pch,
+		struct dma_pl330_desc *desc)
+{
+	struct pl330_thread *thrd = pch->thread;
+	struct pl330_dmac *pl330 = pch->dmac;
+	void __iomem *regs = thrd->dmac->base;
+	u32 val, addr;
+
+	pm_runtime_get_sync(pl330->ddma.dev);
+	val = addr = 0;
+	if (desc->rqcfg.src_inc) {
+		val = readl(regs + SA(thrd->id));
+		addr = desc->px.src_addr;
+	} else {
+		val = readl(regs + DA(thrd->id));
+		addr = desc->px.dst_addr;
+	}
+	pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
+	pm_runtime_put_autosuspend(pl330->ddma.dev);
+	return val - addr;
+}
+
 static enum dma_status
 pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 		 struct dma_tx_state *txstate)
 {
-	return dma_cookie_status(chan, cookie, txstate);
+	enum dma_status ret;
+	unsigned long flags;
+	struct dma_pl330_desc *desc, *running = NULL;
+	struct dma_pl330_chan *pch = to_pchan(chan);
+	unsigned int transferred, residual = 0;
+
+	ret = dma_cookie_status(chan, cookie, txstate);
+
+	if (!txstate)
+		return ret;
+
+	if (ret == DMA_COMPLETE)
+		goto out;
+
+	spin_lock_irqsave(&pch->lock, flags);
+
+	if (pch->thread->req_running != -1)
+		running = pch->thread->req[pch->thread->req_running].desc;
+
+	/* Check in pending list */
+	list_for_each_entry(desc, &pch->work_list, node) {
+		if (desc->status == DONE)
+			transferred = desc->bytes_requested;
+		else if (running && desc == running)
+			transferred =
+				pl330_get_current_xferred_count(pch, desc);
+		else
+			transferred = 0;
+		residual += desc->bytes_requested - transferred;
+		if (desc->txd.cookie == cookie) {
+			ret = desc->status;
+			break;
+		}
+		if (desc->last)
+			residual = 0;
+	}
+	spin_unlock_irqrestore(&pch->lock, flags);
+
+out:
+	dma_set_residue(txstate, residual);
+
+	return ret;
 }
 
 static void pl330_issue_pending(struct dma_chan *chan)
@@ -2222,12 +2288,14 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
 			desc->txd.callback = last->txd.callback;
 			desc->txd.callback_param = last->txd.callback_param;
 		}
+		last->last = false;
 
 		dma_cookie_assign(&desc->txd);
 
 		list_move_tail(&desc->node, &pch->submitted_list);
 	}
 
+	last->last = true;
 	cookie = dma_cookie_assign(&last->txd);
 	list_add_tail(&last->node, &pch->submitted_list);
 	spin_unlock_irqrestore(&pch->lock, flags);
@@ -2450,6 +2518,7 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
 		desc->rqtype = direction;
 		desc->rqcfg.brst_size = pch->burst_sz;
 		desc->rqcfg.brst_len = 1;
+		desc->bytes_requested = period_len;
 		fill_px(&desc->px, dst, src, period_len);
 
 		if (!first)
@@ -2592,6 +2661,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		desc->rqcfg.brst_size = pch->burst_sz;
 		desc->rqcfg.brst_len = 1;
 		desc->rqtype = direction;
+		desc->bytes_requested = sg_dma_len(sg);
 	}
 
 	/* Return the last desc in the chain */
@@ -2777,7 +2847,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 	pd->src_addr_widths = PL330_DMA_BUSWIDTHS;
 	pd->dst_addr_widths = PL330_DMA_BUSWIDTHS;
 	pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-	pd->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+	pd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
 
 	ret = dma_async_device_register(pd);
 	if (ret) {
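To make the accumulation rule in the new pl330_tx_status() above easier to follow: completed descriptors contribute nothing, the running one contributes its untransferred remainder (current SA/DA register minus the programmed start address, via pl330_get_current_xferred_count()), untouched ones contribute their full length, and the accumulated residual is reset whenever a descriptor marked 'last' (the tail of a previously submitted chain) is passed without matching the requested cookie. A standalone sketch of that walk, using entirely hypothetical descriptor data, is:

/*
 * Standalone illustration of the residue walk in pl330_tx_status().
 * All descriptor data here is made up; the loop mirrors the kernel logic.
 */
#include <stdio.h>
#include <stdbool.h>

struct demo_desc {
	unsigned int bytes_requested;
	unsigned int transferred;	/* what the hardware would report */
	bool done;
	bool last;			/* tail of a previously submitted chain */
	int cookie;
};

static unsigned int residue_for_cookie(struct demo_desc *descs, int n, int cookie)
{
	unsigned int residual = 0;
	int i;

	for (i = 0; i < n; i++) {
		struct demo_desc *d = &descs[i];
		unsigned int transferred = d->done ? d->bytes_requested
						   : d->transferred;

		residual += d->bytes_requested - transferred;
		if (d->cookie == cookie)
			break;
		if (d->last)	/* end of an earlier chain: restart the sum */
			residual = 0;
	}
	return residual;
}

int main(void)
{
	struct demo_desc work[] = {
		{ 4096, 4096, true,  true,  1 },	/* earlier chain, finished */
		{ 4096, 1024, false, false, 2 },	/* currently running */
		{ 4096, 0,    false, true,  3 },	/* not started yet */
	};

	printf("%u\n", residue_for_cookie(work, 3, 3));
	return 0;
}

Running it prints 7168: the finished 4096-byte chain before the 'last' marker is dropped, leaving 3072 pending bytes of the running descriptor plus the untouched 4096-byte one.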