Commit 306a63be authored by Linus Torvalds

Merge tag 'dmaengine-fix-4.6-rc4' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine fixes from Vinod Koul:
 "This time we have some odd fixes in hsu, edma, omap and xilinx.

  Usual fixes and nothing special"

* tag 'dmaengine-fix-4.6-rc4' of git://git.infradead.org/users/vkoul/slave-dma:
  dmaengine: dw: fix master selection
  dmaengine: edma: special case slot limit workaround
  dmaengine: edma: Remove dynamic TPTC power management feature
  dmaengine: vdma: don't crash when bad channel is requested
  dmaengine: omap-dma: Do not suppress interrupts for memcpy
  dmaengine: omap-dma: Fix polled channel completion detection and handling
  dmaengine: hsu: correct use of channel status register
  dmaengine: hsu: correct residue calculation of active descriptor
  dmaengine: hsu: set HSU_CH_MTSR to memory width
parents ac82a57a 956e6c8e
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -130,26 +130,14 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
 static void dwc_initialize(struct dw_dma_chan *dwc)
 {
 	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
-	struct dw_dma_slave *dws = dwc->chan.private;
 	u32 cfghi = DWC_CFGH_FIFO_MODE;
 	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
 
 	if (dwc->initialized == true)
 		return;
 
-	if (dws) {
-		/*
-		 * We need controller-specific data to set up slave
-		 * transfers.
-		 */
-		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
-
-		cfghi |= DWC_CFGH_DST_PER(dws->dst_id);
-		cfghi |= DWC_CFGH_SRC_PER(dws->src_id);
-	} else {
-		cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
-		cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
-	}
+	cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
+	cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
 
 	channel_writel(dwc, CFG_LO, cfglo);
 	channel_writel(dwc, CFG_HI, cfghi);
@@ -941,7 +929,7 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
 	struct dw_dma_slave *dws = param;
 
-	if (!dws || dws->dma_dev != chan->device->dev)
+	if (dws->dma_dev != chan->device->dev)
 		return false;
 
 	/* We have to copy data since dws can be temporary storage */
@@ -1165,6 +1153,14 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
 	 * doesn't mean what you think it means), and status writeback.
 	 */
 
+	/*
+	 * We need controller-specific data to set up slave transfers.
+	 */
+	if (chan->private && !dw_dma_filter(chan, chan->private)) {
+		dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
+		return -EINVAL;
+	}
+
 	/* Enable controller here if needed */
 	if (!dw->in_use)
 		dw_dma_on(dw);
@@ -1226,6 +1222,14 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
 	spin_lock_irqsave(&dwc->lock, flags);
 	list_splice_init(&dwc->free_list, &list);
 	dwc->descs_allocated = 0;
+
+	/* Clear custom channel configuration */
+	dwc->src_id = 0;
+	dwc->dst_id = 0;
+
+	dwc->src_master = 0;
+	dwc->dst_master = 0;
+
 	dwc->initialized = false;
 
 	/* Disable interrupts */
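
With these changes dw_dma_filter() is the single place where controller-specific data is validated and copied into the channel, whether it arrives through dma_request_channel() or through chan->private at allocation time. A minimal client-side sketch, assuming the 4.6-era struct dw_dma_slave layout; the request-line numbers are invented for illustration:

/* Sketch only: the handshake line numbers below are hypothetical. */
#include <linux/dmaengine.h>
#include <linux/platform_data/dma-dw.h>

/* Exported from drivers/dma/dw/core.c; declared here for the sketch */
extern bool dw_dma_filter(struct dma_chan *chan, void *param);

static struct dma_chan *request_dw_chan(struct device *dma_dev)
{
	struct dw_dma_slave dws = {
		.dma_dev = dma_dev,	/* must match the DMA controller's dev */
		.src_id  = 1,		/* hypothetical RX handshake line */
		.dst_id  = 2,		/* hypothetical TX handshake line */
	};
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/*
	 * dw_dma_filter() rejects channels of other controllers and copies
	 * dws into the channel; dwc_alloc_chan_resources() re-runs it for
	 * data handed over via chan->private.
	 */
	return dma_request_channel(mask, dw_dma_filter, &dws);
}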
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -1238,6 +1238,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
 	struct edma_desc *edesc;
 	dma_addr_t src_addr, dst_addr;
 	enum dma_slave_buswidth dev_width;
+	bool use_intermediate = false;
 	u32 burst;
 	int i, ret, nslots;
@@ -1279,8 +1280,21 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
 	 * but the synchronization is difficult to achieve with Cyclic and
 	 * cannot be guaranteed, so we error out early.
 	 */
-	if (nslots > MAX_NR_SG)
-		return NULL;
+	if (nslots > MAX_NR_SG) {
+		/*
+		 * If the burst and period sizes are the same, we can put
+		 * the full buffer into a single period and activate
+		 * intermediate interrupts. This will produce interrupts
+		 * after each burst, which is also after each desired period.
+		 */
+		if (burst == period_len) {
+			period_len = buf_len;
+			nslots = 2;
+			use_intermediate = true;
+		} else {
+			return NULL;
+		}
+	}
 
 	edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
 			GFP_ATOMIC);
@@ -1358,8 +1372,13 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
 		/*
 		 * Enable period interrupt only if it is requested
 		 */
-		if (tx_flags & DMA_PREP_INTERRUPT)
+		if (tx_flags & DMA_PREP_INTERRUPT) {
 			edesc->pset[i].param.opt |= TCINTEN;
+
+			/* Also enable intermediate interrupts if necessary */
+			if (use_intermediate)
+				edesc->pset[i].param.opt |= ITCINTEN;
+		}
 	}
 
 	/* Place the cyclic channel to highest priority queue */
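
A worked example of why the workaround is sound (numbers invented): an 8 KiB cyclic buffer with 32-byte periods and a 32-byte burst would need 8192/32 + 1 = 257 PaRAM slots, far over the limit, yet every burst boundary coincides with a period boundary, so a single buffer-sized period with intermediate transfer-complete interrupts (ITCINTEN) signals exactly the same completion points. A standalone sketch of the decision, assuming the driver computes nslots as periods plus one link slot:

#include <stdbool.h>
#include <stddef.h>

#define MAX_NR_SG 20	/* assumed slot limit, for illustration only */

/* Mirrors the driver's slot-limit decision on plain integers;
 * period_len is assumed nonzero (validated earlier by the driver). */
static size_t cyclic_nslots(size_t buf_len, size_t *period_len,
			    size_t burst, bool *use_intermediate)
{
	/* one slot per period plus one to reload */
	size_t nslots = buf_len / *period_len + 1;

	*use_intermediate = false;
	if (nslots > MAX_NR_SG) {
		if (burst != *period_len)
			return 0;	/* cannot be represented: error out */
		*period_len = buf_len;	/* fold the buffer into one period */
		nslots = 2;
		*use_intermediate = true;
	}
	return nslots;
}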
@@ -1570,32 +1589,6 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
-static void edma_tc_set_pm_state(struct edma_tc *tc, bool enable)
-{
-	struct platform_device *tc_pdev;
-	int ret;
-
-	if (!IS_ENABLED(CONFIG_OF) || !tc)
-		return;
-
-	tc_pdev = of_find_device_by_node(tc->node);
-	if (!tc_pdev) {
-		pr_err("%s: TPTC device is not found\n", __func__);
-		return;
-	}
-
-	if (!pm_runtime_enabled(&tc_pdev->dev))
-		pm_runtime_enable(&tc_pdev->dev);
-
-	if (enable)
-		ret = pm_runtime_get_sync(&tc_pdev->dev);
-	else
-		ret = pm_runtime_put_sync(&tc_pdev->dev);
-
-	if (ret < 0)
-		pr_err("%s: pm_runtime_%s_sync() failed for %s\n", __func__,
-		       enable ? "get" : "put", dev_name(&tc_pdev->dev));
-}
-
 /* Alloc channel resources */
 static int edma_alloc_chan_resources(struct dma_chan *chan)
 {
@@ -1632,8 +1625,6 @@ static int edma_alloc_chan_resources(struct dma_chan *chan)
 		EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id,
 		echan->hw_triggered ? "HW" : "SW");
 
-	edma_tc_set_pm_state(echan->tc, true);
-
 	return 0;
 
 err_slot:
@@ -1670,7 +1661,6 @@ static void edma_free_chan_resources(struct dma_chan *chan)
 		echan->alloced = false;
 	}
 
-	edma_tc_set_pm_state(echan->tc, false);
 	echan->tc = NULL;
 	echan->hw_triggered = false;
 
@@ -2417,10 +2407,8 @@ static int edma_pm_suspend(struct device *dev)
 	int i;
 
 	for (i = 0; i < ecc->num_channels; i++) {
-		if (echan[i].alloced) {
+		if (echan[i].alloced)
 			edma_setup_interrupt(&echan[i], false);
-			edma_tc_set_pm_state(echan[i].tc, false);
-		}
 	}
 
 	return 0;
@@ -2450,8 +2438,6 @@ static int edma_pm_resume(struct device *dev)
 
 			/* Set up channel -> slot mapping for the entry slot */
 			edma_set_chmap(&echan[i], echan[i].slot[0]);
-
-			edma_tc_set_pm_state(echan[i].tc, true);
 		}
 	}
 
@@ -2475,7 +2461,8 @@ static struct platform_driver edma_driver = {
 
 static int edma_tptc_probe(struct platform_device *pdev)
 {
-	return 0;
+	pm_runtime_enable(&pdev->dev);
+	return pm_runtime_get_sync(&pdev->dev);
 }
 
 static struct platform_driver edma_tptc_driver = {
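
With the dynamic edma_tc_set_pm_state() toggling gone, each TPTC simply takes a runtime-PM reference at probe time and keeps it forever, so a transfer controller can never be powered down underneath an in-flight transfer. A sketch of that pattern; the remove path is hypothetical and only included for symmetry, since the in-tree driver never unbinds:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

/* Hold the device active from probe onward (sketch of the TPTC pattern) */
static int always_on_probe(struct platform_device *pdev)
{
	pm_runtime_enable(&pdev->dev);
	/* Note: pm_runtime_get_sync() may return 1 if already active */
	return pm_runtime_get_sync(&pdev->dev);
}

/* Hypothetical balancing path; not part of this commit */
static int always_on_remove(struct platform_device *pdev)
{
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}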
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -64,10 +64,10 @@ static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc)
 
 	if (hsuc->direction == DMA_MEM_TO_DEV) {
 		bsr = config->dst_maxburst;
-		mtsr = config->dst_addr_width;
+		mtsr = config->src_addr_width;
 	} else if (hsuc->direction == DMA_DEV_TO_MEM) {
 		bsr = config->src_maxburst;
-		mtsr = config->src_addr_width;
+		mtsr = config->dst_addr_width;
 	}
 
 	hsu_chan_disable(hsuc);
@@ -135,7 +135,7 @@ static u32 hsu_dma_chan_get_sr(struct hsu_dma_chan *hsuc)
 	sr = hsu_chan_readl(hsuc, HSU_CH_SR);
 	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
 
-	return sr;
+	return sr & ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
 }
 
 irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
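
The masking matters because hsu_dma_irq() treats any nonzero status as an event to service: the descriptor-empty bits (16..19) and current-descriptor bits (30..31), added to hsu.h further down, indicate ordinary progress rather than a timeout or error, and leaving them in caused spurious handling. The same idea expressed as a helper, a sketch assuming the new definitions:

/* Keep only the actionable status bits; DESCE/CDESC are progress info */
static inline u32 hsu_sr_actionable(u32 sr)
{
	return sr & ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
}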
@@ -254,10 +254,13 @@ static void hsu_dma_issue_pending(struct dma_chan *chan)
 static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
 {
 	struct hsu_dma_desc *desc = hsuc->desc;
-	size_t bytes = desc->length;
+	size_t bytes = 0;
 	int i;
 
-	i = desc->active % HSU_DMA_CHAN_NR_DESC;
+	for (i = desc->active; i < desc->nents; i++)
+		bytes += desc->sg[i].len;
+
+	i = HSU_DMA_CHAN_NR_DESC - 1;
 	do {
 		bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i));
 	} while (--i >= 0);
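
The corrected residue is thus the sum of two parts: the full length of every scatter-gather entry not yet programmed into hardware (desc->active onward), plus the bytes the hardware still reports outstanding in each in-flight sub-descriptor's transfer size register. A standalone sketch under assumed structure layouts (field names mirror the driver but are simplified):

#include <stddef.h>

#define HSU_DMA_CHAN_NR_DESC 4	/* per the driver */

struct hsu_sg_sketch { size_t len; };

struct hsu_desc_sketch {
	struct hsu_sg_sketch *sg;
	unsigned int nents;
	unsigned int active;	/* first entry not yet handed to hardware */
};

/* dxtsr[i]: bytes the hardware still counts in sub-descriptor i */
static size_t hsu_residue_sketch(const struct hsu_desc_sketch *desc,
				 const unsigned int dxtsr[HSU_DMA_CHAN_NR_DESC])
{
	size_t bytes = 0;
	unsigned int i;

	for (i = desc->active; i < desc->nents; i++)
		bytes += desc->sg[i].len;	/* untouched entries */

	for (i = 0; i < HSU_DMA_CHAN_NR_DESC; i++)
		bytes += dxtsr[i];		/* in-flight remainder */

	return bytes;
}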
--- a/drivers/dma/hsu/hsu.h
+++ b/drivers/dma/hsu/hsu.h
@@ -41,6 +41,9 @@
 #define HSU_CH_SR_DESCTO(x)	BIT(8 + (x))
 #define HSU_CH_SR_DESCTO_ANY	(BIT(11) | BIT(10) | BIT(9) | BIT(8))
 #define HSU_CH_SR_CHE		BIT(15)
+#define HSU_CH_SR_DESCE(x)	BIT(16 + (x))
+#define HSU_CH_SR_DESCE_ANY	(BIT(19) | BIT(18) | BIT(17) | BIT(16))
+#define HSU_CH_SR_CDESC_ANY	(BIT(31) | BIT(30))
 
 /* Bits in HSU_CH_CR */
 #define HSU_CH_CR_CHA		BIT(0)
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -48,6 +48,7 @@ struct omap_chan {
 	unsigned dma_sig;
 	bool cyclic;
 	bool paused;
+	bool running;
 
 	int dma_ch;
 	struct omap_desc *desc;
@@ -294,6 +295,8 @@ static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
 
 	/* Enable channel */
 	omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);
+
+	c->running = true;
 }
 
 static void omap_dma_stop(struct omap_chan *c)
static void omap_dma_stop(struct omap_chan *c) static void omap_dma_stop(struct omap_chan *c)
...@@ -355,6 +358,8 @@ static void omap_dma_stop(struct omap_chan *c) ...@@ -355,6 +358,8 @@ static void omap_dma_stop(struct omap_chan *c)
omap_dma_chan_write(c, CLNK_CTRL, val); omap_dma_chan_write(c, CLNK_CTRL, val);
} }
c->running = false;
} }
static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d, static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
...@@ -673,15 +678,20 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan, ...@@ -673,15 +678,20 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
struct omap_chan *c = to_omap_dma_chan(chan); struct omap_chan *c = to_omap_dma_chan(chan);
struct virt_dma_desc *vd; struct virt_dma_desc *vd;
enum dma_status ret; enum dma_status ret;
uint32_t ccr;
unsigned long flags; unsigned long flags;
ccr = omap_dma_chan_read(c, CCR);
/* The channel is no longer active, handle the completion right away */
if (!(ccr & CCR_ENABLE))
omap_dma_callback(c->dma_ch, 0, c);
ret = dma_cookie_status(chan, cookie, txstate); ret = dma_cookie_status(chan, cookie, txstate);
if (!c->paused && c->running) {
uint32_t ccr = omap_dma_chan_read(c, CCR);
/*
* The channel is no longer active, set the return value
* accordingly
*/
if (!(ccr & CCR_ENABLE))
ret = DMA_COMPLETE;
}
if (ret == DMA_COMPLETE || !txstate) if (ret == DMA_COMPLETE || !txstate)
return ret; return ret;
@@ -945,9 +955,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_memcpy(
 	d->ccr = c->ccr;
 	d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_POSTINC;
 
-	d->cicr = CICR_DROP_IE;
-	if (tx_flags & DMA_PREP_INTERRUPT)
-		d->cicr |= CICR_FRAME_IE;
+	d->cicr = CICR_DROP_IE | CICR_FRAME_IE;
 
 	d->csdp = data_type;
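
Taken together, the omap-dma changes make polled memcpy reliable: the driver keeps its own FRAME interrupt even when the client did not pass DMA_PREP_INTERRUPT, so descriptor housekeeping still happens, while a polling client observes DMA_COMPLETE as soon as CCR_ENABLE drops on a channel that is running and not paused. A client-side sketch (error handling trimmed; assumes the channel was obtained elsewhere):

#include <linux/dmaengine.h>

/* Sketch: poll a memcpy to completion instead of requesting an IRQ */
static int omap_polled_memcpy(struct dma_chan *chan, dma_addr_t dst,
			      dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* No DMA_PREP_INTERRUPT in flags: the caller intends to poll */
	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, 0);
	if (!tx)
		return -ENOMEM;

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);

	/* tx_status now reports DMA_COMPLETE once CCR_ENABLE clears */
	while (dmaengine_tx_status(chan, cookie, NULL) != DMA_COMPLETE)
		cpu_relax();

	return 0;
}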
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -1236,7 +1236,7 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
 	struct xilinx_vdma_device *xdev = ofdma->of_dma_data;
 	int chan_id = dma_spec->args[0];
 
-	if (chan_id >= XILINX_VDMA_MAX_CHANS_PER_DEVICE)
+	if (chan_id >= XILINX_VDMA_MAX_CHANS_PER_DEVICE || !xdev->chan[chan_id])
 		return NULL;
 
 	return dma_get_slave_channel(&xdev->chan[chan_id]->common);
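
The crash this guards against is straightforward: xdev->chan[] may legitimately contain NULL holes when the device tree disables one of the VDMA channels, and a client whose dmas property names such a hole previously dereferenced it. A reduced, userspace-compilable sketch of the guarded lookup (all names are stand-ins):

#include <stdio.h>

#define MAX_CHANS 2	/* stands in for XILINX_VDMA_MAX_CHANS_PER_DEVICE */

struct chan_stub { int id; };

/* Guarded translation: reject out-of-range ids and unprobed slots */
static struct chan_stub *xlate_stub(struct chan_stub *chans[], int chan_id)
{
	if (chan_id >= MAX_CHANS || !chans[chan_id])
		return NULL;
	return chans[chan_id];
}

int main(void)
{
	struct chan_stub rx = { 0 };
	/* TX channel disabled in DT: slot 1 stays NULL */
	struct chan_stub *chans[MAX_CHANS] = { &rx, NULL };

	printf("id 0 -> %p\n", (void *)xlate_stub(chans, 0));	/* valid */
	printf("id 1 -> %p\n", (void *)xlate_stub(chans, 1));	/* NULL, no crash */
	printf("id 7 -> %p\n", (void *)xlate_stub(chans, 7));	/* NULL, no crash */
	return 0;
}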