Commit f0ad1771 authored by Linus Torvalds

Merge tag 'dmaengine-fix-4.10-rc4' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine fixes from Vinod Koul:
 "The fixes this time around are spread over drivers, pretty normal
  update:

   - PCI ID for SKL ioatdma, workaround for SKX and
     ioat_alloc_chan_resources sleepy allocation fix

   - dw kconfig typo fix

   - null pointer deref for stm32

   - MAINTAINERS Update for at_hdmac

   - pl330 runtime pm fixes

   - omap-dma port window fix

   - rcar-dmac unmap slave resource fix"

* tag 'dmaengine-fix-4.10-rc4' of git://git.infradead.org/users/vkoul/slave-dma:
  dmaengine: rcar-dmac: unmap slave resource when channel is freed
  dmaengine: omap-dma: Fix the port_window support
  dmaengine: ioat: ioat_alloc_chan_resources should not perform sleeping allocations.
  dmaengine: pl330: Fix runtime PM support for terminated transfers
  MAINTAINERS: dmaengine: Update + Hand over the at_hdmac driver to Ludovic
  dmaengine: omap-dma: Fix dynamic lch_map allocation
  dmaengine: ti-dma-crossbar: Add some 'of_node_put()' in error path.
  dmaengine: stm32-dma: Fix null pointer dereference in stm32_dma_tx_status
  dmaengine: stm32-dma: Set correct args number for DMA request from DT
  dmaengine: dw: fix typo in Kconfig
  dmaengine: ioatdma: workaround SKX ioatdma version
  dmaengine: ioatdma: Add Skylake PCI Dev ID
parents e96f8f18 3139dc8d
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2193,14 +2193,6 @@ L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:	Supported
 F:	sound/soc/atmel
 
-ATMEL DMA DRIVER
-M:	Nicolas Ferre <nicolas.ferre@atmel.com>
-L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S:	Supported
-F:	drivers/dma/at_hdmac.c
-F:	drivers/dma/at_hdmac_regs.h
-F:	include/linux/platform_data/dma-atmel.h
-
 ATMEL XDMA DRIVER
 M:	Ludovic Desroches <ludovic.desroches@atmel.com>
 L:	linux-arm-kernel@lists.infradead.org
@@ -8178,6 +8170,15 @@ S:	Maintained
 F:	drivers/tty/serial/atmel_serial.c
 F:	include/linux/atmel_serial.h
 
+MICROCHIP / ATMEL DMA DRIVER
+M:	Ludovic Desroches <ludovic.desroches@microchip.com>
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L:	dmaengine@vger.kernel.org
+S:	Supported
+F:	drivers/dma/at_hdmac.c
+F:	drivers/dma/at_hdmac_regs.h
+F:	include/linux/platform_data/dma-atmel.h
+
 MICROCHIP / ATMEL ISC DRIVER
 M:	Songjun Wu <songjun.wu@microchip.com>
 L:	linux-media@vger.kernel.org
--- a/drivers/dma/dw/Kconfig
+++ b/drivers/dma/dw/Kconfig
@@ -24,5 +24,5 @@ config DW_DMAC_PCI
 	select DW_DMAC_CORE
 	help
 	  Support the Synopsys DesignWare AHB DMA controller on the
-	  platfroms that enumerate it as a PCI device. For example,
+	  platforms that enumerate it as a PCI device. For example,
 	  Intel Medfield has integrated this GPDMA controller.
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -64,6 +64,8 @@
 #define PCI_DEVICE_ID_INTEL_IOAT_BDX8	0x6f2e
 #define PCI_DEVICE_ID_INTEL_IOAT_BDX9	0x6f2f
 
+#define PCI_DEVICE_ID_INTEL_IOAT_SKX	0x2021
+
 #define IOAT_VER_1_2            0x12    /* Version 1.2 */
 #define IOAT_VER_2_0            0x20    /* Version 2.0 */
 #define IOAT_VER_3_0            0x30    /* Version 3.0 */
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -106,6 +106,8 @@ static struct pci_device_id ioat_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX8) },
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX9) },
 
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SKX) },
+
 	/* I/OAT v3.3 platforms */
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) },
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) },
@@ -243,10 +245,15 @@ static bool is_bdx_ioat(struct pci_dev *pdev)
 	}
 }
 
+static inline bool is_skx_ioat(struct pci_dev *pdev)
+{
+	return (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_SKX) ? true : false;
+}
+
 static bool is_xeon_cb32(struct pci_dev *pdev)
 {
 	return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
-		is_hsw_ioat(pdev) || is_bdx_ioat(pdev);
+		is_hsw_ioat(pdev) || is_bdx_ioat(pdev) || is_skx_ioat(pdev);
 }
 
 bool is_bwd_ioat(struct pci_dev *pdev)
@@ -693,7 +700,7 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
 	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
 	ioat_chan->completion =
 		dma_pool_zalloc(ioat_chan->ioat_dma->completion_pool,
-				GFP_KERNEL, &ioat_chan->completion_dma);
+				GFP_NOWAIT, &ioat_chan->completion_dma);
 	if (!ioat_chan->completion)
 		return -ENOMEM;
@@ -703,7 +710,7 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
 	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
 
 	order = IOAT_MAX_ORDER;
-	ring = ioat_alloc_ring(c, order, GFP_KERNEL);
+	ring = ioat_alloc_ring(c, order, GFP_NOWAIT);
 	if (!ring)
 		return -ENOMEM;
@@ -1357,6 +1364,8 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	device->version = readb(device->reg_base + IOAT_VER_OFFSET);
 	if (device->version >= IOAT_VER_3_0) {
+		if (is_skx_ioat(pdev))
+			device->version = IOAT_VER_3_2;
 		err = ioat3_dma_probe(device, ioat_dca_enabled);
 
 		if (device->version >= IOAT_VER_3_3)
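Beyond the new device ID, the two GFP_KERNEL to GFP_NOWAIT conversions are the "sleepy allocation" fix from the summary above: ioat_alloc_chan_resources() can be reached from a context that must not sleep, while GFP_KERNEL allocations may block on memory reclaim. A minimal sketch of the underlying rule, using a hypothetical ring_state structure rather than the driver's types:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct ring_state {		/* hypothetical, for illustration only */
	spinlock_t lock;
	void *buf;
};

static int grow_ring(struct ring_state *rs, size_t len)
{
	void *buf;

	spin_lock_bh(&rs->lock);
	/* GFP_KERNEL could sleep here, which is illegal while the
	 * spinlock is held; GFP_NOWAIT fails fast instead. */
	buf = kmalloc(len, GFP_NOWAIT);
	if (!buf) {
		spin_unlock_bh(&rs->lock);
		return -ENOMEM;
	}
	rs->buf = buf;
	spin_unlock_bh(&rs->lock);
	return 0;
}

GFP_NOWAIT never sleeps, it simply returns NULL under memory pressure, which is why both converted call sites keep their existing -ENOMEM handling.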
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -938,21 +938,14 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
 		d->ccr |= CCR_DST_AMODE_POSTINC;
 		if (port_window) {
 			d->ccr |= CCR_SRC_AMODE_DBLIDX;
-			d->ei = 1;
-			/*
-			 * One frame covers the port_window and by configure
-			 * the source frame index to be -1 * (port_window - 1)
-			 * we instruct the sDMA that after a frame is processed
-			 * it should move back to the start of the window.
-			 */
-			d->fi = -(port_window_bytes - 1);
 
 			if (port_window_bytes >= 64)
-				d->csdp = CSDP_SRC_BURST_64 | CSDP_SRC_PACKED;
+				d->csdp |= CSDP_SRC_BURST_64;
 			else if (port_window_bytes >= 32)
-				d->csdp = CSDP_SRC_BURST_32 | CSDP_SRC_PACKED;
+				d->csdp |= CSDP_SRC_BURST_32;
 			else if (port_window_bytes >= 16)
-				d->csdp = CSDP_SRC_BURST_16 | CSDP_SRC_PACKED;
+				d->csdp |= CSDP_SRC_BURST_16;
 		} else {
 			d->ccr |= CCR_SRC_AMODE_CONSTANT;
 		}
@@ -962,13 +955,21 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
 		d->ccr |= CCR_SRC_AMODE_POSTINC;
 		if (port_window) {
 			d->ccr |= CCR_DST_AMODE_DBLIDX;
+			d->ei = 1;
+			/*
+			 * One frame covers the port_window and by configure
+			 * the source frame index to be -1 * (port_window - 1)
+			 * we instruct the sDMA that after a frame is processed
+			 * it should move back to the start of the window.
+			 */
+			d->fi = -(port_window_bytes - 1);
 
 			if (port_window_bytes >= 64)
-				d->csdp = CSDP_DST_BURST_64 | CSDP_DST_PACKED;
+				d->csdp |= CSDP_DST_BURST_64;
 			else if (port_window_bytes >= 32)
-				d->csdp = CSDP_DST_BURST_32 | CSDP_DST_PACKED;
+				d->csdp |= CSDP_DST_BURST_32;
 			else if (port_window_bytes >= 16)
-				d->csdp = CSDP_DST_BURST_16 | CSDP_DST_PACKED;
+				d->csdp |= CSDP_DST_BURST_16;
 		} else {
 			d->ccr |= CCR_DST_AMODE_CONSTANT;
 		}
@@ -1017,7 +1018,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
 		osg->addr = sg_dma_address(sgent);
 		osg->en = en;
 		osg->fn = sg_dma_len(sgent) / frame_bytes;
-		if (port_window && dir == DMA_MEM_TO_DEV) {
+		if (port_window && dir == DMA_DEV_TO_MEM) {
 			osg->ei = 1;
 			/*
 			 * One frame covers the port_window and by configure
@@ -1452,6 +1453,7 @@ static int omap_dma_probe(struct platform_device *pdev)
 	struct omap_dmadev *od;
 	struct resource *res;
 	int rc, i, irq;
+	u32 lch_count;
 
 	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
 	if (!od)
@@ -1494,20 +1496,31 @@ static int omap_dma_probe(struct platform_device *pdev)
 	spin_lock_init(&od->lock);
 	spin_lock_init(&od->irq_lock);
 
-	if (!pdev->dev.of_node) {
-		od->dma_requests = od->plat->dma_attr->lch_count;
-		if (unlikely(!od->dma_requests))
-			od->dma_requests = OMAP_SDMA_REQUESTS;
-	} else if (of_property_read_u32(pdev->dev.of_node, "dma-requests",
-					&od->dma_requests)) {
+	/* Number of DMA requests */
+	od->dma_requests = OMAP_SDMA_REQUESTS;
+	if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
+						      "dma-requests",
+						      &od->dma_requests)) {
 		dev_info(&pdev->dev,
 			 "Missing dma-requests property, using %u.\n",
 			 OMAP_SDMA_REQUESTS);
-		od->dma_requests = OMAP_SDMA_REQUESTS;
 	}
 
-	od->lch_map = devm_kcalloc(&pdev->dev, od->dma_requests,
-				   sizeof(*od->lch_map), GFP_KERNEL);
+	/* Number of available logical channels */
+	if (!pdev->dev.of_node) {
+		lch_count = od->plat->dma_attr->lch_count;
+		if (unlikely(!lch_count))
+			lch_count = OMAP_SDMA_CHANNELS;
+	} else if (of_property_read_u32(pdev->dev.of_node, "dma-channels",
+					&lch_count)) {
+		dev_info(&pdev->dev,
+			 "Missing dma-channels property, using %u.\n",
+			 OMAP_SDMA_CHANNELS);
+		lch_count = OMAP_SDMA_CHANNELS;
+	}
+
+	od->lch_map = devm_kcalloc(&pdev->dev, lch_count, sizeof(*od->lch_map),
+				   GFP_KERNEL);
 	if (!od->lch_map)
 		return -ENOMEM;
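The relocated ei/fi setup deserves a worked example. With element index ei = 1 the address advances normally within a frame; with frame index fi = -(port_window_bytes - 1) the address jumps back by the window size minus one at each frame boundary, landing on the window base again. A toy host-side model of that wrap (the post-frame "add fi" semantics are an assumption drawn from the in-diff comment, not from sDMA documentation):

#include <stdio.h>

int main(void)
{
	const int port_window_bytes = 16;	/* e.g. 8 x 16-bit registers */
	const int fi = -(port_window_bytes - 1);
	int offset = 0;				/* byte offset into the window */

	for (int frame = 0; frame < 3; frame++) {
		/* walk from the first to the last byte of the window */
		for (int step = 0; step < port_window_bytes - 1; step++)
			offset++;
		offset += fi;	/* frame index: hop back to the window base */
		printf("after frame %d: offset %d\n", frame, offset);
	}
	return 0;	/* prints "offset 0" after every frame */
}

The moves between the two direction branches line up this indexing with the device-facing end of the transfer (where the port window actually sits), which the original code had applied to the wrong side.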
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -448,6 +448,9 @@ struct dma_pl330_chan {
 	/* for cyclic capability */
 	bool cyclic;
+
+	/* for runtime pm tracking */
+	bool active;
 };
 
 struct pl330_dmac {
@@ -2033,6 +2036,7 @@ static void pl330_tasklet(unsigned long data)
 			_stop(pch->thread);
 			spin_unlock(&pch->thread->dmac->lock);
 			power_down = true;
+			pch->active = false;
 		} else {
 			/* Make sure the PL330 Channel thread is active */
 			spin_lock(&pch->thread->dmac->lock);
@@ -2052,6 +2056,7 @@ static void pl330_tasklet(unsigned long data)
 			desc->status = PREP;
 			list_move_tail(&desc->node, &pch->work_list);
 			if (power_down) {
+				pch->active = true;
 				spin_lock(&pch->thread->dmac->lock);
 				_start(pch->thread);
 				spin_unlock(&pch->thread->dmac->lock);
@@ -2166,6 +2171,7 @@ static int pl330_terminate_all(struct dma_chan *chan)
 	unsigned long flags;
 	struct pl330_dmac *pl330 = pch->dmac;
 	LIST_HEAD(list);
+	bool power_down = false;
 
 	pm_runtime_get_sync(pl330->ddma.dev);
 	spin_lock_irqsave(&pch->lock, flags);
@@ -2176,6 +2182,8 @@ static int pl330_terminate_all(struct dma_chan *chan)
 		pch->thread->req[0].desc = NULL;
 		pch->thread->req[1].desc = NULL;
 		pch->thread->req_running = -1;
+		power_down = pch->active;
+		pch->active = false;
 
 		/* Mark all desc done */
 		list_for_each_entry(desc, &pch->submitted_list, node) {
@@ -2193,6 +2201,8 @@ static int pl330_terminate_all(struct dma_chan *chan)
 	list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
 	spin_unlock_irqrestore(&pch->lock, flags);
 	pm_runtime_mark_last_busy(pl330->ddma.dev);
+	if (power_down)
+		pm_runtime_put_autosuspend(pl330->ddma.dev);
 	pm_runtime_put_autosuspend(pl330->ddma.dev);
 
 	return 0;
@@ -2357,6 +2367,7 @@ static void pl330_issue_pending(struct dma_chan *chan)
 		 * updated on work_list emptiness status.
 		 */
 		WARN_ON(list_empty(&pch->submitted_list));
+		pch->active = true;
 		pm_runtime_get_sync(pch->dmac->ddma.dev);
 	}
 	list_splice_tail_init(&pch->submitted_list, &pch->work_list);
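The new active flag exists to balance runtime PM references. pl330_issue_pending() takes a pm_runtime reference for a running transfer, and the tasklet drops it when the work list drains; a terminated transfer never reaches that path, so before this fix the reference leaked and the controller could stay powered indefinitely. A sketch of the balancing pattern with a hypothetical chan_state (it assumes pm_runtime_irq_safe() was enabled at probe time, as pl330 does, which is what makes get/put legal under a spinlock):

#include <linux/pm_runtime.h>
#include <linux/spinlock.h>

struct chan_state {		/* hypothetical, for illustration only */
	spinlock_t lock;
	struct device *dev;
	bool active;		/* true while a PM reference is outstanding */
};

static void chan_issue(struct chan_state *ch)
{
	spin_lock(&ch->lock);
	if (!ch->active) {
		ch->active = true;
		/* one reference per running transfer */
		pm_runtime_get_sync(ch->dev);
	}
	spin_unlock(&ch->lock);
}

/* Called on both normal completion and terminate_all. */
static void chan_stop(struct chan_state *ch)
{
	bool drop;

	spin_lock(&ch->lock);
	drop = ch->active;
	ch->active = false;
	spin_unlock(&ch->lock);

	if (drop)	/* drop it exactly once, whichever path ran first */
		pm_runtime_put_autosuspend(ch->dev);
}

Whichever path runs first clears active, so the transfer's reference is released exactly once.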
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -986,6 +986,7 @@ static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
 {
 	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
 	struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
+	struct rcar_dmac_chan_map *map = &rchan->map;
 	struct rcar_dmac_desc_page *page, *_page;
 	struct rcar_dmac_desc *desc;
 	LIST_HEAD(list);
@@ -1019,6 +1020,13 @@ static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
 		free_page((unsigned long)page);
 	}
 
+	/* Remove slave mapping if present. */
+	if (map->slave.xfer_size) {
+		dma_unmap_resource(chan->device->dev, map->addr,
+				   map->slave.xfer_size, map->dir, 0);
+		map->slave.xfer_size = 0;
+	}
+
 	pm_runtime_put(chan->device->dev);
 }
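The unmap mirrors a dma_map_resource() done when the slave address was first configured for the channel; without it, releasing and reacquiring the channel left a stale mapping behind (and leaked IOMMU space on systems with an IPMMU). A sketch of the pairing with hypothetical helper names, not the driver's own functions:

#include <linux/dma-mapping.h>

struct slave_map {		/* hypothetical, mirrors the idea of rchan->map */
	dma_addr_t addr;
	size_t size;
	enum dma_data_direction dir;
};

static int slave_map_fifo(struct device *dev, struct slave_map *map,
			  phys_addr_t fifo, size_t size,
			  enum dma_data_direction dir)
{
	map->addr = dma_map_resource(dev, fifo, size, dir, 0);
	if (dma_mapping_error(dev, map->addr))
		return -EIO;
	map->size = size;
	map->dir = dir;
	return 0;
}

static void slave_unmap_fifo(struct device *dev, struct slave_map *map)
{
	if (!map->size)		/* nothing mapped for this channel */
		return;
	dma_unmap_resource(dev, map->addr, map->size, map->dir, 0);
	map->size = 0;		/* zero size marks "unmapped", as in the fix */
}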
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -880,7 +880,7 @@ static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
 	struct virt_dma_desc *vdesc;
 	enum dma_status status;
 	unsigned long flags;
-	u32 residue;
+	u32 residue = 0;
 
 	status = dma_cookie_status(c, cookie, state);
 	if ((status == DMA_COMPLETE) || (!state))
@@ -888,16 +888,12 @@ static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
 
 	spin_lock_irqsave(&chan->vchan.lock, flags);
 	vdesc = vchan_find_desc(&chan->vchan, cookie);
-	if (cookie == chan->desc->vdesc.tx.cookie) {
+	if (chan->desc && cookie == chan->desc->vdesc.tx.cookie)
 		residue = stm32_dma_desc_residue(chan, chan->desc,
 						 chan->next_sg);
-	} else if (vdesc) {
+	else if (vdesc)
 		residue = stm32_dma_desc_residue(chan,
 						 to_stm32_dma_desc(vdesc), 0);
-	} else {
-		residue = 0;
-	}
 	dma_set_residue(state, residue);
 
 	spin_unlock_irqrestore(&chan->vchan.lock, flags);
@@ -972,21 +968,18 @@ static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
 	struct stm32_dma_chan *chan;
 	struct dma_chan *c;
 
-	if (dma_spec->args_count < 3)
+	if (dma_spec->args_count < 4)
 		return NULL;
 
 	cfg.channel_id = dma_spec->args[0];
 	cfg.request_line = dma_spec->args[1];
 	cfg.stream_config = dma_spec->args[2];
-	cfg.threshold = 0;
+	cfg.threshold = dma_spec->args[3];
 
 	if ((cfg.channel_id >= STM32_DMA_MAX_CHANNELS) || (cfg.request_line >=
 				STM32_DMA_MAX_REQUEST_ID))
 		return NULL;
 
-	if (dma_spec->args_count > 3)
-		cfg.threshold = dma_spec->args[3];
-
 	chan = &dmadev->chan[cfg.channel_id];
 
 	c = dma_get_slave_channel(&chan->vchan.chan);
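Two independent fixes here: stm32_dma_tx_status() no longer dereferences chan->desc before checking it (initializing residue to 0 replaces the dropped else branch), and the of_xlate now demands all four specifier cells so the FIFO threshold always comes from the device tree. A sketch of the strengthened specifier check, with hypothetical my_* names rather than the stm32 code:

#include <linux/dmaengine.h>
#include <linux/of_dma.h>

#define MY_MAX_CHANNELS	8	/* illustrative limit */

struct my_cfg {
	u32 channel_id;
	u32 request_line;
	u32 stream_config;
	u32 threshold;
};

/* Assumed helper: looks up the channel and applies cfg. */
struct dma_chan *my_config_and_get(struct my_cfg *cfg, struct of_dma *ofdma);

static struct dma_chan *my_of_xlate(struct of_phandle_args *dma_spec,
				    struct of_dma *ofdma)
{
	struct my_cfg cfg;

	if (dma_spec->args_count < 4)	/* all four cells are mandatory */
		return NULL;

	cfg.channel_id = dma_spec->args[0];
	cfg.request_line = dma_spec->args[1];
	cfg.stream_config = dma_spec->args[2];
	cfg.threshold = dma_spec->args[3];	/* no silent default */

	/* Range-check indices taken from the DT before using them. */
	if (cfg.channel_id >= MY_MAX_CHANNELS)
		return NULL;

	return my_config_and_get(&cfg, ofdma);
}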
--- a/drivers/dma/ti-dma-crossbar.c
+++ b/drivers/dma/ti-dma-crossbar.c
@@ -149,6 +149,7 @@ static int ti_am335x_xbar_probe(struct platform_device *pdev)
 	match = of_match_node(ti_am335x_master_match, dma_node);
 	if (!match) {
 		dev_err(&pdev->dev, "DMA master is not supported\n");
+		of_node_put(dma_node);
 		return -EINVAL;
 	}
 
@@ -339,6 +340,7 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev)
 	match = of_match_node(ti_dra7_master_match, dma_node);
 	if (!match) {
 		dev_err(&pdev->dev, "DMA master is not supported\n");
+		of_node_put(dma_node);
 		return -EINVAL;
 	}
 
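Both probe functions share the same bug class: of_parse_phandle() returns dma_node with its reference count raised, and the early-return error path skipped the matching of_node_put(). A sketch of the rule (hypothetical helper, not the crossbar code):

#include <linux/of.h>

static int example_probe_helper(struct device_node *np)
{
	struct device_node *dma_node;
	int ret = 0;

	dma_node = of_parse_phandle(np, "dma-masters", 0);
	if (!dma_node)
		return -ENODEV;	/* nothing acquired, nothing to put */

	if (!of_device_is_available(dma_node))
		ret = -EINVAL;	/* error path still falls through to put */

	of_node_put(dma_node);	/* balance of_parse_phandle() */
	return ret;
}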