Commit a41efc2a authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'dmaengine-4.20-rc1' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine updates from Vinod Koul:

 - Support for ColdFire mcf5441x edma controller

 - Support for link list mode in sprd dma

 - More users of managed dmaenginem_async_device_register API

 - Cyclic mode support in owl dma driver

 - DT updates for renesas drivers, dma-jz4780 updates and support for
   JZ4770, JZ4740 and JZ4725B controllers

 - Removal of the deprecated dma_slave_config direction field from
   dmaengine drivers; a few more users will be converted in the next
   cycle, after which the field will be removed entirely.

 - Minor updates to idma64, ioat, pxa, ppc drivers

* tag 'dmaengine-4.20-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (66 commits)
  dmaengine: ppc4xx: fix off-by-one build failure
  dmaengine: owl: Fix warnings generated during build
  dmaengine: fsl-edma: remove dma_slave_config direction usage
  dmaengine: rcar-dmac: set scatter/gather max segment size
  dmaengine: mmp_tdma: remove dma_slave_config direction usage
  dmaengine: ep93xx_dma: remove dma_slave_config direction usage
  dmaengine: k3dma: remove dma_slave_config direction usage
  dmaengine: k3dma: dont use direction for memcpy
  dmaengine: imx-dma: remove dma_slave_config direction usage
  dmaengine: idma: remove dma_slave_config direction usage
  dmaengine: hsu: remove dma_slave_config direction usage
  dmaengine: dw: remove dma_slave_config direction usage
  dmaengine: jz4740: remove dma_slave_config direction usage
  dmaengine: coh901318: remove dma_slave_config direction usage
  dmaengine: bcm2835: remove dma_slave_config direction usage
  dmaengine: at_hdmac: remove dma_slave_config direction usage
  dmaengine: owl: Add Slave and Cyclic mode support for Actions Semi Owl S900 SoC
  dmaengine: ioat: fix prototype of ioat_enumerate_channels
  dmaengine: stm32-dma: check whether length is aligned on FIFO threshold
  dt-bindings: dmaengine: usb-dmac: Add binding for r8a7744
  ...
parents 36168d71 71f021cf
...@@ -2,8 +2,13 @@ ...@@ -2,8 +2,13 @@
Required properties: Required properties:
- compatible: Should be "ingenic,jz4780-dma" - compatible: Should be one of:
- reg: Should contain the DMA controller registers location and length. * ingenic,jz4740-dma
* ingenic,jz4725b-dma
* ingenic,jz4770-dma
* ingenic,jz4780-dma
- reg: Should contain the DMA channel registers location and length, followed
by the DMA controller registers location and length.
- interrupts: Should contain the interrupt specifier of the DMA controller. - interrupts: Should contain the interrupt specifier of the DMA controller.
- clocks: Should contain a clock specifier for the JZ4780 PDMA clock. - clocks: Should contain a clock specifier for the JZ4780 PDMA clock.
- #dma-cells: Must be <2>. Number of integer cells in the dmas property of - #dma-cells: Must be <2>. Number of integer cells in the dmas property of
...@@ -19,9 +24,10 @@ Optional properties: ...@@ -19,9 +24,10 @@ Optional properties:
Example: Example:
dma: dma@13420000 { dma: dma-controller@13420000 {
compatible = "ingenic,jz4780-dma"; compatible = "ingenic,jz4780-dma";
reg = <0x13420000 0x10000>; reg = <0x13420000 0x400
0x13421000 0x40>;
interrupt-parent = <&intc>; interrupt-parent = <&intc>;
interrupts = <10>; interrupts = <10>;
......
...@@ -17,6 +17,7 @@ Required Properties: ...@@ -17,6 +17,7 @@ Required Properties:
- compatible: "renesas,dmac-<soctype>", "renesas,rcar-dmac" as fallback. - compatible: "renesas,dmac-<soctype>", "renesas,rcar-dmac" as fallback.
Examples with soctypes are: Examples with soctypes are:
- "renesas,dmac-r8a7743" (RZ/G1M) - "renesas,dmac-r8a7743" (RZ/G1M)
- "renesas,dmac-r8a7744" (RZ/G1N)
- "renesas,dmac-r8a7745" (RZ/G1E) - "renesas,dmac-r8a7745" (RZ/G1E)
- "renesas,dmac-r8a77470" (RZ/G1C) - "renesas,dmac-r8a77470" (RZ/G1C)
- "renesas,dmac-r8a7790" (R-Car H2) - "renesas,dmac-r8a7790" (R-Car H2)
......
...@@ -4,6 +4,7 @@ Required Properties: ...@@ -4,6 +4,7 @@ Required Properties:
-compatible: "renesas,<soctype>-usb-dmac", "renesas,usb-dmac" as fallback. -compatible: "renesas,<soctype>-usb-dmac", "renesas,usb-dmac" as fallback.
Examples with soctypes are: Examples with soctypes are:
- "renesas,r8a7743-usb-dmac" (RZ/G1M) - "renesas,r8a7743-usb-dmac" (RZ/G1M)
- "renesas,r8a7744-usb-dmac" (RZ/G1N)
- "renesas,r8a7745-usb-dmac" (RZ/G1E) - "renesas,r8a7745-usb-dmac" (RZ/G1E)
- "renesas,r8a7790-usb-dmac" (R-Car H2) - "renesas,r8a7790-usb-dmac" (R-Car H2)
- "renesas,r8a7791-usb-dmac" (R-Car M2-W) - "renesas,r8a7791-usb-dmac" (R-Car M2-W)
......
...@@ -154,6 +154,21 @@ uart1: serial@10031000 { ...@@ -154,6 +154,21 @@ uart1: serial@10031000 {
clock-names = "baud", "module"; clock-names = "baud", "module";
}; };
dmac: dma-controller@13020000 {
compatible = "ingenic,jz4740-dma";
reg = <0x13020000 0xbc
0x13020300 0x14>;
#dma-cells = <2>;
interrupt-parent = <&intc>;
interrupts = <29>;
clocks = <&cgu JZ4740_CLK_DMA>;
/* Disable dmac until we have something that uses it */
status = "disabled";
};
uhc: uhc@13030000 { uhc: uhc@13030000 {
compatible = "ingenic,jz4740-ohci", "generic-ohci"; compatible = "ingenic,jz4740-ohci", "generic-ohci";
reg = <0x13030000 0x1000>; reg = <0x13030000 0x1000>;
......
...@@ -196,6 +196,36 @@ uart3: serial@10033000 { ...@@ -196,6 +196,36 @@ uart3: serial@10033000 {
status = "disabled"; status = "disabled";
}; };
dmac0: dma-controller@13420000 {
compatible = "ingenic,jz4770-dma";
reg = <0x13420000 0xC0
0x13420300 0x20>;
#dma-cells = <1>;
clocks = <&cgu JZ4770_CLK_DMA>;
interrupt-parent = <&intc>;
interrupts = <24>;
/* Disable dmac0 until we have something that uses it */
status = "disabled";
};
dmac1: dma-controller@13420100 {
compatible = "ingenic,jz4770-dma";
reg = <0x13420100 0xC0
0x13420400 0x20>;
#dma-cells = <1>;
clocks = <&cgu JZ4770_CLK_DMA>;
interrupt-parent = <&intc>;
interrupts = <23>;
/* Disable dmac1 until we have something that uses it */
status = "disabled";
};
uhc: uhc@13430000 { uhc: uhc@13430000 {
compatible = "generic-ohci"; compatible = "generic-ohci";
reg = <0x13430000 0x1000>; reg = <0x13430000 0x1000>;
......
...@@ -266,7 +266,8 @@ nemc: nemc@13410000 { ...@@ -266,7 +266,8 @@ nemc: nemc@13410000 {
dma: dma@13420000 { dma: dma@13420000 {
compatible = "ingenic,jz4780-dma"; compatible = "ingenic,jz4780-dma";
reg = <0x13420000 0x10000>; reg = <0x13420000 0x400
0x13421000 0x40>;
#dma-cells = <2>; #dma-cells = <2>;
interrupt-parent = <&intc>; interrupt-parent = <&intc>;
......
...@@ -143,7 +143,7 @@ config DMA_JZ4740 ...@@ -143,7 +143,7 @@ config DMA_JZ4740
config DMA_JZ4780 config DMA_JZ4780
tristate "JZ4780 DMA support" tristate "JZ4780 DMA support"
depends on MACH_JZ4780 || COMPILE_TEST depends on MIPS || COMPILE_TEST
select DMA_ENGINE select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS select DMA_VIRTUAL_CHANNELS
help help
...@@ -321,6 +321,17 @@ config LPC18XX_DMAMUX ...@@ -321,6 +321,17 @@ config LPC18XX_DMAMUX
Enable support for DMA on NXP LPC18xx/43xx platforms Enable support for DMA on NXP LPC18xx/43xx platforms
with PL080 and multiplexed DMA request lines. with PL080 and multiplexed DMA request lines.
config MCF_EDMA
tristate "Freescale eDMA engine support, ColdFire mcf5441x SoCs"
depends on M5441x || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
Support the Freescale ColdFire eDMA engine, 64-channel
implementation that performs complex data transfers with
minimal intervention from a host processor.
This module can be found on Freescale ColdFire mcf5441x SoCs.
config MMP_PDMA config MMP_PDMA
bool "MMP PDMA support" bool "MMP PDMA support"
depends on ARCH_MMP || ARCH_PXA || COMPILE_TEST depends on ARCH_MMP || ARCH_PXA || COMPILE_TEST
......
...@@ -31,7 +31,8 @@ obj-$(CONFIG_DW_AXI_DMAC) += dw-axi-dmac/ ...@@ -31,7 +31,8 @@ obj-$(CONFIG_DW_AXI_DMAC) += dw-axi-dmac/
obj-$(CONFIG_DW_DMAC_CORE) += dw/ obj-$(CONFIG_DW_DMAC_CORE) += dw/
obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
obj-$(CONFIG_FSL_DMA) += fsldma.o obj-$(CONFIG_FSL_DMA) += fsldma.o
obj-$(CONFIG_FSL_EDMA) += fsl-edma.o obj-$(CONFIG_FSL_EDMA) += fsl-edma.o fsl-edma-common.o
obj-$(CONFIG_MCF_EDMA) += mcf-edma.o fsl-edma-common.o
obj-$(CONFIG_FSL_RAID) += fsl_raid.o obj-$(CONFIG_FSL_RAID) += fsl_raid.o
obj-$(CONFIG_HSU_DMA) += hsu/ obj-$(CONFIG_HSU_DMA) += hsu/
obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
......
...@@ -1320,7 +1320,7 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, ...@@ -1320,7 +1320,7 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
if (unlikely(!is_slave_direction(direction))) if (unlikely(!is_slave_direction(direction)))
goto err_out; goto err_out;
if (sconfig->direction == DMA_MEM_TO_DEV) if (direction == DMA_MEM_TO_DEV)
reg_width = convert_buswidth(sconfig->dst_addr_width); reg_width = convert_buswidth(sconfig->dst_addr_width);
else else
reg_width = convert_buswidth(sconfig->src_addr_width); reg_width = convert_buswidth(sconfig->src_addr_width);
......
...@@ -1600,7 +1600,7 @@ static void at_xdmac_tasklet(unsigned long data) ...@@ -1600,7 +1600,7 @@ static void at_xdmac_tasklet(unsigned long data)
if (atchan->status & AT_XDMAC_CIS_ROIS) if (atchan->status & AT_XDMAC_CIS_ROIS)
dev_err(chan2dev(&atchan->chan), "request overflow error!!!"); dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
spin_lock_bh(&atchan->lock); spin_lock(&atchan->lock);
desc = list_first_entry(&atchan->xfers_list, desc = list_first_entry(&atchan->xfers_list,
struct at_xdmac_desc, struct at_xdmac_desc,
xfer_node); xfer_node);
...@@ -1610,7 +1610,7 @@ static void at_xdmac_tasklet(unsigned long data) ...@@ -1610,7 +1610,7 @@ static void at_xdmac_tasklet(unsigned long data)
txd = &desc->tx_dma_desc; txd = &desc->tx_dma_desc;
at_xdmac_remove_xfer(atchan, desc); at_xdmac_remove_xfer(atchan, desc);
spin_unlock_bh(&atchan->lock); spin_unlock(&atchan->lock);
if (!at_xdmac_chan_is_cyclic(atchan)) { if (!at_xdmac_chan_is_cyclic(atchan)) {
dma_cookie_complete(txd); dma_cookie_complete(txd);
......
...@@ -778,14 +778,6 @@ static int bcm2835_dma_slave_config(struct dma_chan *chan, ...@@ -778,14 +778,6 @@ static int bcm2835_dma_slave_config(struct dma_chan *chan,
{ {
struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
if ((cfg->direction == DMA_DEV_TO_MEM &&
cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
(cfg->direction == DMA_MEM_TO_DEV &&
cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
!is_slave_direction(cfg->direction)) {
return -EINVAL;
}
c->cfg = *cfg; c->cfg = *cfg;
return 0; return 0;
......
...@@ -1306,6 +1306,7 @@ struct coh901318_chan { ...@@ -1306,6 +1306,7 @@ struct coh901318_chan {
unsigned long nbr_active_done; unsigned long nbr_active_done;
unsigned long busy; unsigned long busy;
struct dma_slave_config config;
u32 addr; u32 addr;
u32 ctrl; u32 ctrl;
...@@ -1402,6 +1403,10 @@ static inline struct coh901318_chan *to_coh901318_chan(struct dma_chan *chan) ...@@ -1402,6 +1403,10 @@ static inline struct coh901318_chan *to_coh901318_chan(struct dma_chan *chan)
return container_of(chan, struct coh901318_chan, chan); return container_of(chan, struct coh901318_chan, chan);
} }
static int coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
struct dma_slave_config *config,
enum dma_transfer_direction direction);
static inline const struct coh901318_params * static inline const struct coh901318_params *
cohc_chan_param(struct coh901318_chan *cohc) cohc_chan_param(struct coh901318_chan *cohc)
{ {
...@@ -2360,6 +2365,8 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, ...@@ -2360,6 +2365,8 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (lli == NULL) if (lli == NULL)
goto err_dma_alloc; goto err_dma_alloc;
coh901318_dma_set_runtimeconfig(chan, &cohc->config, direction);
/* initiate allocated lli list */ /* initiate allocated lli list */
ret = coh901318_lli_fill_sg(&cohc->base->pool, lli, sgl, sg_len, ret = coh901318_lli_fill_sg(&cohc->base->pool, lli, sgl, sg_len,
cohc->addr, cohc->addr,
...@@ -2499,7 +2506,8 @@ static const struct burst_table burst_sizes[] = { ...@@ -2499,7 +2506,8 @@ static const struct burst_table burst_sizes[] = {
}; };
static int coh901318_dma_set_runtimeconfig(struct dma_chan *chan, static int coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
struct dma_slave_config *config) struct dma_slave_config *config,
enum dma_transfer_direction direction)
{ {
struct coh901318_chan *cohc = to_coh901318_chan(chan); struct coh901318_chan *cohc = to_coh901318_chan(chan);
dma_addr_t addr; dma_addr_t addr;
...@@ -2509,11 +2517,11 @@ static int coh901318_dma_set_runtimeconfig(struct dma_chan *chan, ...@@ -2509,11 +2517,11 @@ static int coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
int i = 0; int i = 0;
/* We only support mem to per or per to mem transfers */ /* We only support mem to per or per to mem transfers */
if (config->direction == DMA_DEV_TO_MEM) { if (direction == DMA_DEV_TO_MEM) {
addr = config->src_addr; addr = config->src_addr;
addr_width = config->src_addr_width; addr_width = config->src_addr_width;
maxburst = config->src_maxburst; maxburst = config->src_maxburst;
} else if (config->direction == DMA_MEM_TO_DEV) { } else if (direction == DMA_MEM_TO_DEV) {
addr = config->dst_addr; addr = config->dst_addr;
addr_width = config->dst_addr_width; addr_width = config->dst_addr_width;
maxburst = config->dst_maxburst; maxburst = config->dst_maxburst;
...@@ -2579,6 +2587,16 @@ static int coh901318_dma_set_runtimeconfig(struct dma_chan *chan, ...@@ -2579,6 +2587,16 @@ static int coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
return 0; return 0;
} }
static int coh901318_dma_slave_config(struct dma_chan *chan,
struct dma_slave_config *config)
{
struct coh901318_chan *cohc = to_coh901318_chan(chan);
memcpy(&cohc->config, config, sizeof(*config));
return 0;
}
static void coh901318_base_init(struct dma_device *dma, const int *pick_chans, static void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
struct coh901318_base *base) struct coh901318_base *base)
{ {
...@@ -2684,7 +2702,7 @@ static int __init coh901318_probe(struct platform_device *pdev) ...@@ -2684,7 +2702,7 @@ static int __init coh901318_probe(struct platform_device *pdev)
base->dma_slave.device_prep_slave_sg = coh901318_prep_slave_sg; base->dma_slave.device_prep_slave_sg = coh901318_prep_slave_sg;
base->dma_slave.device_tx_status = coh901318_tx_status; base->dma_slave.device_tx_status = coh901318_tx_status;
base->dma_slave.device_issue_pending = coh901318_issue_pending; base->dma_slave.device_issue_pending = coh901318_issue_pending;
base->dma_slave.device_config = coh901318_dma_set_runtimeconfig; base->dma_slave.device_config = coh901318_dma_slave_config;
base->dma_slave.device_pause = coh901318_pause; base->dma_slave.device_pause = coh901318_pause;
base->dma_slave.device_resume = coh901318_resume; base->dma_slave.device_resume = coh901318_resume;
base->dma_slave.device_terminate_all = coh901318_terminate_all; base->dma_slave.device_terminate_all = coh901318_terminate_all;
...@@ -2707,7 +2725,7 @@ static int __init coh901318_probe(struct platform_device *pdev) ...@@ -2707,7 +2725,7 @@ static int __init coh901318_probe(struct platform_device *pdev)
base->dma_memcpy.device_prep_dma_memcpy = coh901318_prep_memcpy; base->dma_memcpy.device_prep_dma_memcpy = coh901318_prep_memcpy;
base->dma_memcpy.device_tx_status = coh901318_tx_status; base->dma_memcpy.device_tx_status = coh901318_tx_status;
base->dma_memcpy.device_issue_pending = coh901318_issue_pending; base->dma_memcpy.device_issue_pending = coh901318_issue_pending;
base->dma_memcpy.device_config = coh901318_dma_set_runtimeconfig; base->dma_memcpy.device_config = coh901318_dma_slave_config;
base->dma_memcpy.device_pause = coh901318_pause; base->dma_memcpy.device_pause = coh901318_pause;
base->dma_memcpy.device_resume = coh901318_resume; base->dma_memcpy.device_resume = coh901318_resume;
base->dma_memcpy.device_terminate_all = coh901318_terminate_all; base->dma_memcpy.device_terminate_all = coh901318_terminate_all;
......
...@@ -113,6 +113,7 @@ struct jz4740_dma_desc { ...@@ -113,6 +113,7 @@ struct jz4740_dma_desc {
struct jz4740_dmaengine_chan { struct jz4740_dmaengine_chan {
struct virt_dma_chan vchan; struct virt_dma_chan vchan;
unsigned int id; unsigned int id;
struct dma_slave_config config;
dma_addr_t fifo_addr; dma_addr_t fifo_addr;
unsigned int transfer_shift; unsigned int transfer_shift;
...@@ -203,8 +204,9 @@ static enum jz4740_dma_transfer_size jz4740_dma_maxburst(u32 maxburst) ...@@ -203,8 +204,9 @@ static enum jz4740_dma_transfer_size jz4740_dma_maxburst(u32 maxburst)
return JZ4740_DMA_TRANSFER_SIZE_32BYTE; return JZ4740_DMA_TRANSFER_SIZE_32BYTE;
} }
static int jz4740_dma_slave_config(struct dma_chan *c, static int jz4740_dma_slave_config_write(struct dma_chan *c,
struct dma_slave_config *config) struct dma_slave_config *config,
enum dma_transfer_direction direction)
{ {
struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c); struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan); struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan);
...@@ -214,7 +216,7 @@ static int jz4740_dma_slave_config(struct dma_chan *c, ...@@ -214,7 +216,7 @@ static int jz4740_dma_slave_config(struct dma_chan *c,
enum jz4740_dma_flags flags; enum jz4740_dma_flags flags;
uint32_t cmd; uint32_t cmd;
switch (config->direction) { switch (direction) {
case DMA_MEM_TO_DEV: case DMA_MEM_TO_DEV:
flags = JZ4740_DMA_SRC_AUTOINC; flags = JZ4740_DMA_SRC_AUTOINC;
transfer_size = jz4740_dma_maxburst(config->dst_maxburst); transfer_size = jz4740_dma_maxburst(config->dst_maxburst);
...@@ -265,6 +267,15 @@ static int jz4740_dma_slave_config(struct dma_chan *c, ...@@ -265,6 +267,15 @@ static int jz4740_dma_slave_config(struct dma_chan *c,
return 0; return 0;
} }
static int jz4740_dma_slave_config(struct dma_chan *c,
struct dma_slave_config *config)
{
struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
memcpy(&chan->config, config, sizeof(*config));
return 0;
}
static int jz4740_dma_terminate_all(struct dma_chan *c) static int jz4740_dma_terminate_all(struct dma_chan *c)
{ {
struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c); struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
...@@ -407,6 +418,8 @@ static struct dma_async_tx_descriptor *jz4740_dma_prep_slave_sg( ...@@ -407,6 +418,8 @@ static struct dma_async_tx_descriptor *jz4740_dma_prep_slave_sg(
desc->direction = direction; desc->direction = direction;
desc->cyclic = false; desc->cyclic = false;
jz4740_dma_slave_config_write(c, &chan->config, direction);
return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
} }
...@@ -438,6 +451,8 @@ static struct dma_async_tx_descriptor *jz4740_dma_prep_dma_cyclic( ...@@ -438,6 +451,8 @@ static struct dma_async_tx_descriptor *jz4740_dma_prep_dma_cyclic(
desc->direction = direction; desc->direction = direction;
desc->cyclic = true; desc->cyclic = true;
jz4740_dma_slave_config_write(c, &chan->config, direction);
return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
} }
......
This diff is collapsed.
...@@ -934,7 +934,7 @@ static int dw_probe(struct platform_device *pdev) ...@@ -934,7 +934,7 @@ static int dw_probe(struct platform_device *pdev)
pm_runtime_put(chip->dev); pm_runtime_put(chip->dev);
ret = dma_async_device_register(&dw->dma); ret = dmaenginem_async_device_register(&dw->dma);
if (ret) if (ret)
goto err_pm_disable; goto err_pm_disable;
...@@ -977,8 +977,6 @@ static int dw_remove(struct platform_device *pdev) ...@@ -977,8 +977,6 @@ static int dw_remove(struct platform_device *pdev)
tasklet_kill(&chan->vc.task); tasklet_kill(&chan->vc.task);
} }
dma_async_device_unregister(&dw->dma);
return 0; return 0;
} }
......
...@@ -886,12 +886,7 @@ static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig) ...@@ -886,12 +886,7 @@ static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
*/ */
u32 s = dw->pdata->is_idma32 ? 1 : 2; u32 s = dw->pdata->is_idma32 ? 1 : 2;
/* Check if chan will be configured for slave transfers */
if (!is_slave_direction(sconfig->direction))
return -EINVAL;
memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig)); memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
dwc->direction = sconfig->direction;
sc->src_maxburst = sc->src_maxburst > 1 ? fls(sc->src_maxburst) - s : 0; sc->src_maxburst = sc->src_maxburst > 1 ? fls(sc->src_maxburst) - s : 0;
sc->dst_maxburst = sc->dst_maxburst > 1 ? fls(sc->dst_maxburst) - s : 0; sc->dst_maxburst = sc->dst_maxburst > 1 ? fls(sc->dst_maxburst) - s : 0;
......
...@@ -284,6 +284,8 @@ MODULE_DEVICE_TABLE(of, dw_dma_of_id_table); ...@@ -284,6 +284,8 @@ MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
#ifdef CONFIG_ACPI #ifdef CONFIG_ACPI
static const struct acpi_device_id dw_dma_acpi_id_table[] = { static const struct acpi_device_id dw_dma_acpi_id_table[] = {
{ "INTL9C60", 0 }, { "INTL9C60", 0 },
{ "80862286", 0 },
{ "808622C0", 0 },
{ } { }
}; };
MODULE_DEVICE_TABLE(acpi, dw_dma_acpi_id_table); MODULE_DEVICE_TABLE(acpi, dw_dma_acpi_id_table);
......
...@@ -109,6 +109,9 @@ ...@@ -109,6 +109,9 @@
#define DMA_MAX_CHAN_DESCRIPTORS 32 #define DMA_MAX_CHAN_DESCRIPTORS 32
struct ep93xx_dma_engine; struct ep93xx_dma_engine;
static int ep93xx_dma_slave_config_write(struct dma_chan *chan,
enum dma_transfer_direction dir,
struct dma_slave_config *config);
/** /**
* struct ep93xx_dma_desc - EP93xx specific transaction descriptor * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
...@@ -180,6 +183,7 @@ struct ep93xx_dma_chan { ...@@ -180,6 +183,7 @@ struct ep93xx_dma_chan {
struct list_head free_list; struct list_head free_list;
u32 runtime_addr; u32 runtime_addr;
u32 runtime_ctrl; u32 runtime_ctrl;
struct dma_slave_config slave_config;
}; };
/** /**
...@@ -1051,6 +1055,8 @@ ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, ...@@ -1051,6 +1055,8 @@ ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
return NULL; return NULL;
} }
ep93xx_dma_slave_config_write(chan, dir, &edmac->slave_config);
first = NULL; first = NULL;
for_each_sg(sgl, sg, sg_len, i) { for_each_sg(sgl, sg, sg_len, i) {
size_t len = sg_dma_len(sg); size_t len = sg_dma_len(sg);
...@@ -1136,6 +1142,8 @@ ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, ...@@ -1136,6 +1142,8 @@ ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
return NULL; return NULL;
} }
ep93xx_dma_slave_config_write(chan, dir, &edmac->slave_config);
/* Split the buffer into period size chunks */ /* Split the buffer into period size chunks */
first = NULL; first = NULL;
for (offset = 0; offset < buf_len; offset += period_len) { for (offset = 0; offset < buf_len; offset += period_len) {
...@@ -1227,6 +1235,17 @@ static int ep93xx_dma_slave_config(struct dma_chan *chan, ...@@ -1227,6 +1235,17 @@ static int ep93xx_dma_slave_config(struct dma_chan *chan,
struct dma_slave_config *config) struct dma_slave_config *config)
{ {
struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
memcpy(&edmac->slave_config, config, sizeof(*config));
return 0;
}
static int ep93xx_dma_slave_config_write(struct dma_chan *chan,
enum dma_transfer_direction dir,
struct dma_slave_config *config)
{
struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
enum dma_slave_buswidth width; enum dma_slave_buswidth width;
unsigned long flags; unsigned long flags;
u32 addr, ctrl; u32 addr, ctrl;
...@@ -1234,7 +1253,7 @@ static int ep93xx_dma_slave_config(struct dma_chan *chan, ...@@ -1234,7 +1253,7 @@ static int ep93xx_dma_slave_config(struct dma_chan *chan,
if (!edmac->edma->m2m) if (!edmac->edma->m2m)
return -EINVAL; return -EINVAL;
switch (config->direction) { switch (dir) {
case DMA_DEV_TO_MEM: case DMA_DEV_TO_MEM:
width = config->src_addr_width; width = config->src_addr_width;
addr = config->src_addr; addr = config->src_addr;
......
This diff is collapsed.
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright 2013-2014 Freescale Semiconductor, Inc.
* Copyright 2018 Angelo Dureghello <angelo@sysam.it>
*/
#ifndef _FSL_EDMA_COMMON_H_
#define _FSL_EDMA_COMMON_H_
#include "virt-dma.h"
/* eDMA Control Register (CR) bit definitions */
#define EDMA_CR_EDBG		BIT(1)
#define EDMA_CR_ERCA		BIT(2)
#define EDMA_CR_ERGA		BIT(3)
#define EDMA_CR_HOE		BIT(4)
#define EDMA_CR_HALT		BIT(5)
#define EDMA_CR_CLM		BIT(6)
#define EDMA_CR_EMLM		BIT(7)
#define EDMA_CR_ECX		BIT(16)
#define EDMA_CR_CX		BIT(17)

/* 5-bit channel-number field extractors for the SEEI/CEEI/CINT/CERR regs */
#define EDMA_SEEI_SEEI(x)	((x) & GENMASK(4, 0))
#define EDMA_CEEI_CEEI(x)	((x) & GENMASK(4, 0))
#define EDMA_CINT_CINT(x)	((x) & GENMASK(4, 0))
#define EDMA_CERR_CERR(x)	((x) & GENMASK(4, 0))

/* TCD ATTR register: src/dst transfer-size and address-modulo fields */
#define EDMA_TCD_ATTR_DSIZE(x)		(((x) & GENMASK(2, 0)))
#define EDMA_TCD_ATTR_DMOD(x)		(((x) & GENMASK(4, 0)) << 3)
#define EDMA_TCD_ATTR_SSIZE(x)		(((x) & GENMASK(2, 0)) << 8)
#define EDMA_TCD_ATTR_SMOD(x)		(((x) & GENMASK(4, 0)) << 11)

/* Encoded destination transfer sizes for the ATTR DSIZE field */
#define EDMA_TCD_ATTR_DSIZE_8BIT	0
#define EDMA_TCD_ATTR_DSIZE_16BIT	BIT(0)
#define EDMA_TCD_ATTR_DSIZE_32BIT	BIT(1)
#define EDMA_TCD_ATTR_DSIZE_64BIT	(BIT(0) | BIT(1))
#define EDMA_TCD_ATTR_DSIZE_32BYTE	(BIT(3) | BIT(0))

/* Source sizes reuse the DSIZE encodings, shifted into the SSIZE field */
#define EDMA_TCD_ATTR_SSIZE_8BIT	0
#define EDMA_TCD_ATTR_SSIZE_16BIT	(EDMA_TCD_ATTR_DSIZE_16BIT << 8)
#define EDMA_TCD_ATTR_SSIZE_32BIT	(EDMA_TCD_ATTR_DSIZE_32BIT << 8)
#define EDMA_TCD_ATTR_SSIZE_64BIT	(EDMA_TCD_ATTR_DSIZE_64BIT << 8)
#define EDMA_TCD_ATTR_SSIZE_32BYTE	(EDMA_TCD_ATTR_DSIZE_32BYTE << 8)

/* 15-bit current/beginning major-iteration counts in the TCD */
#define EDMA_TCD_CITER_CITER(x)	((x) & GENMASK(14, 0))
#define EDMA_TCD_BITER_BITER(x)	((x) & GENMASK(14, 0))

/* TCD Control/Status (CSR) register bits */
#define EDMA_TCD_CSR_START		BIT(0)
#define EDMA_TCD_CSR_INT_MAJOR		BIT(1)
#define EDMA_TCD_CSR_INT_HALF		BIT(2)
#define EDMA_TCD_CSR_D_REQ		BIT(3)
#define EDMA_TCD_CSR_E_SG		BIT(4)
#define EDMA_TCD_CSR_E_LINK		BIT(5)
#define EDMA_TCD_CSR_ACTIVE		BIT(6)
#define EDMA_TCD_CSR_DONE		BIT(7)

/* DMAMUX channel-configuration register: enable bit and 6-bit source */
#define EDMAMUX_CHCFG_DIS		0x0
#define EDMAMUX_CHCFG_ENBL		0x80
#define EDMAMUX_CHCFG_SOURCE(n)		((n) & 0x3F)

/* Number of DMAMUX instances (see muxbase[]/muxclk[] below) */
#define DMAMUX_NR	2

/* Slave bus widths the controller advertises to the dmaengine core */
#define FSL_EDMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
/* Power-management state of the whole eDMA engine */
enum fsl_edma_pm_state {
	RUNNING = 0,
	SUSPENDED,
};
/*
 * In-memory image of one hardware Transfer Control Descriptor (TCD).
 * Field names mirror the controller's TCD register set (saddr/daddr =
 * source/destination address, citer/biter = current/beginning major
 * iteration count). Declared little-endian (__le*); presumably converted
 * for big-endian IPs when written out — TODO confirm in fsl-edma-common.c.
 */
struct fsl_edma_hw_tcd {
	__le32	saddr;
	__le16	soff;
	__le16	attr;
	__le32	nbytes;
	__le32	slast;
	__le32	daddr;
	__le16	doff;
	__le16	citer;
	__le32	dlast_sga;
	__le16	csr;
	__le16	biter;
};
/*
 * These are iomem pointers, for both v32 and v64.
 * Holding one pointer per register lets the shared code stay agnostic of
 * the per-SoC register layout; presumably resolved once at probe time by
 * fsl_edma_setup_regs() — TODO confirm against fsl-edma-common.c.
 */
struct edma_regs {
	void __iomem *cr;
	void __iomem *es;
	void __iomem *erqh;
	void __iomem *erql;	/* aka erq on v32 */
	void __iomem *eeih;
	void __iomem *eeil;	/* aka eei on v32 */
	void __iomem *seei;
	void __iomem *ceei;
	void __iomem *serq;
	void __iomem *cerq;
	void __iomem *cint;
	void __iomem *cerr;
	void __iomem *ssrt;
	void __iomem *cdne;
	void __iomem *inth;
	void __iomem *intl;
	void __iomem *errh;
	void __iomem *errl;
	struct fsl_edma_hw_tcd __iomem *tcd;	/* base of the TCD array */
};
/* Software handle pairing a TCD's DMA (bus) address with its CPU mapping */
struct fsl_edma_sw_tcd {
	dma_addr_t			ptcd;	/* bus address of the TCD */
	struct fsl_edma_hw_tcd		*vtcd;	/* CPU virtual address of the TCD */
};
/* Per-channel driver state, embedding a virt-dma channel */
struct fsl_edma_chan {
	struct virt_dma_chan		vchan;
	enum dma_status			status;
	enum fsl_edma_pm_state		pm_state;
	bool				idle;
	u32				slave_id;	/* request line, presumably routed via DMAMUX */
	struct fsl_edma_engine		*edma;		/* owning controller */
	struct fsl_edma_desc		*edesc;		/* descriptor currently in flight, if any */
	struct dma_slave_config		cfg;		/* last config from fsl_edma_slave_config() */
	u32				attr;		/* cached TCD ATTR value — TODO confirm */
	struct dma_pool			*tcd_pool;	/* pool the hardware TCDs come from */
};
/* Software descriptor: one virt-dma descriptor spanning n_tcds hardware TCDs */
struct fsl_edma_desc {
	struct virt_dma_desc		vdesc;
	struct fsl_edma_chan		*echan;		/* channel this transfer belongs to */
	bool				iscyclic;	/* cyclic (circular-buffer) transfer? */
	enum dma_transfer_direction	dirn;
	unsigned int			n_tcds;		/* entries in tcd[] below */
	struct fsl_edma_sw_tcd		tcd[];		/* flexible array of per-segment TCDs */
};
/* Register/channel-layout generations handled by the common code */
enum edma_version {
	v1, /* 32ch, Vybrid, mpc57x, etc */
	v2, /* 64ch ColdFire */
};
/* Controller-wide state, ending in one fsl_edma_chan per hardware channel */
struct fsl_edma_engine {
	struct dma_device	dma_dev;
	void __iomem		*membase;		/* controller register block */
	void __iomem		*muxbase[DMAMUX_NR];	/* DMAMUX register blocks */
	struct clk		*muxclk[DMAMUX_NR];
	struct mutex		fsl_edma_mutex;
	u32			n_chans;		/* presumably the population of chans[] */
	int			txirq;
	int			errirq;
	bool			big_endian;		/* register endianness; see edma_readl() etc. */
	enum edma_version	version;		/* v1 (32ch) or v2 (64ch) */
	struct edma_regs	regs;			/* resolved per-register pointers */
	struct fsl_edma_chan	chans[];		/* flexible array of channels */
};
/*
* R/W functions for big- or little-endian registers:
* The eDMA controller's endian is independent of the CPU core's endian.
* For the big-endian IP module, the offset for 8-bit or 16-bit registers
* should also be swapped opposite to that in little-endian IP.
*/
/*
 * Read a 32-bit eDMA register, honouring the controller's endianness
 * (which is independent of the CPU core's endianness).
 */
static inline u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr)
{
	return edma->big_endian ? ioread32be(addr) : ioread32(addr);
}
/* Write an 8-bit eDMA register, honouring the controller's endianness. */
static inline void edma_writeb(struct fsl_edma_engine *edma,
			       u8 val, void __iomem *addr)
{
	/*
	 * On the big-endian IP the 8-bit registers sit at byte-swapped
	 * offsets relative to the little-endian layout, so flip the low
	 * address bits before the write.
	 */
	if (edma->big_endian)
		addr = (void __iomem *)((unsigned long)addr ^ 0x3);

	iowrite8(val, addr);
}
/* Write a 16-bit eDMA register, honouring the controller's endianness. */
static inline void edma_writew(struct fsl_edma_engine *edma,
			       u16 val, void __iomem *addr)
{
	if (!edma->big_endian) {
		iowrite16(val, addr);
		return;
	}

	/* big-endian IP: 16-bit registers live at byte-swapped offsets */
	iowrite16be(val, (void __iomem *)((unsigned long)addr ^ 0x2));
}
/* Write a 32-bit eDMA register, honouring the controller's endianness. */
static inline void edma_writel(struct fsl_edma_engine *edma,
			       u32 val, void __iomem *addr)
{
	if (!edma->big_endian) {
		iowrite32(val, addr);
		return;
	}

	iowrite32be(val, addr);
}
/* Map a generic dma_chan (embedded as vchan.chan) back to its container */
static inline struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct fsl_edma_chan, vchan.chan);
}
/* Map a generic virt_dma_desc (embedded as vdesc) back to its container */
static inline struct fsl_edma_desc *to_fsl_edma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct fsl_edma_desc, vdesc);
}
/*
 * Channel/descriptor operations implemented in fsl-edma-common.c and
 * shared by the fsl-edma and mcf-edma platform drivers (both link
 * against fsl-edma-common.o; see drivers/dma/Makefile).
 */
void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan);
void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
			unsigned int slot, bool enable);
void fsl_edma_free_desc(struct virt_dma_desc *vdesc);
int fsl_edma_terminate_all(struct dma_chan *chan);
int fsl_edma_pause(struct dma_chan *chan);
int fsl_edma_resume(struct dma_chan *chan);
int fsl_edma_slave_config(struct dma_chan *chan,
				 struct dma_slave_config *cfg);
enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *txstate);
struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags);
struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context);
void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan);
void fsl_edma_issue_pending(struct dma_chan *chan);
int fsl_edma_alloc_chan_resources(struct dma_chan *chan);
void fsl_edma_free_chan_resources(struct dma_chan *chan);
void fsl_edma_cleanup_vchan(struct dma_device *dmadev);
/* Fill an edma_regs with per-register pointers for edma->version */
void fsl_edma_setup_regs(struct fsl_edma_engine *edma);

#endif /* _FSL_EDMA_COMMON_H_ */
This diff is collapsed.
...@@ -987,7 +987,7 @@ static void dma_do_tasklet(unsigned long data) ...@@ -987,7 +987,7 @@ static void dma_do_tasklet(unsigned long data)
chan_dbg(chan, "tasklet entry\n"); chan_dbg(chan, "tasklet entry\n");
spin_lock_bh(&chan->desc_lock); spin_lock(&chan->desc_lock);
/* the hardware is now idle and ready for more */ /* the hardware is now idle and ready for more */
chan->idle = true; chan->idle = true;
...@@ -995,7 +995,7 @@ static void dma_do_tasklet(unsigned long data) ...@@ -995,7 +995,7 @@ static void dma_do_tasklet(unsigned long data)
/* Run all cleanup for descriptors which have been completed */ /* Run all cleanup for descriptors which have been completed */
fsldma_cleanup_descriptors(chan); fsldma_cleanup_descriptors(chan);
spin_unlock_bh(&chan->desc_lock); spin_unlock(&chan->desc_lock);
chan_dbg(chan, "tasklet exit\n"); chan_dbg(chan, "tasklet exit\n");
} }
......
...@@ -348,10 +348,6 @@ static int hsu_dma_slave_config(struct dma_chan *chan, ...@@ -348,10 +348,6 @@ static int hsu_dma_slave_config(struct dma_chan *chan,
{ {
struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan); struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
/* Check if chan will be configured for slave transfers */
if (!is_slave_direction(config->direction))
return -EINVAL;
memcpy(&hsuc->config, config, sizeof(hsuc->config)); memcpy(&hsuc->config, config, sizeof(hsuc->config));
return 0; return 0;
......
...@@ -142,9 +142,8 @@ static void idma64_chan_irq(struct idma64 *idma64, unsigned short c, ...@@ -142,9 +142,8 @@ static void idma64_chan_irq(struct idma64 *idma64, unsigned short c,
{ {
struct idma64_chan *idma64c = &idma64->chan[c]; struct idma64_chan *idma64c = &idma64->chan[c];
struct idma64_desc *desc; struct idma64_desc *desc;
unsigned long flags;
spin_lock_irqsave(&idma64c->vchan.lock, flags); spin_lock(&idma64c->vchan.lock);
desc = idma64c->desc; desc = idma64c->desc;
if (desc) { if (desc) {
if (status_err & (1 << c)) { if (status_err & (1 << c)) {
...@@ -161,7 +160,7 @@ static void idma64_chan_irq(struct idma64 *idma64, unsigned short c, ...@@ -161,7 +160,7 @@ static void idma64_chan_irq(struct idma64 *idma64, unsigned short c,
if (idma64c->desc == NULL || desc->status == DMA_ERROR) if (idma64c->desc == NULL || desc->status == DMA_ERROR)
idma64_stop_transfer(idma64c); idma64_stop_transfer(idma64c);
} }
spin_unlock_irqrestore(&idma64c->vchan.lock, flags); spin_unlock(&idma64c->vchan.lock);
} }
static irqreturn_t idma64_irq(int irq, void *dev) static irqreturn_t idma64_irq(int irq, void *dev)
...@@ -408,10 +407,6 @@ static int idma64_slave_config(struct dma_chan *chan, ...@@ -408,10 +407,6 @@ static int idma64_slave_config(struct dma_chan *chan,
{ {
struct idma64_chan *idma64c = to_idma64_chan(chan); struct idma64_chan *idma64c = to_idma64_chan(chan);
/* Check if chan will be configured for slave transfers */
if (!is_slave_direction(config->direction))
return -EINVAL;
memcpy(&idma64c->config, config, sizeof(idma64c->config)); memcpy(&idma64c->config, config, sizeof(idma64c->config));
convert_burst(&idma64c->config.src_maxburst); convert_burst(&idma64c->config.src_maxburst);
......
...@@ -162,6 +162,7 @@ struct imxdma_channel { ...@@ -162,6 +162,7 @@ struct imxdma_channel {
bool enabled_2d; bool enabled_2d;
int slot_2d; int slot_2d;
unsigned int irq; unsigned int irq;
struct dma_slave_config config;
}; };
enum imx_dma_type { enum imx_dma_type {
...@@ -675,14 +676,15 @@ static int imxdma_terminate_all(struct dma_chan *chan) ...@@ -675,14 +676,15 @@ static int imxdma_terminate_all(struct dma_chan *chan)
return 0; return 0;
} }
static int imxdma_config(struct dma_chan *chan, static int imxdma_config_write(struct dma_chan *chan,
struct dma_slave_config *dmaengine_cfg) struct dma_slave_config *dmaengine_cfg,
enum dma_transfer_direction direction)
{ {
struct imxdma_channel *imxdmac = to_imxdma_chan(chan); struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
struct imxdma_engine *imxdma = imxdmac->imxdma; struct imxdma_engine *imxdma = imxdmac->imxdma;
unsigned int mode = 0; unsigned int mode = 0;
if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) { if (direction == DMA_DEV_TO_MEM) {
imxdmac->per_address = dmaengine_cfg->src_addr; imxdmac->per_address = dmaengine_cfg->src_addr;
imxdmac->watermark_level = dmaengine_cfg->src_maxburst; imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
imxdmac->word_size = dmaengine_cfg->src_addr_width; imxdmac->word_size = dmaengine_cfg->src_addr_width;
...@@ -723,6 +725,16 @@ static int imxdma_config(struct dma_chan *chan, ...@@ -723,6 +725,16 @@ static int imxdma_config(struct dma_chan *chan,
return 0; return 0;
} }
static int imxdma_config(struct dma_chan *chan,
struct dma_slave_config *dmaengine_cfg)
{
struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
memcpy(&imxdmac->config, dmaengine_cfg, sizeof(*dmaengine_cfg));
return 0;
}
static enum dma_status imxdma_tx_status(struct dma_chan *chan, static enum dma_status imxdma_tx_status(struct dma_chan *chan,
dma_cookie_t cookie, dma_cookie_t cookie,
struct dma_tx_state *txstate) struct dma_tx_state *txstate)
...@@ -905,6 +917,8 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( ...@@ -905,6 +917,8 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
desc->desc.callback = NULL; desc->desc.callback = NULL;
desc->desc.callback_param = NULL; desc->desc.callback_param = NULL;
imxdma_config_write(chan, &imxdmac->config, direction);
return &desc->desc; return &desc->desc;
} }
......
...@@ -129,7 +129,7 @@ static void ...@@ -129,7 +129,7 @@ static void
ioat_init_channel(struct ioatdma_device *ioat_dma, ioat_init_channel(struct ioatdma_device *ioat_dma,
struct ioatdma_chan *ioat_chan, int idx); struct ioatdma_chan *ioat_chan, int idx);
static void ioat_intr_quirk(struct ioatdma_device *ioat_dma); static void ioat_intr_quirk(struct ioatdma_device *ioat_dma);
static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma); static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma);
static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma); static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma);
static int ioat_dca_enabled = 1; static int ioat_dca_enabled = 1;
...@@ -575,7 +575,7 @@ static void ioat_dma_remove(struct ioatdma_device *ioat_dma) ...@@ -575,7 +575,7 @@ static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
* ioat_enumerate_channels - find and initialize the device's channels * ioat_enumerate_channels - find and initialize the device's channels
* @ioat_dma: the ioat dma device to be enumerated * @ioat_dma: the ioat dma device to be enumerated
*/ */
static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma) static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
{ {
struct ioatdma_chan *ioat_chan; struct ioatdma_chan *ioat_chan;
struct device *dev = &ioat_dma->pdev->dev; struct device *dev = &ioat_dma->pdev->dev;
...@@ -594,7 +594,7 @@ static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma) ...@@ -594,7 +594,7 @@ static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
xfercap_log = readb(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET); xfercap_log = readb(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET);
xfercap_log &= 0x1f; /* bits [4:0] valid */ xfercap_log &= 0x1f; /* bits [4:0] valid */
if (xfercap_log == 0) if (xfercap_log == 0)
return 0; return;
dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log); dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);
for (i = 0; i < dma->chancnt; i++) { for (i = 0; i < dma->chancnt; i++) {
...@@ -611,7 +611,6 @@ static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma) ...@@ -611,7 +611,6 @@ static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
} }
} }
dma->chancnt = i; dma->chancnt = i;
return i;
} }
/** /**
...@@ -1205,8 +1204,15 @@ static void ioat_shutdown(struct pci_dev *pdev) ...@@ -1205,8 +1204,15 @@ static void ioat_shutdown(struct pci_dev *pdev)
spin_lock_bh(&ioat_chan->prep_lock); spin_lock_bh(&ioat_chan->prep_lock);
set_bit(IOAT_CHAN_DOWN, &ioat_chan->state); set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
del_timer_sync(&ioat_chan->timer);
spin_unlock_bh(&ioat_chan->prep_lock); spin_unlock_bh(&ioat_chan->prep_lock);
/*
* Synchronization rule for del_timer_sync():
* - The caller must not hold locks which would prevent
* completion of the timer's handler.
* So prep_lock cannot be held before calling it.
*/
del_timer_sync(&ioat_chan->timer);
/* this should quiesce then reset */ /* this should quiesce then reset */
ioat_reset_hw(ioat_chan); ioat_reset_hw(ioat_chan);
} }
......
...@@ -87,10 +87,10 @@ struct k3_dma_chan { ...@@ -87,10 +87,10 @@ struct k3_dma_chan {
struct virt_dma_chan vc; struct virt_dma_chan vc;
struct k3_dma_phy *phy; struct k3_dma_phy *phy;
struct list_head node; struct list_head node;
enum dma_transfer_direction dir;
dma_addr_t dev_addr; dma_addr_t dev_addr;
enum dma_status status; enum dma_status status;
bool cyclic; bool cyclic;
struct dma_slave_config slave_config;
}; };
struct k3_dma_phy { struct k3_dma_phy {
...@@ -118,6 +118,10 @@ struct k3_dma_dev { ...@@ -118,6 +118,10 @@ struct k3_dma_dev {
#define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave) #define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave)
static int k3_dma_config_write(struct dma_chan *chan,
enum dma_transfer_direction dir,
struct dma_slave_config *cfg);
static struct k3_dma_chan *to_k3_chan(struct dma_chan *chan) static struct k3_dma_chan *to_k3_chan(struct dma_chan *chan)
{ {
return container_of(chan, struct k3_dma_chan, vc.chan); return container_of(chan, struct k3_dma_chan, vc.chan);
...@@ -501,14 +505,8 @@ static struct dma_async_tx_descriptor *k3_dma_prep_memcpy( ...@@ -501,14 +505,8 @@ static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
copy = min_t(size_t, len, DMA_MAX_SIZE); copy = min_t(size_t, len, DMA_MAX_SIZE);
k3_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg); k3_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);
if (c->dir == DMA_MEM_TO_DEV) { src += copy;
src += copy; dst += copy;
} else if (c->dir == DMA_DEV_TO_MEM) {
dst += copy;
} else {
src += copy;
dst += copy;
}
len -= copy; len -= copy;
} while (len); } while (len);
...@@ -542,6 +540,7 @@ static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg( ...@@ -542,6 +540,7 @@ static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
if (!ds) if (!ds)
return NULL; return NULL;
num = 0; num = 0;
k3_dma_config_write(chan, dir, &c->slave_config);
for_each_sg(sgl, sg, sglen, i) { for_each_sg(sgl, sg, sglen, i) {
addr = sg_dma_address(sg); addr = sg_dma_address(sg);
...@@ -602,6 +601,7 @@ k3_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, ...@@ -602,6 +601,7 @@ k3_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
avail = buf_len; avail = buf_len;
total = avail; total = avail;
num = 0; num = 0;
k3_dma_config_write(chan, dir, &c->slave_config);
if (period_len < modulo) if (period_len < modulo)
modulo = period_len; modulo = period_len;
...@@ -642,18 +642,26 @@ static int k3_dma_config(struct dma_chan *chan, ...@@ -642,18 +642,26 @@ static int k3_dma_config(struct dma_chan *chan,
struct dma_slave_config *cfg) struct dma_slave_config *cfg)
{ {
struct k3_dma_chan *c = to_k3_chan(chan); struct k3_dma_chan *c = to_k3_chan(chan);
memcpy(&c->slave_config, cfg, sizeof(*cfg));
return 0;
}
static int k3_dma_config_write(struct dma_chan *chan,
enum dma_transfer_direction dir,
struct dma_slave_config *cfg)
{
struct k3_dma_chan *c = to_k3_chan(chan);
u32 maxburst = 0, val = 0; u32 maxburst = 0, val = 0;
enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED; enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
if (cfg == NULL) if (dir == DMA_DEV_TO_MEM) {
return -EINVAL;
c->dir = cfg->direction;
if (c->dir == DMA_DEV_TO_MEM) {
c->ccfg = CX_CFG_DSTINCR; c->ccfg = CX_CFG_DSTINCR;
c->dev_addr = cfg->src_addr; c->dev_addr = cfg->src_addr;
maxburst = cfg->src_maxburst; maxburst = cfg->src_maxburst;
width = cfg->src_addr_width; width = cfg->src_addr_width;
} else if (c->dir == DMA_MEM_TO_DEV) { } else if (dir == DMA_MEM_TO_DEV) {
c->ccfg = CX_CFG_SRCINCR; c->ccfg = CX_CFG_SRCINCR;
c->dev_addr = cfg->dst_addr; c->dev_addr = cfg->dst_addr;
maxburst = cfg->dst_maxburst; maxburst = cfg->dst_maxburst;
......
// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (c) 2013-2014 Freescale Semiconductor, Inc
// Copyright (c) 2017 Sysam, Angelo Dureghello <angelo@sysam.it>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/platform_data/dma-mcf-edma.h>
#include "fsl-edma-common.h"
#define EDMA_CHANNELS 64
#define EDMA_MASK_CH(x) ((x) & GENMASK(5, 0))
/*
 * Transfer-complete interrupt handler, shared by all tx IRQ lines.
 *
 * Builds a 64-bit pending map from the INTH/INTL registers, acknowledges
 * and completes every channel whose bit is set, and queues the next
 * descriptor if one is pending.
 */
static irqreturn_t mcf_edma_tx_handler(int irq, void *dev_id)
{
	struct fsl_edma_engine *mcf_edma = dev_id;
	struct edma_regs *regs = &mcf_edma->regs;
	unsigned int ch;
	struct fsl_edma_chan *mcf_chan;
	u64 intmap;

	/* Fold the high/low interrupt status registers into one 64-bit map. */
	intmap = ioread32(regs->inth);
	intmap <<= 32;
	intmap |= ioread32(regs->intl);
	if (!intmap)
		return IRQ_NONE;

	for (ch = 0; ch < mcf_edma->n_chans; ch++) {
		/*
		 * BIT() is only "unsigned long" wide (32 bits on m68k), so
		 * testing a u64 map needs BIT_ULL() or channels 32-63 are
		 * never seen (shift >= width is undefined behaviour).
		 */
		if (intmap & BIT_ULL(ch)) {
			/* Acknowledge this channel's interrupt. */
			iowrite8(EDMA_MASK_CH(ch), regs->cint);

			mcf_chan = &mcf_edma->chans[ch];

			spin_lock(&mcf_chan->vchan.lock);

			if (!mcf_chan->edesc) {
				/* terminate_all() can race with this IRQ */
				spin_unlock(&mcf_chan->vchan.lock);
				continue;
			}

			if (!mcf_chan->edesc->iscyclic) {
				list_del(&mcf_chan->edesc->vdesc.node);
				vchan_cookie_complete(&mcf_chan->edesc->vdesc);
				mcf_chan->edesc = NULL;
				mcf_chan->status = DMA_COMPLETE;
				mcf_chan->idle = true;
			} else {
				vchan_cyclic_callback(&mcf_chan->edesc->vdesc);
			}

			/* Completion above freed the slot: start the next xfer. */
			if (!mcf_chan->edesc)
				fsl_edma_xfer_desc(mcf_chan);

			spin_unlock(&mcf_chan->vchan.lock);
		}
	}

	return IRQ_HANDLED;
}
/*
 * Error interrupt handler.
 *
 * The 64 channels report errors through two 32-bit registers (ERRL for
 * channels 0-31, ERRH for 32-63).  Read both before deciding the IRQ is
 * not ours: the old code returned IRQ_NONE whenever ERRL was zero, so an
 * error on a high channel alone was never serviced.
 */
static irqreturn_t mcf_edma_err_handler(int irq, void *dev_id)
{
	struct fsl_edma_engine *mcf_edma = dev_id;
	struct edma_regs *regs = &mcf_edma->regs;
	unsigned int errl, errh, ch;

	errl = ioread32(regs->errl);
	errh = ioread32(regs->errh);
	if (!errl && !errh)
		return IRQ_NONE;

	for (ch = 0; ch < (EDMA_CHANNELS / 2); ch++) {
		if (errl & BIT(ch)) {
			fsl_edma_disable_request(&mcf_edma->chans[ch]);
			/* Clear the per-channel error flag. */
			iowrite8(EDMA_CERR_CERR(ch), regs->cerr);
			mcf_edma->chans[ch].status = DMA_ERROR;
			mcf_edma->chans[ch].idle = true;
		}
	}

	for (ch = (EDMA_CHANNELS / 2); ch < EDMA_CHANNELS; ch++) {
		if (errh & BIT(ch - (EDMA_CHANNELS / 2))) {
			fsl_edma_disable_request(&mcf_edma->chans[ch]);
			iowrite8(EDMA_CERR_CERR(ch), regs->cerr);
			mcf_edma->chans[ch].status = DMA_ERROR;
			mcf_edma->chans[ch].idle = true;
		}
	}

	return IRQ_HANDLED;
}
static int mcf_edma_irq_init(struct platform_device *pdev,
struct fsl_edma_engine *mcf_edma)
{
int ret = 0, i;
struct resource *res;
res = platform_get_resource_byname(pdev,
IORESOURCE_IRQ, "edma-tx-00-15");
if (!res)
return -1;
for (ret = 0, i = res->start; i <= res->end; ++i)
ret |= request_irq(i, mcf_edma_tx_handler, 0, "eDMA", mcf_edma);
if (ret)
return ret;
res = platform_get_resource_byname(pdev,
IORESOURCE_IRQ, "edma-tx-16-55");
if (!res)
return -1;
for (ret = 0, i = res->start; i <= res->end; ++i)
ret |= request_irq(i, mcf_edma_tx_handler, 0, "eDMA", mcf_edma);
if (ret)
return ret;
ret = platform_get_irq_byname(pdev, "edma-tx-56-63");
if (ret != -ENXIO) {
ret = request_irq(ret, mcf_edma_tx_handler,
0, "eDMA", mcf_edma);
if (ret)
return ret;
}
ret = platform_get_irq_byname(pdev, "edma-err");
if (ret != -ENXIO) {
ret = request_irq(ret, mcf_edma_err_handler,
0, "eDMA", mcf_edma);
if (ret)
return ret;
}
return 0;
}
/*
 * Release every IRQ line that mcf_edma_irq_init() may have requested.
 * Safe to call on a partially initialized device: missing resources and
 * absent optional IRQs are simply skipped.
 */
static void mcf_edma_irq_free(struct platform_device *pdev,
			      struct fsl_edma_engine *mcf_edma)
{
	static const char * const tx_ranges[] = {
		"edma-tx-00-15", "edma-tx-16-55"
	};
	static const char * const single_irqs[] = {
		"edma-tx-56-63", "edma-err"
	};
	struct resource *res;
	unsigned int i;
	int irq;

	/* Free each line of the two multi-IRQ transmit ranges. */
	for (i = 0; i < ARRAY_SIZE(tx_ranges); i++) {
		res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
						   tx_ranges[i]);
		if (!res)
			continue;
		for (irq = res->start; irq <= res->end; irq++)
			free_irq(irq, mcf_edma);
	}

	/* Free the optional single IRQ lines. */
	for (i = 0; i < ARRAY_SIZE(single_irqs); i++) {
		irq = platform_get_irq_byname(pdev, single_irqs[i]);
		if (irq != -ENXIO)
			free_irq(irq, mcf_edma);
	}
}
static int mcf_edma_probe(struct platform_device *pdev)
{
struct mcf_edma_platform_data *pdata;
struct fsl_edma_engine *mcf_edma;
struct fsl_edma_chan *mcf_chan;
struct edma_regs *regs;
struct resource *res;
int ret, i, len, chans;
pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
dev_err(&pdev->dev, "no platform data supplied\n");
return -EINVAL;
}
chans = pdata->dma_channels;
len = sizeof(*mcf_edma) + sizeof(*mcf_chan) * chans;
mcf_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
if (!mcf_edma)
return -ENOMEM;
mcf_edma->n_chans = chans;
/* Set up version for ColdFire edma */
mcf_edma->version = v2;
mcf_edma->big_endian = 1;
if (!mcf_edma->n_chans) {
dev_info(&pdev->dev, "setting default channel number to 64");
mcf_edma->n_chans = 64;
}
mutex_init(&mcf_edma->fsl_edma_mutex);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mcf_edma->membase = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(mcf_edma->membase))
return PTR_ERR(mcf_edma->membase);
fsl_edma_setup_regs(mcf_edma);
regs = &mcf_edma->regs;
INIT_LIST_HEAD(&mcf_edma->dma_dev.channels);
for (i = 0; i < mcf_edma->n_chans; i++) {
struct fsl_edma_chan *mcf_chan = &mcf_edma->chans[i];
mcf_chan->edma = mcf_edma;
mcf_chan->slave_id = i;
mcf_chan->idle = true;
mcf_chan->vchan.desc_free = fsl_edma_free_desc;
vchan_init(&mcf_chan->vchan, &mcf_edma->dma_dev);
iowrite32(0x0, &regs->tcd[i].csr);
}
iowrite32(~0, regs->inth);
iowrite32(~0, regs->intl);
ret = mcf_edma_irq_init(pdev, mcf_edma);
if (ret)
return ret;
dma_cap_set(DMA_PRIVATE, mcf_edma->dma_dev.cap_mask);
dma_cap_set(DMA_SLAVE, mcf_edma->dma_dev.cap_mask);
dma_cap_set(DMA_CYCLIC, mcf_edma->dma_dev.cap_mask);
mcf_edma->dma_dev.dev = &pdev->dev;
mcf_edma->dma_dev.device_alloc_chan_resources =
fsl_edma_alloc_chan_resources;
mcf_edma->dma_dev.device_free_chan_resources =
fsl_edma_free_chan_resources;
mcf_edma->dma_dev.device_config = fsl_edma_slave_config;
mcf_edma->dma_dev.device_prep_dma_cyclic =
fsl_edma_prep_dma_cyclic;
mcf_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg;
mcf_edma->dma_dev.device_tx_status = fsl_edma_tx_status;
mcf_edma->dma_dev.device_pause = fsl_edma_pause;
mcf_edma->dma_dev.device_resume = fsl_edma_resume;
mcf_edma->dma_dev.device_terminate_all = fsl_edma_terminate_all;
mcf_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending;
mcf_edma->dma_dev.src_addr_widths = FSL_EDMA_BUSWIDTHS;
mcf_edma->dma_dev.dst_addr_widths = FSL_EDMA_BUSWIDTHS;
mcf_edma->dma_dev.directions =
BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
mcf_edma->dma_dev.filter.fn = mcf_edma_filter_fn;
mcf_edma->dma_dev.filter.map = pdata->slave_map;
mcf_edma->dma_dev.filter.mapcnt = pdata->slavecnt;
platform_set_drvdata(pdev, mcf_edma);
ret = dma_async_device_register(&mcf_edma->dma_dev);
if (ret) {
dev_err(&pdev->dev,
"Can't register Freescale eDMA engine. (%d)\n", ret);
return ret;
}
/* Enable round robin arbitration */
iowrite32(EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);
return 0;
}
/*
 * Tear the device down in reverse order of probe: release IRQ handlers
 * first, then kill the vchan tasklets, then unregister from dmaengine.
 * All memory is devm-managed, so nothing is freed explicitly here.
 */
static int mcf_edma_remove(struct platform_device *pdev)
{
	struct fsl_edma_engine *edma = platform_get_drvdata(pdev);

	mcf_edma_irq_free(pdev, edma);
	fsl_edma_cleanup_vchan(&edma->dma_dev);
	dma_async_device_unregister(&edma->dma_dev);

	return 0;
}
/* Platform driver bound by name to "mcf-edma" platform devices. */
static struct platform_driver mcf_edma_driver = {
	.driver		= {
		.name	= "mcf-edma",
	},
	.probe		= mcf_edma_probe,
	.remove		= mcf_edma_remove,
};
bool mcf_edma_filter_fn(struct dma_chan *chan, void *param)
{
if (chan->device->dev->driver == &mcf_edma_driver.driver) {
struct fsl_edma_chan *mcf_chan = to_fsl_edma_chan(chan);
return (mcf_chan->slave_id == (uintptr_t)param);
}
return false;
}
EXPORT_SYMBOL(mcf_edma_filter_fn);
/*
 * Register at subsys_initcall time (earlier than module_init) so the DMA
 * engine is available before dependent peripheral drivers probe.
 */
static int __init mcf_edma_init(void)
{
	return platform_driver_register(&mcf_edma_driver);
}
subsys_initcall(mcf_edma_init);

static void __exit mcf_edma_exit(void)
{
	platform_driver_unregister(&mcf_edma_driver);
}
module_exit(mcf_edma_exit);

MODULE_ALIAS("platform:mcf-edma");
MODULE_DESCRIPTION("Freescale eDMA engine driver, ColdFire family");
MODULE_LICENSE("GPL v2");
...@@ -116,6 +116,7 @@ struct mmp_tdma_chan { ...@@ -116,6 +116,7 @@ struct mmp_tdma_chan {
u32 burst_sz; u32 burst_sz;
enum dma_slave_buswidth buswidth; enum dma_slave_buswidth buswidth;
enum dma_status status; enum dma_status status;
struct dma_slave_config slave_config;
int idx; int idx;
enum mmp_tdma_type type; enum mmp_tdma_type type;
...@@ -139,6 +140,10 @@ struct mmp_tdma_device { ...@@ -139,6 +140,10 @@ struct mmp_tdma_device {
#define to_mmp_tdma_chan(dchan) container_of(dchan, struct mmp_tdma_chan, chan) #define to_mmp_tdma_chan(dchan) container_of(dchan, struct mmp_tdma_chan, chan)
static int mmp_tdma_config_write(struct dma_chan *chan,
enum dma_transfer_direction dir,
struct dma_slave_config *dmaengine_cfg);
static void mmp_tdma_chan_set_desc(struct mmp_tdma_chan *tdmac, dma_addr_t phys) static void mmp_tdma_chan_set_desc(struct mmp_tdma_chan *tdmac, dma_addr_t phys)
{ {
writel(phys, tdmac->reg_base + TDNDPR); writel(phys, tdmac->reg_base + TDNDPR);
...@@ -442,6 +447,8 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic( ...@@ -442,6 +447,8 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
if (!desc) if (!desc)
goto err_out; goto err_out;
mmp_tdma_config_write(chan, direction, &tdmac->slave_config);
while (buf < buf_len) { while (buf < buf_len) {
desc = &tdmac->desc_arr[i]; desc = &tdmac->desc_arr[i];
...@@ -495,7 +502,18 @@ static int mmp_tdma_config(struct dma_chan *chan, ...@@ -495,7 +502,18 @@ static int mmp_tdma_config(struct dma_chan *chan,
{ {
struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) { memcpy(&tdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg));
return 0;
}
static int mmp_tdma_config_write(struct dma_chan *chan,
enum dma_transfer_direction dir,
struct dma_slave_config *dmaengine_cfg)
{
struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
if (dir == DMA_DEV_TO_MEM) {
tdmac->dev_addr = dmaengine_cfg->src_addr; tdmac->dev_addr = dmaengine_cfg->src_addr;
tdmac->burst_sz = dmaengine_cfg->src_maxburst; tdmac->burst_sz = dmaengine_cfg->src_maxburst;
tdmac->buswidth = dmaengine_cfg->src_addr_width; tdmac->buswidth = dmaengine_cfg->src_addr_width;
...@@ -504,7 +522,7 @@ static int mmp_tdma_config(struct dma_chan *chan, ...@@ -504,7 +522,7 @@ static int mmp_tdma_config(struct dma_chan *chan,
tdmac->burst_sz = dmaengine_cfg->dst_maxburst; tdmac->burst_sz = dmaengine_cfg->dst_maxburst;
tdmac->buswidth = dmaengine_cfg->dst_addr_width; tdmac->buswidth = dmaengine_cfg->dst_addr_width;
} }
tdmac->dir = dmaengine_cfg->direction; tdmac->dir = dir;
return mmp_tdma_config_chan(chan); return mmp_tdma_config_chan(chan);
} }
...@@ -530,9 +548,6 @@ static void mmp_tdma_issue_pending(struct dma_chan *chan) ...@@ -530,9 +548,6 @@ static void mmp_tdma_issue_pending(struct dma_chan *chan)
static int mmp_tdma_remove(struct platform_device *pdev) static int mmp_tdma_remove(struct platform_device *pdev)
{ {
struct mmp_tdma_device *tdev = platform_get_drvdata(pdev);
dma_async_device_unregister(&tdev->device);
return 0; return 0;
} }
...@@ -696,7 +711,7 @@ static int mmp_tdma_probe(struct platform_device *pdev) ...@@ -696,7 +711,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
platform_set_drvdata(pdev, tdev); platform_set_drvdata(pdev, tdev);
ret = dma_async_device_register(&tdev->device); ret = dmaenginem_async_device_register(&tdev->device);
if (ret) { if (ret) {
dev_err(tdev->device.dev, "unable to register\n"); dev_err(tdev->device.dev, "unable to register\n");
return ret; return ret;
...@@ -708,7 +723,7 @@ static int mmp_tdma_probe(struct platform_device *pdev) ...@@ -708,7 +723,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
if (ret) { if (ret) {
dev_err(tdev->device.dev, dev_err(tdev->device.dev,
"failed to register controller\n"); "failed to register controller\n");
dma_async_device_unregister(&tdev->device); return ret;
} }
} }
......
...@@ -348,9 +348,9 @@ static void mv_xor_tasklet(unsigned long data) ...@@ -348,9 +348,9 @@ static void mv_xor_tasklet(unsigned long data)
{ {
struct mv_xor_chan *chan = (struct mv_xor_chan *) data; struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
spin_lock_bh(&chan->lock); spin_lock(&chan->lock);
mv_chan_slot_cleanup(chan); mv_chan_slot_cleanup(chan);
spin_unlock_bh(&chan->lock); spin_unlock(&chan->lock);
} }
static struct mv_xor_desc_slot * static struct mv_xor_desc_slot *
......
...@@ -847,7 +847,7 @@ static int __init mxs_dma_probe(struct platform_device *pdev) ...@@ -847,7 +847,7 @@ static int __init mxs_dma_probe(struct platform_device *pdev)
mxs_dma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; mxs_dma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
mxs_dma->dma_device.device_issue_pending = mxs_dma_enable_chan; mxs_dma->dma_device.device_issue_pending = mxs_dma_enable_chan;
ret = dma_async_device_register(&mxs_dma->dma_device); ret = dmaenginem_async_device_register(&mxs_dma->dma_device);
if (ret) { if (ret) {
dev_err(mxs_dma->dma_device.dev, "unable to register\n"); dev_err(mxs_dma->dma_device.dev, "unable to register\n");
return ret; return ret;
...@@ -857,7 +857,6 @@ static int __init mxs_dma_probe(struct platform_device *pdev) ...@@ -857,7 +857,6 @@ static int __init mxs_dma_probe(struct platform_device *pdev)
if (ret) { if (ret) {
dev_err(mxs_dma->dma_device.dev, dev_err(mxs_dma->dma_device.dev,
"failed to register controller\n"); "failed to register controller\n");
dma_async_device_unregister(&mxs_dma->dma_device);
} }
dev_info(mxs_dma->dma_device.dev, "initialized\n"); dev_info(mxs_dma->dma_device.dev, "initialized\n");
......
// SPDX-License-Identifier: GPL-2.0
/* /*
* Copyright (C) 2013-2014 Renesas Electronics Europe Ltd. * Copyright (C) 2013-2014 Renesas Electronics Europe Ltd.
* Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de> * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*/ */
#include <linux/bitmap.h> #include <linux/bitmap.h>
...@@ -1095,8 +1092,8 @@ static struct dma_chan *nbpf_of_xlate(struct of_phandle_args *dma_spec, ...@@ -1095,8 +1092,8 @@ static struct dma_chan *nbpf_of_xlate(struct of_phandle_args *dma_spec,
if (!dchan) if (!dchan)
return NULL; return NULL;
dev_dbg(dchan->device->dev, "Entry %s(%s)\n", __func__, dev_dbg(dchan->device->dev, "Entry %s(%pOFn)\n", __func__,
dma_spec->np->name); dma_spec->np);
chan = nbpf_to_chan(dchan); chan = nbpf_to_chan(dchan);
......
This diff is collapsed.
...@@ -4360,7 +4360,7 @@ static ssize_t enable_store(struct device_driver *dev, const char *buf, ...@@ -4360,7 +4360,7 @@ static ssize_t enable_store(struct device_driver *dev, const char *buf,
} }
static DRIVER_ATTR_RW(enable); static DRIVER_ATTR_RW(enable);
static ssize_t poly_store(struct device_driver *dev, char *buf) static ssize_t poly_show(struct device_driver *dev, char *buf)
{ {
ssize_t size = 0; ssize_t size = 0;
u32 reg; u32 reg;
......
...@@ -1285,7 +1285,6 @@ static int pxad_remove(struct platform_device *op) ...@@ -1285,7 +1285,6 @@ static int pxad_remove(struct platform_device *op)
pxad_cleanup_debugfs(pdev); pxad_cleanup_debugfs(pdev);
pxad_free_channels(&pdev->slave); pxad_free_channels(&pdev->slave);
dma_async_device_unregister(&pdev->slave);
return 0; return 0;
} }
...@@ -1396,7 +1395,7 @@ static int pxad_init_dmadev(struct platform_device *op, ...@@ -1396,7 +1395,7 @@ static int pxad_init_dmadev(struct platform_device *op,
init_waitqueue_head(&c->wq_state); init_waitqueue_head(&c->wq_state);
} }
return dma_async_device_register(&pdev->slave); return dmaenginem_async_device_register(&pdev->slave);
} }
static int pxad_probe(struct platform_device *op) static int pxad_probe(struct platform_device *op)
...@@ -1433,7 +1432,7 @@ static int pxad_probe(struct platform_device *op) ...@@ -1433,7 +1432,7 @@ static int pxad_probe(struct platform_device *op)
"#dma-requests set to default 32 as missing in OF: %d", "#dma-requests set to default 32 as missing in OF: %d",
ret); ret);
nb_requestors = 32; nb_requestors = 32;
}; }
} else if (pdata && pdata->dma_channels) { } else if (pdata && pdata->dma_channels) {
dma_channels = pdata->dma_channels; dma_channels = pdata->dma_channels;
nb_requestors = pdata->nb_requestors; nb_requestors = pdata->nb_requestors;
......
...@@ -198,6 +198,7 @@ struct rcar_dmac { ...@@ -198,6 +198,7 @@ struct rcar_dmac {
struct dma_device engine; struct dma_device engine;
struct device *dev; struct device *dev;
void __iomem *iomem; void __iomem *iomem;
struct device_dma_parameters parms;
unsigned int n_channels; unsigned int n_channels;
struct rcar_dmac_chan *channels; struct rcar_dmac_chan *channels;
...@@ -1792,6 +1793,8 @@ static int rcar_dmac_probe(struct platform_device *pdev) ...@@ -1792,6 +1793,8 @@ static int rcar_dmac_probe(struct platform_device *pdev)
dmac->dev = &pdev->dev; dmac->dev = &pdev->dev;
platform_set_drvdata(pdev, dmac); platform_set_drvdata(pdev, dmac);
dmac->dev->dma_parms = &dmac->parms;
dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40)); dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
ret = rcar_dmac_parse_of(&pdev->dev, dmac); ret = rcar_dmac_parse_of(&pdev->dev, dmac);
......
/* SPDX-License-Identifier: GPL-2.0 */
/* /*
* Renesas SuperH DMA Engine support * Renesas SuperH DMA Engine support
* *
* Copyright (C) 2013 Renesas Electronics, Inc. * Copyright (C) 2013 Renesas Electronics, Inc.
*
* This is free software; you can redistribute it and/or modify it under the
* terms of version 2 the GNU General Public License as published by the Free
* Software Foundation.
*/ */
#ifndef SHDMA_ARM_H #ifndef SHDMA_ARM_H
......
// SPDX-License-Identifier: GPL-2.0
/* /*
* Dmaengine driver base library for DMA controllers, found on SH-based SoCs * Dmaengine driver base library for DMA controllers, found on SH-based SoCs
* *
...@@ -7,10 +8,6 @@ ...@@ -7,10 +8,6 @@
* Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
* Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
* Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
*
* This is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*/ */
#include <linux/delay.h> #include <linux/delay.h>
......
// SPDX-License-Identifier: GPL-2.0
/* /*
* SHDMA Device Tree glue * SHDMA Device Tree glue
* *
* Copyright (C) 2013 Renesas Electronics Inc. * Copyright (C) 2013 Renesas Electronics Inc.
* Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de> * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
*
* This is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*/ */
#include <linux/dmaengine.h> #include <linux/dmaengine.h>
......
// SPDX-License-Identifier: GPL-2.0
/* /*
* Renesas SuperH DMA Engine support for r8a73a4 (APE6) SoCs * Renesas SuperH DMA Engine support for r8a73a4 (APE6) SoCs
* *
* Copyright (C) 2013 Renesas Electronics, Inc. * Copyright (C) 2013 Renesas Electronics, Inc.
*
* This is free software; you can redistribute it and/or modify it under the
* terms of version 2 the GNU General Public License as published by the Free
* Software Foundation.
*/ */
#include <linux/sh_dma.h> #include <linux/sh_dma.h>
......
/* SPDX-License-Identifier: GPL-2.0+ */
/* /*
* Renesas SuperH DMA Engine support * Renesas SuperH DMA Engine support
* *
* Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
* Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
* *
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/ */
#ifndef __DMA_SHDMA_H #ifndef __DMA_SHDMA_H
#define __DMA_SHDMA_H #define __DMA_SHDMA_H
......
// SPDX-License-Identifier: GPL-2.0+
/* /*
* Renesas SuperH DMA Engine support * Renesas SuperH DMA Engine support
* *
...@@ -8,11 +9,6 @@ ...@@ -8,11 +9,6 @@
* Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
* Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
* *
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* - DMA of SuperH does not have Hardware DMA chain mode. * - DMA of SuperH does not have Hardware DMA chain mode.
* - MAX DMA size is 16MB. * - MAX DMA size is 16MB.
* *
......
// SPDX-License-Identifier: GPL-2.0
/* /*
* Renesas SUDMAC support * Renesas SUDMAC support
* *
...@@ -8,10 +9,6 @@ ...@@ -8,10 +9,6 @@
* Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
* Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
* Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
*
* This is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*/ */
#include <linux/dmaengine.h> #include <linux/dmaengine.h>
......
// SPDX-License-Identifier: GPL-2.0
/* /*
* Renesas USB DMA Controller Driver * Renesas USB DMA Controller Driver
* *
...@@ -6,10 +7,6 @@ ...@@ -6,10 +7,6 @@
* based on rcar-dmac.c * based on rcar-dmac.c
* Copyright (C) 2014 Renesas Electronics Inc. * Copyright (C) 2014 Renesas Electronics Inc.
* Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com> * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
*
* This is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*/ */
#include <linux/delay.h> #include <linux/delay.h>
......
...@@ -68,6 +68,7 @@ ...@@ -68,6 +68,7 @@
/* SPRD_DMA_CHN_CFG register definition */ /* SPRD_DMA_CHN_CFG register definition */
#define SPRD_DMA_CHN_EN BIT(0) #define SPRD_DMA_CHN_EN BIT(0)
#define SPRD_DMA_LINKLIST_EN BIT(4)
#define SPRD_DMA_WAIT_BDONE_OFFSET 24 #define SPRD_DMA_WAIT_BDONE_OFFSET 24
#define SPRD_DMA_DONOT_WAIT_BDONE 1 #define SPRD_DMA_DONOT_WAIT_BDONE 1
...@@ -103,7 +104,7 @@ ...@@ -103,7 +104,7 @@
#define SPRD_DMA_REQ_MODE_MASK GENMASK(1, 0) #define SPRD_DMA_REQ_MODE_MASK GENMASK(1, 0)
#define SPRD_DMA_FIX_SEL_OFFSET 21 #define SPRD_DMA_FIX_SEL_OFFSET 21
#define SPRD_DMA_FIX_EN_OFFSET 20 #define SPRD_DMA_FIX_EN_OFFSET 20
#define SPRD_DMA_LLIST_END_OFFSET 19 #define SPRD_DMA_LLIST_END BIT(19)
#define SPRD_DMA_FRG_LEN_MASK GENMASK(16, 0) #define SPRD_DMA_FRG_LEN_MASK GENMASK(16, 0)
/* SPRD_DMA_CHN_BLK_LEN register definition */ /* SPRD_DMA_CHN_BLK_LEN register definition */
...@@ -164,6 +165,7 @@ struct sprd_dma_desc { ...@@ -164,6 +165,7 @@ struct sprd_dma_desc {
struct sprd_dma_chn { struct sprd_dma_chn {
struct virt_dma_chan vc; struct virt_dma_chan vc;
void __iomem *chn_base; void __iomem *chn_base;
struct sprd_dma_linklist linklist;
struct dma_slave_config slave_cfg; struct dma_slave_config slave_cfg;
u32 chn_num; u32 chn_num;
u32 dev_id; u32 dev_id;
...@@ -582,7 +584,8 @@ static int sprd_dma_get_step(enum dma_slave_buswidth buswidth) ...@@ -582,7 +584,8 @@ static int sprd_dma_get_step(enum dma_slave_buswidth buswidth)
} }
static int sprd_dma_fill_desc(struct dma_chan *chan, static int sprd_dma_fill_desc(struct dma_chan *chan,
struct sprd_dma_desc *sdesc, struct sprd_dma_chn_hw *hw,
unsigned int sglen, int sg_index,
dma_addr_t src, dma_addr_t dst, u32 len, dma_addr_t src, dma_addr_t dst, u32 len,
enum dma_transfer_direction dir, enum dma_transfer_direction dir,
unsigned long flags, unsigned long flags,
...@@ -590,7 +593,6 @@ static int sprd_dma_fill_desc(struct dma_chan *chan, ...@@ -590,7 +593,6 @@ static int sprd_dma_fill_desc(struct dma_chan *chan,
{ {
struct sprd_dma_dev *sdev = to_sprd_dma_dev(chan); struct sprd_dma_dev *sdev = to_sprd_dma_dev(chan);
struct sprd_dma_chn *schan = to_sprd_dma_chan(chan); struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
struct sprd_dma_chn_hw *hw = &sdesc->chn_hw;
u32 req_mode = (flags >> SPRD_DMA_REQ_SHIFT) & SPRD_DMA_REQ_MODE_MASK; u32 req_mode = (flags >> SPRD_DMA_REQ_SHIFT) & SPRD_DMA_REQ_MODE_MASK;
u32 int_mode = flags & SPRD_DMA_INT_MASK; u32 int_mode = flags & SPRD_DMA_INT_MASK;
int src_datawidth, dst_datawidth, src_step, dst_step; int src_datawidth, dst_datawidth, src_step, dst_step;
...@@ -670,12 +672,52 @@ static int sprd_dma_fill_desc(struct dma_chan *chan, ...@@ -670,12 +672,52 @@ static int sprd_dma_fill_desc(struct dma_chan *chan,
temp |= (src_step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_SRC_TRSF_STEP_OFFSET; temp |= (src_step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_SRC_TRSF_STEP_OFFSET;
hw->trsf_step = temp; hw->trsf_step = temp;
/* link-list configuration */
if (schan->linklist.phy_addr) {
if (sg_index == sglen - 1)
hw->frg_len |= SPRD_DMA_LLIST_END;
hw->cfg |= SPRD_DMA_LINKLIST_EN;
/* link-list index */
temp = (sg_index + 1) % sglen;
/* Next link-list configuration's physical address offset */
temp = temp * sizeof(*hw) + SPRD_DMA_CHN_SRC_ADDR;
/*
* Set the link-list pointer point to next link-list
* configuration's physical address.
*/
hw->llist_ptr = schan->linklist.phy_addr + temp;
} else {
hw->llist_ptr = 0;
}
hw->frg_step = 0; hw->frg_step = 0;
hw->src_blk_step = 0; hw->src_blk_step = 0;
hw->des_blk_step = 0; hw->des_blk_step = 0;
return 0; return 0;
} }
static int sprd_dma_fill_linklist_desc(struct dma_chan *chan,
unsigned int sglen, int sg_index,
dma_addr_t src, dma_addr_t dst, u32 len,
enum dma_transfer_direction dir,
unsigned long flags,
struct dma_slave_config *slave_cfg)
{
struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
struct sprd_dma_chn_hw *hw;
if (!schan->linklist.virt_addr)
return -EINVAL;
hw = (struct sprd_dma_chn_hw *)(schan->linklist.virt_addr +
sg_index * sizeof(*hw));
return sprd_dma_fill_desc(chan, hw, sglen, sg_index, src, dst, len,
dir, flags, slave_cfg);
}
static struct dma_async_tx_descriptor * static struct dma_async_tx_descriptor *
sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
size_t len, unsigned long flags) size_t len, unsigned long flags)
...@@ -744,10 +786,20 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, ...@@ -744,10 +786,20 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
u32 len = 0; u32 len = 0;
int ret, i; int ret, i;
/* TODO: now we only support one sg for each DMA configuration. */ if (!is_slave_direction(dir))
if (!is_slave_direction(dir) || sglen > 1)
return NULL; return NULL;
if (context) {
struct sprd_dma_linklist *ll_cfg =
(struct sprd_dma_linklist *)context;
schan->linklist.phy_addr = ll_cfg->phy_addr;
schan->linklist.virt_addr = ll_cfg->virt_addr;
} else {
schan->linklist.phy_addr = 0;
schan->linklist.virt_addr = 0;
}
sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT); sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
if (!sdesc) if (!sdesc)
return NULL; return NULL;
...@@ -762,10 +814,25 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, ...@@ -762,10 +814,25 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
src = slave_cfg->src_addr; src = slave_cfg->src_addr;
dst = sg_dma_address(sg); dst = sg_dma_address(sg);
} }
/*
* The link-list mode needs at least 2 link-list
* configurations. If there is only one sg, it doesn't
* need to fill the link-list configuration.
*/
if (sglen < 2)
break;
ret = sprd_dma_fill_linklist_desc(chan, sglen, i, src, dst, len,
dir, flags, slave_cfg);
if (ret) {
kfree(sdesc);
return NULL;
}
} }
ret = sprd_dma_fill_desc(chan, sdesc, src, dst, len, dir, flags, ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, src, dst, len,
slave_cfg); dir, flags, slave_cfg);
if (ret) { if (ret) {
kfree(sdesc); kfree(sdesc);
return NULL; return NULL;
......
...@@ -833,7 +833,7 @@ static int st_fdma_probe(struct platform_device *pdev) ...@@ -833,7 +833,7 @@ static int st_fdma_probe(struct platform_device *pdev)
fdev->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); fdev->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
fdev->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; fdev->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
ret = dma_async_device_register(&fdev->dma_device); ret = dmaenginem_async_device_register(&fdev->dma_device);
if (ret) { if (ret) {
dev_err(&pdev->dev, dev_err(&pdev->dev,
"Failed to register DMA device (%d)\n", ret); "Failed to register DMA device (%d)\n", ret);
...@@ -844,15 +844,13 @@ static int st_fdma_probe(struct platform_device *pdev) ...@@ -844,15 +844,13 @@ static int st_fdma_probe(struct platform_device *pdev)
if (ret) { if (ret) {
dev_err(&pdev->dev, dev_err(&pdev->dev,
"Failed to register controller (%d)\n", ret); "Failed to register controller (%d)\n", ret);
goto err_dma_dev; goto err_rproc;
} }
dev_info(&pdev->dev, "ST FDMA engine driver, irq:%d\n", fdev->irq); dev_info(&pdev->dev, "ST FDMA engine driver, irq:%d\n", fdev->irq);
return 0; return 0;
err_dma_dev:
dma_async_device_unregister(&fdev->dma_device);
err_rproc: err_rproc:
st_fdma_free(fdev); st_fdma_free(fdev);
st_slim_rproc_put(fdev->slim_rproc); st_slim_rproc_put(fdev->slim_rproc);
...@@ -867,7 +865,6 @@ static int st_fdma_remove(struct platform_device *pdev) ...@@ -867,7 +865,6 @@ static int st_fdma_remove(struct platform_device *pdev)
devm_free_irq(&pdev->dev, fdev->irq, fdev); devm_free_irq(&pdev->dev, fdev->irq, fdev);
st_slim_rproc_put(fdev->slim_rproc); st_slim_rproc_put(fdev->slim_rproc);
of_dma_controller_free(pdev->dev.of_node); of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&fdev->dma_device);
return 0; return 0;
} }
......
...@@ -2839,7 +2839,7 @@ static int __init d40_dmaengine_init(struct d40_base *base, ...@@ -2839,7 +2839,7 @@ static int __init d40_dmaengine_init(struct d40_base *base,
d40_ops_init(base, &base->dma_slave); d40_ops_init(base, &base->dma_slave);
err = dma_async_device_register(&base->dma_slave); err = dmaenginem_async_device_register(&base->dma_slave);
if (err) { if (err) {
d40_err(base->dev, "Failed to register slave channels\n"); d40_err(base->dev, "Failed to register slave channels\n");
...@@ -2854,12 +2854,12 @@ static int __init d40_dmaengine_init(struct d40_base *base, ...@@ -2854,12 +2854,12 @@ static int __init d40_dmaengine_init(struct d40_base *base,
d40_ops_init(base, &base->dma_memcpy); d40_ops_init(base, &base->dma_memcpy);
err = dma_async_device_register(&base->dma_memcpy); err = dmaenginem_async_device_register(&base->dma_memcpy);
if (err) { if (err) {
d40_err(base->dev, d40_err(base->dev,
"Failed to register memcpy only channels\n"); "Failed to register memcpy only channels\n");
goto unregister_slave; goto exit;
} }
d40_chan_init(base, &base->dma_both, base->phy_chans, d40_chan_init(base, &base->dma_both, base->phy_chans,
...@@ -2871,18 +2871,14 @@ static int __init d40_dmaengine_init(struct d40_base *base, ...@@ -2871,18 +2871,14 @@ static int __init d40_dmaengine_init(struct d40_base *base,
dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask); dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);
d40_ops_init(base, &base->dma_both); d40_ops_init(base, &base->dma_both);
err = dma_async_device_register(&base->dma_both); err = dmaenginem_async_device_register(&base->dma_both);
if (err) { if (err) {
d40_err(base->dev, d40_err(base->dev,
"Failed to register logical and physical capable channels\n"); "Failed to register logical and physical capable channels\n");
goto unregister_memcpy; goto exit;
} }
return 0; return 0;
unregister_memcpy:
dma_async_device_unregister(&base->dma_memcpy);
unregister_slave:
dma_async_device_unregister(&base->dma_slave);
exit: exit:
return err; return err;
} }
......
...@@ -308,20 +308,12 @@ static bool stm32_dma_fifo_threshold_is_allowed(u32 burst, u32 threshold, ...@@ -308,20 +308,12 @@ static bool stm32_dma_fifo_threshold_is_allowed(u32 burst, u32 threshold,
static bool stm32_dma_is_burst_possible(u32 buf_len, u32 threshold) static bool stm32_dma_is_burst_possible(u32 buf_len, u32 threshold)
{ {
switch (threshold) { /*
case STM32_DMA_FIFO_THRESHOLD_FULL: * Buffer or period length has to be aligned on FIFO depth.
if (buf_len >= STM32_DMA_MAX_BURST) * Otherwise bytes may be stuck within FIFO at buffer or period
return true; * length.
else */
return false; return ((buf_len % ((threshold + 1) * 4)) == 0);
case STM32_DMA_FIFO_THRESHOLD_HALFFULL:
if (buf_len >= STM32_DMA_MAX_BURST / 2)
return true;
else
return false;
default:
return false;
}
} }
static u32 stm32_dma_get_best_burst(u32 buf_len, u32 max_burst, u32 threshold, static u32 stm32_dma_get_best_burst(u32 buf_len, u32 max_burst, u32 threshold,
......
...@@ -1656,7 +1656,7 @@ static int stm32_mdma_probe(struct platform_device *pdev) ...@@ -1656,7 +1656,7 @@ static int stm32_mdma_probe(struct platform_device *pdev)
return ret; return ret;
} }
ret = dma_async_device_register(dd); ret = dmaenginem_async_device_register(dd);
if (ret) if (ret)
return ret; return ret;
...@@ -1674,8 +1674,6 @@ static int stm32_mdma_probe(struct platform_device *pdev) ...@@ -1674,8 +1674,6 @@ static int stm32_mdma_probe(struct platform_device *pdev)
return 0; return 0;
err_unregister: err_unregister:
dma_async_device_unregister(dd);
return ret; return ret;
} }
......
...@@ -545,7 +545,7 @@ static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan, ...@@ -545,7 +545,7 @@ static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
} }
dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys, dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys,
td_desc->desc_list_len, DMA_MEM_TO_DEV); td_desc->desc_list_len, DMA_TO_DEVICE);
return &td_desc->txd; return &td_desc->txd;
} }
......
...@@ -58,4 +58,73 @@ enum sprd_dma_int_type { ...@@ -58,4 +58,73 @@ enum sprd_dma_int_type {
SPRD_DMA_CFGERR_INT, SPRD_DMA_CFGERR_INT,
}; };
/*
* struct sprd_dma_linklist - DMA link-list address structure
* @virt_addr: link-list virtual address to configure link-list node
* @phy_addr: link-list physical address to link DMA transfer
*
* The Spreadtrum DMA controller supports the link-list mode, that means slaves
* can supply several groups configurations (each configuration represents one
* DMA transfer) saved in memory, and DMA controller will link these groups
* configurations by writing the physical address of each configuration into the
* link-list register.
*
* Just as shown below, the link-list pointer register will be pointed to the
* physical address of 'configuration 1', and the 'configuration 1' link-list
* pointer will be pointed to 'configuration 2', and so on.
* Once trigger the DMA transfer, the DMA controller will load 'configuration
* 1' to its registers automatically, after 'configuration 1' transaction is
* done, DMA controller will load 'configuration 2' automatically, until all
* DMA transactions are done.
*
* Note: The last link-list pointer should point to the physical address
* of 'configuration 1', which can avoid DMA controller loads incorrect
* configuration when the last configuration transaction is done.
*
* DMA controller linklist memory
* ====================== -----------------------
*| | | configuration 1 |<---
*| DMA controller | ------->| | |
*| | | | | |
*| | | | | |
*| | | | | |
*| linklist pointer reg |---- ----| linklist pointer | |
* ====================== | ----------------------- |
* | |
* | ----------------------- |
* | | configuration 2 | |
* --->| | |
* | | |
* | | |
* | | |
* ----| linklist pointer | |
* | ----------------------- |
* | |
* | ----------------------- |
* | | configuration 3 | |
* --->| | |
* | | |
* | . | |
* . |
* . |
* . |
* | . |
* | ----------------------- |
* | | configuration n | |
* --->| | |
* | | |
* | | |
* | | |
* | linklist pointer |----
* -----------------------
*
* To support the link-list mode, DMA slaves should allocate one segment memory
* from always-on IRAM or dma coherent memory to store these groups of DMA
* configuration, and pass the virtual and physical address to DMA controller.
*/
struct sprd_dma_linklist {
unsigned long virt_addr;
phys_addr_t phy_addr;
};
#endif #endif
...@@ -85,7 +85,7 @@ static inline enum dma_transfer_direction ...@@ -85,7 +85,7 @@ static inline enum dma_transfer_direction
ep93xx_dma_chan_direction(struct dma_chan *chan) ep93xx_dma_chan_direction(struct dma_chan *chan)
{ {
if (!ep93xx_dma_chan_is_m2p(chan)) if (!ep93xx_dma_chan_is_m2p(chan))
return DMA_NONE; return DMA_TRANS_NONE;
/* even channels are for TX, odd for RX */ /* even channels are for TX, odd for RX */
return (chan->chan_id % 2 == 0) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM; return (chan->chan_id % 2 == 0) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
......
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Freescale eDMA platform data, ColdFire SoC's family.
*
* Copyright (c) 2017 Angelo Dureghello <angelo@sysam.it>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __LINUX_PLATFORM_DATA_MCF_EDMA_H__
#define __LINUX_PLATFORM_DATA_MCF_EDMA_H__
struct dma_slave_map;
bool mcf_edma_filter_fn(struct dma_chan *chan, void *param);
#define MCF_EDMA_FILTER_PARAM(ch) ((void *)ch)
/**
* struct mcf_edma_platform_data - platform specific data for eDMA engine
*
* @ver The eDMA module version.
* @dma_channels The number of eDMA channels.
*/
struct mcf_edma_platform_data {
int dma_channels;
const struct dma_slave_map *slave_map;
int slavecnt;
};
#endif /* __LINUX_PLATFORM_DATA_MCF_EDMA_H__ */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment