Commit c511dc1f authored by Linus Torvalds

Merge branch 'next' of git://git.infradead.org/users/vkoul/slave-dma

Pull slave-dmaengine update from Vinod Koul:
 "This time we have a new dmaengine driver from the tegra folks.  Also
  we have Guennadi's cleanup of sh drivers which includes a library for
  sh drivers.  And the usual odd fixes in bunch of drivers and some nice
  cleanup of dw_dmac from Andy."

Fix up conflicts in drivers/mmc/host/sh_mmcif.c

* 'next' of git://git.infradead.org/users/vkoul/slave-dma: (46 commits)
  dmaengine: Cleanup logging messages
  mmc: sh_mmcif: switch to the new DMA channel allocation and configuration
  dma: sh: provide a migration path for slave drivers to stop using .private
  dma: sh: use an integer slave ID to improve API compatibility
  dmaengine: shdma: prepare to stop using struct dma_chan::private
  sh: remove unused DMA device pointer from SIU platform data
  ASoC: siu: don't use DMA device for channel filtering
  dmaengine: shdma: (cosmetic) simplify a static function
  dmaengine: at_hdmac: add a few const qualifiers
  dw_dmac: use 'u32' for LLI structure members, not dma_addr_t
  dw_dmac: mark dwc_dump_lli inline
  dma: mxs-dma: Export missing symbols from mxs-dma.c
  dma: shdma: convert to the shdma base library
  ASoC: fsi: prepare for conversion to the shdma base library
  usb: renesas_usbhs: prepare for conversion to the shdma base library
  ASoC: siu: prepare for conversion to the shdma base library
  serial: sh-sci: prepare for conversion to the shdma base library
  mmc: sh_mobile_sdhi: prepare for conversion to the shdma base library
  mmc: sh_mmcif: remove unneeded struct sh_mmcif_dma, prepare to shdma conversion
  dma: shdma: prepare for conversion to the shdma base library
  ...
parents 9161c3b7 63433250
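
The central API change in this pull shows up throughout the diffs below: slave drivers stop stuffing an sh_dmae_slave structure into dma_chan::private and instead hand an integer slave ID to the shdma base library's filter, then describe the peripheral via dmaengine_slave_config(). A minimal, hedged sketch of the new request pattern (MY_SLAVE_TX_ID is an illustrative placeholder; shdma_chan_filter() and the dmaengine calls are the real interfaces exercised in the sh_mmcif hunk further down):

#include <linux/dmaengine.h>
#include <linux/sh_dma.h>

#define MY_SLAVE_TX_ID 20       /* illustrative platform-defined slave ID */

/* Request a slave channel by integer ID instead of via chan->private */
static struct dma_chan *example_request_tx_channel(void)
{
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        return dma_request_channel(mask, shdma_chan_filter,
                                   (void *)(unsigned long)MY_SLAVE_TX_ID);
}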
@@ -14,7 +14,6 @@
 struct device;
 
 struct siu_platform {
-        struct device *dma_dev;
         unsigned int dma_slave_tx_a;
         unsigned int dma_slave_rx_a;
         unsigned int dma_slave_tx_b;
...
@@ -512,7 +512,6 @@ static struct platform_device tmu2_device = {
 };
 
 static struct siu_platform siu_platform_data = {
-        .dma_dev        = &dma_device.dev,
         .dma_slave_tx_a = SHDMA_SLAVE_SIUA_TX,
         .dma_slave_rx_a = SHDMA_SLAVE_SIUA_RX,
         .dma_slave_tx_b = SHDMA_SLAVE_SIUB_TX,
...
@@ -148,6 +148,20 @@ config TXX9_DMAC
           Support the TXx9 SoC internal DMA controller. This can be
           integrated in chips such as the Toshiba TX4927/38/39.
 
+config TEGRA20_APB_DMA
+        bool "NVIDIA Tegra20 APB DMA support"
+        depends on ARCH_TEGRA
+        select DMA_ENGINE
+        help
+          Support for the NVIDIA Tegra20 APB DMA controller driver. The
+          DMA controller is having multiple DMA channel which can be
+          configured for different peripherals like audio, UART, SPI,
+          I2C etc which is in APB bus.
+          This DMA controller transfers data from memory to peripheral fifo
+          or vice versa. It does not support memory to memory data transfer.
+
 config SH_DMAE
         tristate "Renesas SuperH DMAC support"
         depends on (SUPERH && SH_DMA) || (ARM && ARCH_SHMOBILE)
@@ -237,7 +251,7 @@ config IMX_DMA
 
 config MXS_DMA
         bool "MXS DMA support"
-        depends on SOC_IMX23 || SOC_IMX28
+        depends on SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q
         select STMP_DEVICE
         select DMA_ENGINE
         help
@@ -260,6 +274,16 @@ config DMA_SA11X0
           SA-1110 SoCs. This DMA engine can only be used with on-chip
           devices.
 
+config MMP_TDMA
+        bool "MMP Two-Channel DMA support"
+        depends on ARCH_MMP
+        select DMA_ENGINE
+        help
+          Support the MMP Two-Channel DMA engine.
+          This engine used for MMP Audio DMA and pxa910 SQU.
+          Say Y here if you enabled MMP ADMA, otherwise say N.
+
 config DMA_ENGINE
         bool
...
@@ -14,7 +14,7 @@ obj-$(CONFIG_DW_DMAC) += dw_dmac.o
 obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
 obj-$(CONFIG_MX3_IPU) += ipu/
 obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
-obj-$(CONFIG_SH_DMAE) += shdma.o
+obj-$(CONFIG_SH_DMAE) += sh/
 obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
 obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
 obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
@@ -23,8 +23,10 @@ obj-$(CONFIG_MXS_DMA) += mxs-dma.o
 obj-$(CONFIG_TIMB_DMA) += timb_dma.o
 obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
 obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
+obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o
 obj-$(CONFIG_PL330_DMA) += pl330.o
 obj-$(CONFIG_PCH_DMA) += pch_dma.o
 obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
 obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
 obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
+obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
@@ -9,10 +9,9 @@
  * (at your option) any later version.
  *
  *
- * This supports the Atmel AHB DMA Controller,
- *
- * The driver has currently been tested with the Atmel AT91SAM9RL
- * and AT91SAM9G45 series.
+ * This supports the Atmel AHB DMA Controller found in several Atmel SoCs.
+ * The only Atmel DMA Controller that is not covered by this driver is the one
+ * found on AT91SAM9263.
  */
 
 #include <linux/clk.h>
@@ -1217,7 +1216,7 @@ static const struct platform_device_id atdma_devtypes[] = {
         }
 };
 
-static inline struct at_dma_platform_data * __init at_dma_get_driver_data(
+static inline const struct at_dma_platform_data * __init at_dma_get_driver_data(
                 struct platform_device *pdev)
 {
         if (pdev->dev.of_node) {
@@ -1255,7 +1254,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
         int irq;
         int err;
         int i;
-        struct at_dma_platform_data *plat_dat;
+        const struct at_dma_platform_data *plat_dat;
 
         /* setup platform data for each SoC */
         dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
...
@@ -1438,34 +1438,32 @@ static int __init coh901318_probe(struct platform_device *pdev)
         io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
         if (!io)
-                goto err_get_resource;
+                return -ENODEV;
 
         /* Map DMA controller registers to virtual memory */
-        if (request_mem_region(io->start,
-                               resource_size(io),
-                               pdev->dev.driver->name) == NULL) {
-                err = -EBUSY;
-                goto err_request_mem;
-        }
+        if (devm_request_mem_region(&pdev->dev,
+                                    io->start,
+                                    resource_size(io),
+                                    pdev->dev.driver->name) == NULL)
+                return -ENOMEM;
 
         pdata = pdev->dev.platform_data;
         if (!pdata)
-                goto err_no_platformdata;
+                return -ENODEV;
 
-        base = kmalloc(ALIGN(sizeof(struct coh901318_base), 4) +
-                       pdata->max_channels *
-                       sizeof(struct coh901318_chan),
-                       GFP_KERNEL);
+        base = devm_kzalloc(&pdev->dev,
+                            ALIGN(sizeof(struct coh901318_base), 4) +
+                            pdata->max_channels *
+                            sizeof(struct coh901318_chan),
+                            GFP_KERNEL);
         if (!base)
-                goto err_alloc_coh_dma_channels;
+                return -ENOMEM;
 
         base->chans = ((void *)base) + ALIGN(sizeof(struct coh901318_base), 4);
 
-        base->virtbase = ioremap(io->start, resource_size(io));
-        if (!base->virtbase) {
-                err = -ENOMEM;
-                goto err_no_ioremap;
-        }
+        base->virtbase = devm_ioremap(&pdev->dev, io->start, resource_size(io));
+        if (!base->virtbase)
+                return -ENOMEM;
 
         base->dev = &pdev->dev;
         base->platform = pdata;
@@ -1474,25 +1472,20 @@ static int __init coh901318_probe(struct platform_device *pdev)
         COH901318_DEBUGFS_ASSIGN(debugfs_dma_base, base);
 
-        platform_set_drvdata(pdev, base);
-
         irq = platform_get_irq(pdev, 0);
         if (irq < 0)
-                goto err_no_irq;
+                return irq;
 
-        err = request_irq(irq, dma_irq_handler, IRQF_DISABLED,
-                          "coh901318", base);
-        if (err) {
-                dev_crit(&pdev->dev,
-                         "Cannot allocate IRQ for DMA controller!\n");
-                goto err_request_irq;
-        }
+        err = devm_request_irq(&pdev->dev, irq, dma_irq_handler, IRQF_DISABLED,
+                               "coh901318", base);
+        if (err)
+                return err;
 
         err = coh901318_pool_create(&base->pool, &pdev->dev,
                                     sizeof(struct coh901318_lli),
                                     32);
         if (err)
-                goto err_pool_create;
+                return err;
 
         /* init channels for device transfers */
         coh901318_base_init(&base->dma_slave, base->platform->chans_slave,
@@ -1538,6 +1531,7 @@ static int __init coh901318_probe(struct platform_device *pdev)
         if (err)
                 goto err_register_memcpy;
 
+        platform_set_drvdata(pdev, base);
         dev_info(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n",
                  (u32) base->virtbase);
 
@@ -1547,19 +1541,6 @@ static int __init coh901318_probe(struct platform_device *pdev)
         dma_async_device_unregister(&base->dma_slave);
 err_register_slave:
         coh901318_pool_destroy(&base->pool);
-err_pool_create:
-        free_irq(platform_get_irq(pdev, 0), base);
-err_request_irq:
-err_no_irq:
-        iounmap(base->virtbase);
-err_no_ioremap:
-        kfree(base);
-err_alloc_coh_dma_channels:
-err_no_platformdata:
-        release_mem_region(pdev->resource->start,
-                           resource_size(pdev->resource));
-err_request_mem:
-err_get_resource:
         return err;
 }
 
@@ -1570,11 +1551,6 @@ static int __exit coh901318_remove(struct platform_device *pdev)
         dma_async_device_unregister(&base->dma_memcpy);
         dma_async_device_unregister(&base->dma_slave);
         coh901318_pool_destroy(&base->pool);
-        free_irq(platform_get_irq(pdev, 0), base);
-        iounmap(base->virtbase);
-        kfree(base);
-        release_mem_region(pdev->resource->start,
-                           resource_size(pdev->resource));
         return 0;
 }
...
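
The coh901318 hunk above is a straight conversion to managed (devm_*) resources, which is why the goto-based error unwinding and most of the remove() teardown disappear. A hedged, generic sketch of the same pattern, with illustrative names (example_probe, example_irq_handler), not code from the driver:

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static irqreturn_t example_irq_handler(int irq, void *data)
{
        return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
        struct resource *io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        void __iomem *regs;
        int irq;

        if (!io)
                return -ENODEV;

        /* Region, mapping and IRQ are all released by the driver core when
         * probe fails or the device is removed, so no error labels needed. */
        if (!devm_request_mem_region(&pdev->dev, io->start,
                                     resource_size(io), pdev->dev.driver->name))
                return -ENOMEM;

        regs = devm_ioremap(&pdev->dev, io->start, resource_size(io));
        if (!regs)
                return -ENOMEM;
        /* a real driver would stash 'regs' in its private state here */

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        return devm_request_irq(&pdev->dev, irq, example_irq_handler, 0,
                                "example", NULL);
}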
@@ -45,6 +45,8 @@
  * See Documentation/dmaengine.txt for more details
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/dma-mapping.h>
 #include <linux/init.h>
 #include <linux/module.h>
@@ -261,7 +263,7 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
         do {
                 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
                 if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
-                        printk(KERN_ERR "dma_sync_wait_timeout!\n");
+                        pr_err("%s: timeout!\n", __func__);
                         return DMA_ERROR;
                 }
         } while (status == DMA_IN_PROGRESS);
@@ -312,7 +314,7 @@ static int __init dma_channel_table_init(void)
         }
 
         if (err) {
-                pr_err("dmaengine: initialization failure\n");
+                pr_err("initialization failure\n");
                 for_each_dma_cap_mask(cap, dma_cap_mask_all)
                         if (channel_table[cap])
                                 free_percpu(channel_table[cap]);
@@ -520,12 +522,12 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
                         err = dma_chan_get(chan);
                         if (err == -ENODEV) {
-                                pr_debug("%s: %s module removed\n", __func__,
-                                         dma_chan_name(chan));
+                                pr_debug("%s: %s module removed\n",
+                                         __func__, dma_chan_name(chan));
                                 list_del_rcu(&device->global_node);
                         } else if (err)
                                 pr_debug("%s: failed to get %s: (%d)\n",
                                          __func__, dma_chan_name(chan), err);
                         else
                                 break;
                         if (--device->privatecnt == 0)
@@ -535,7 +537,9 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
         }
         mutex_unlock(&dma_list_mutex);
 
-        pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail",
+        pr_debug("%s: %s (%s)\n",
+                 __func__,
+                 chan ? "success" : "fail",
                  chan ? dma_chan_name(chan) : NULL);
 
         return chan;
@@ -579,7 +583,7 @@ void dmaengine_get(void)
                                 break;
                         } else if (err)
                                 pr_err("%s: failed to get %s: (%d)\n",
                                        __func__, dma_chan_name(chan), err);
                 }
         }
@@ -1015,7 +1019,7 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
         while (tx->cookie == -EBUSY) {
                 if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
                         pr_err("%s timeout waiting for descriptor submission\n",
                                __func__);
                         return DMA_ERROR;
                 }
                 cpu_relax();
...
This diff is collapsed.
@@ -82,7 +82,7 @@ struct dw_dma_regs {
         DW_REG(ID);
         DW_REG(TEST);
 
-        /* optional encoded params, 0x3c8..0x3 */
+        /* optional encoded params, 0x3c8..0x3f7 */
 };
 
 /* Bitfields in CTL_LO */
@@ -219,9 +219,9 @@ static inline struct dw_dma *to_dw_dma(struct dma_device *ddev)
 /* LLI == Linked List Item; a.k.a. DMA block descriptor */
 struct dw_lli {
         /* values that are not changed by hardware */
-        dma_addr_t      sar;
-        dma_addr_t      dar;
-        dma_addr_t      llp;            /* chain to next lli */
+        u32             sar;
+        u32             dar;
+        u32             llp;            /* chain to next lli */
         u32             ctllo;
         /* values that may get written back: */
         u32             ctlhi;
...
This diff is collapsed.
@@ -29,7 +29,6 @@
 #include <linux/of_device.h>
 
 #include <asm/irq.h>
-#include <mach/mxs.h>
 
 #include "dmaengine.h"
 
@@ -201,6 +200,7 @@ int mxs_dma_is_apbh(struct dma_chan *chan)
         return dma_is_apbh(mxs_dma);
 }
+EXPORT_SYMBOL_GPL(mxs_dma_is_apbh);
 
 int mxs_dma_is_apbx(struct dma_chan *chan)
 {
@@ -209,6 +209,7 @@ int mxs_dma_is_apbx(struct dma_chan *chan)
         return !dma_is_apbh(mxs_dma);
 }
+EXPORT_SYMBOL_GPL(mxs_dma_is_apbx);
 
 static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
 {
...
obj-$(CONFIG_SH_DMAE) += shdma-base.o
obj-$(CONFIG_SH_DMAE) += shdma.o
This diff is collapsed.
@@ -13,42 +13,29 @@
 #ifndef __DMA_SHDMA_H
 #define __DMA_SHDMA_H
 
+#include <linux/sh_dma.h>
+#include <linux/shdma-base.h>
 #include <linux/dmaengine.h>
 #include <linux/interrupt.h>
 #include <linux/list.h>
 
-#define SH_DMAC_MAX_CHANNELS 20
-#define SH_DMA_SLAVE_NUMBER 256
-#define SH_DMA_TCR_MAX 0x00FFFFFF      /* 16MB */
+#define SH_DMAE_MAX_CHANNELS 20
+#define SH_DMAE_TCR_MAX 0x00FFFFFF     /* 16MB */
 
 struct device;
 
-enum dmae_pm_state {
-        DMAE_PM_ESTABLISHED,
-        DMAE_PM_BUSY,
-        DMAE_PM_PENDING,
-};
-
 struct sh_dmae_chan {
-        spinlock_t desc_lock;           /* Descriptor operation lock */
-        struct list_head ld_queue;      /* Link descriptors queue */
-        struct list_head ld_free;       /* Link descriptors free */
-        struct dma_chan common;         /* DMA common channel */
-        struct device *dev;             /* Channel device */
-        struct tasklet_struct tasklet;  /* Tasklet */
-        int descs_allocated;            /* desc count */
+        struct shdma_chan shdma_chan;
+        const struct sh_dmae_slave_config *config; /* Slave DMA configuration */
         int xmit_shift;                 /* log_2(bytes_per_xfer) */
-        int irq;
-        int id;                         /* Raw id of this channel */
         u32 __iomem *base;
         char dev_id[16];                /* unique name per DMAC of channel */
         int pm_error;
-        enum dmae_pm_state pm_state;
 };
 
 struct sh_dmae_device {
-        struct dma_device common;
-        struct sh_dmae_chan *chan[SH_DMAC_MAX_CHANNELS];
+        struct shdma_dev shdma_dev;
+        struct sh_dmae_chan *chan[SH_DMAE_MAX_CHANNELS];
         struct sh_dmae_pdata *pdata;
         struct list_head node;
         u32 __iomem *chan_reg;
@@ -57,10 +44,21 @@ struct sh_dmae_device {
         u32 chcr_ie_bit;
 };
 
-#define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, common)
+struct sh_dmae_regs {
+        u32 sar; /* SAR / source address */
+        u32 dar; /* DAR / destination address */
+        u32 tcr; /* TCR / transfer count */
+};
+
+struct sh_dmae_desc {
+        struct sh_dmae_regs hw;
+        struct shdma_desc shdma_desc;
+};
+
+#define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, shdma_chan)
 #define to_sh_desc(lh) container_of(lh, struct sh_desc, node)
 #define tx_to_sh_desc(tx) container_of(tx, struct sh_desc, async_tx)
-#define to_sh_dev(chan) container_of(chan->common.device,\
-                                     struct sh_dmae_device, common)
+#define to_sh_dev(chan) container_of(chan->shdma_chan.dma_chan.device,\
+                                     struct sh_dmae_device, shdma_dev.dma_dev)
 
 #endif /* __DMA_SHDMA_H */
This diff is collapsed.
@@ -213,8 +213,6 @@ struct sh_mmcif_host {
         struct mmc_host *mmc;
         struct mmc_request *mrq;
         struct platform_device *pd;
-        struct sh_dmae_slave dma_slave_tx;
-        struct sh_dmae_slave dma_slave_rx;
         struct clk *hclk;
         unsigned int clk;
         int bus_width;
@@ -373,59 +371,69 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
                 desc, cookie);
 }
 
-static bool sh_mmcif_filter(struct dma_chan *chan, void *arg)
-{
-        dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
-        chan->private = arg;
-        return true;
-}
-
 static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
                                  struct sh_mmcif_plat_data *pdata)
 {
-        struct sh_dmae_slave *tx, *rx;
+        struct resource *res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);
+        struct dma_slave_config cfg;
+        dma_cap_mask_t mask;
+        int ret;
+
         host->dma_active = false;
 
         if (!pdata)
                 return;
 
+        if (pdata->slave_id_tx <= 0 || pdata->slave_id_rx <= 0)
+                return;
+
         /* We can only either use DMA for both Tx and Rx or not use it at all */
-        if (pdata->dma) {
-                dev_warn(&host->pd->dev,
-                         "Update your platform to use embedded DMA slave IDs\n");
-                tx = &pdata->dma->chan_priv_tx;
-                rx = &pdata->dma->chan_priv_rx;
-        } else {
-                tx = &host->dma_slave_tx;
-                tx->slave_id = pdata->slave_id_tx;
-                rx = &host->dma_slave_rx;
-                rx->slave_id = pdata->slave_id_rx;
-        }
-        if (tx->slave_id > 0 && rx->slave_id > 0) {
-                dma_cap_mask_t mask;
+        dma_cap_zero(mask);
+        dma_cap_set(DMA_SLAVE, mask);
 
-                dma_cap_zero(mask);
-                dma_cap_set(DMA_SLAVE, mask);
+        host->chan_tx = dma_request_channel(mask, shdma_chan_filter,
+                                            (void *)pdata->slave_id_tx);
+        dev_dbg(&host->pd->dev, "%s: TX: got channel %p\n", __func__,
+                host->chan_tx);
 
-                host->chan_tx = dma_request_channel(mask, sh_mmcif_filter, tx);
-                dev_dbg(&host->pd->dev, "%s: TX: got channel %p\n", __func__,
-                        host->chan_tx);
+        if (!host->chan_tx)
+                return;
 
-                if (!host->chan_tx)
-                        return;
+        cfg.slave_id = pdata->slave_id_tx;
+        cfg.direction = DMA_MEM_TO_DEV;
+        cfg.dst_addr = res->start + MMCIF_CE_DATA;
+        cfg.src_addr = 0;
+        ret = dmaengine_slave_config(host->chan_tx, &cfg);
+        if (ret < 0)
+                goto ecfgtx;
 
-                host->chan_rx = dma_request_channel(mask, sh_mmcif_filter, rx);
-                dev_dbg(&host->pd->dev, "%s: RX: got channel %p\n", __func__,
-                        host->chan_rx);
+        host->chan_rx = dma_request_channel(mask, shdma_chan_filter,
+                                            (void *)pdata->slave_id_rx);
+        dev_dbg(&host->pd->dev, "%s: RX: got channel %p\n", __func__,
+                host->chan_rx);
 
-                if (!host->chan_rx) {
-                        dma_release_channel(host->chan_tx);
-                        host->chan_tx = NULL;
-                        return;
-                }
+        if (!host->chan_rx)
+                goto erqrx;
 
-                init_completion(&host->dma_complete);
-        }
+        cfg.slave_id = pdata->slave_id_rx;
+        cfg.direction = DMA_DEV_TO_MEM;
+        cfg.dst_addr = 0;
+        cfg.src_addr = res->start + MMCIF_CE_DATA;
+        ret = dmaengine_slave_config(host->chan_rx, &cfg);
+        if (ret < 0)
+                goto ecfgrx;
+
+        init_completion(&host->dma_complete);
+
+        return;
+
+ecfgrx:
+        dma_release_channel(host->chan_rx);
+        host->chan_rx = NULL;
+erqrx:
+ecfgtx:
+        dma_release_channel(host->chan_tx);
+        host->chan_tx = NULL;
 }
 
 static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
...
@@ -169,10 +169,10 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
         mmc_data->get_cd = sh_mobile_sdhi_get_cd;
 
         if (p->dma_slave_tx > 0 && p->dma_slave_rx > 0) {
-                priv->param_tx.slave_id = p->dma_slave_tx;
-                priv->param_rx.slave_id = p->dma_slave_rx;
-                priv->dma_priv.chan_priv_tx = &priv->param_tx;
-                priv->dma_priv.chan_priv_rx = &priv->param_rx;
+                priv->param_tx.shdma_slave.slave_id = p->dma_slave_tx;
+                priv->param_rx.shdma_slave.slave_id = p->dma_slave_rx;
+                priv->dma_priv.chan_priv_tx = &priv->param_tx.shdma_slave;
+                priv->dma_priv.chan_priv_rx = &priv->param_rx.shdma_slave;
                 priv->dma_priv.alignment_shift = 1; /* 2-byte alignment */
                 mmc_data->dma = &priv->dma_priv;
         }
...
@@ -1615,9 +1615,9 @@ static bool filter(struct dma_chan *chan, void *slave)
         struct sh_dmae_slave *param = slave;
 
         dev_dbg(chan->device->dev, "%s: slave ID %d\n", __func__,
-                param->slave_id);
+                param->shdma_slave.slave_id);
 
-        chan->private = param;
+        chan->private = &param->shdma_slave;
         return true;
 }
 
@@ -1656,7 +1656,7 @@ static void sci_request_dma(struct uart_port *port)
         param = &s->param_tx;
 
         /* Slave ID, e.g., SHDMA_SLAVE_SCIF0_TX */
-        param->slave_id = s->cfg->dma_slave_tx;
+        param->shdma_slave.slave_id = s->cfg->dma_slave_tx;
 
         s->cookie_tx = -EINVAL;
         chan = dma_request_channel(mask, filter, param);
@@ -1684,7 +1684,7 @@ static void sci_request_dma(struct uart_port *port)
         param = &s->param_rx;
 
         /* Slave ID, e.g., SHDMA_SLAVE_SCIF0_RX */
-        param->slave_id = s->cfg->dma_slave_rx;
+        param->shdma_slave.slave_id = s->cfg->dma_slave_rx;
 
         chan = dma_request_channel(mask, filter, param);
         dev_dbg(port->dev, "%s: RX: got channel %p\n", __func__, chan);
...
@@ -994,7 +994,7 @@ static bool usbhsf_dma_filter(struct dma_chan *chan, void *param)
          *
          * usbhs doesn't recognize id = 0 as valid DMA
          */
-        if (0 == slave->slave_id)
+        if (0 == slave->shdma_slave.slave_id)
                 return false;
 
         chan->private = slave;
@@ -1173,8 +1173,8 @@ int usbhs_fifo_probe(struct usbhs_priv *priv)
         fifo->port      = D0FIFO;
         fifo->sel       = D0FIFOSEL;
         fifo->ctr       = D0FIFOCTR;
-        fifo->tx_slave.slave_id = usbhs_get_dparam(priv, d0_tx_id);
-        fifo->rx_slave.slave_id = usbhs_get_dparam(priv, d0_rx_id);
+        fifo->tx_slave.shdma_slave.slave_id = usbhs_get_dparam(priv, d0_tx_id);
+        fifo->rx_slave.shdma_slave.slave_id = usbhs_get_dparam(priv, d0_rx_id);
 
         /* D1FIFO */
         fifo = usbhsf_get_d1fifo(priv);
@@ -1182,8 +1182,8 @@ int usbhs_fifo_probe(struct usbhs_priv *priv)
         fifo->port      = D1FIFO;
         fifo->sel       = D1FIFOSEL;
         fifo->ctr       = D1FIFOCTR;
-        fifo->tx_slave.slave_id = usbhs_get_dparam(priv, d1_tx_id);
-        fifo->rx_slave.slave_id = usbhs_get_dparam(priv, d1_rx_id);
+        fifo->tx_slave.shdma_slave.slave_id = usbhs_get_dparam(priv, d1_tx_id);
+        fifo->rx_slave.shdma_slave.slave_id = usbhs_get_dparam(priv, d1_rx_id);
 
         return 0;
 }
...
@@ -338,6 +338,9 @@ enum dma_slave_buswidth {
  * @device_fc: Flow Controller Settings. Only valid for slave channels. Fill
  * with 'true' if peripheral should be flow controller. Direction will be
  * selected at Runtime.
+ * @slave_id: Slave requester id. Only valid for slave channels. The dma
+ * slave peripheral will have unique id as dma requester which need to be
+ * pass as slave config.
  *
  * This struct is passed in as configuration data to a DMA engine
  * in order to set up a certain channel for DMA transport at runtime.
@@ -365,6 +368,7 @@ struct dma_slave_config {
         u32 src_maxburst;
         u32 dst_maxburst;
         bool device_fc;
+        unsigned int slave_id;
 };
 
 static inline const char *dma_chan_name(struct dma_chan *chan)
...
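
For reference, the new @slave_id field travels alongside the existing direction/address/burst fields of dma_slave_config. A hedged sketch of a device-to-memory slave configuration (the channel pointer, FIFO address and ID are illustrative, not taken from any driver in this merge):

#include <linux/dmaengine.h>

static int example_config_rx(struct dma_chan *chan, dma_addr_t fifo_addr,
                             unsigned int slave_id)
{
        struct dma_slave_config cfg = {
                .direction      = DMA_DEV_TO_MEM,
                .src_addr       = fifo_addr,
                .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .src_maxburst   = 1,
                .slave_id       = slave_id,     /* field added by this merge */
        };

        return dmaengine_slave_config(chan, &cfg);
}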
@@ -32,17 +32,11 @@
  * 1111 : Peripheral clock (sup_pclk set '1')
  */
 
-struct sh_mmcif_dma {
-        struct sh_dmae_slave chan_priv_tx;
-        struct sh_dmae_slave chan_priv_rx;
-};
-
 struct sh_mmcif_plat_data {
         void (*set_pwr)(struct platform_device *pdev, int state);
         void (*down_pwr)(struct platform_device *pdev);
         int (*get_cd)(struct platform_device *pdef);
-        struct sh_mmcif_dma     *dma;           /* Deprecated. Instead */
-        unsigned int            slave_id_tx;    /* use embedded slave_id_[tr]x */
+        unsigned int            slave_id_tx;    /* embedded slave_id_[tr]x */
         unsigned int            slave_id_rx;
         bool                    use_cd_gpio : 1;
         unsigned int            cd_gpio;
...
@@ -10,38 +10,27 @@
 #ifndef SH_DMA_H
 #define SH_DMA_H
 
-#include <linux/list.h>
 #include <linux/dmaengine.h>
+#include <linux/list.h>
+#include <linux/shdma-base.h>
+#include <linux/types.h>
+
+struct device;
 
 /* Used by slave DMA clients to request DMA to/from a specific peripheral */
 struct sh_dmae_slave {
-        unsigned int slave_id;  /* Set by the platform */
-        struct device *dma_dev; /* Set by the platform */
-        const struct sh_dmae_slave_config *config; /* Set by the driver */
-};
-
-struct sh_dmae_regs {
-        u32 sar; /* SAR / source address */
-        u32 dar; /* DAR / destination address */
-        u32 tcr; /* TCR / transfer count */
-};
-
-struct sh_desc {
-        struct sh_dmae_regs hw;
-        struct list_head node;
-        struct dma_async_tx_descriptor async_tx;
-        enum dma_transfer_direction direction;
-        dma_cookie_t cookie;
-        size_t partial;
-        int chunks;
-        int mark;
+        struct shdma_slave shdma_slave; /* Set by the platform */
 };
 
+/*
+ * Supplied by platforms to specify, how a DMA channel has to be configured for
+ * a certain peripheral
+ */
 struct sh_dmae_slave_config {
-        unsigned int slave_id;
+        int slave_id;
         dma_addr_t addr;
         u32 chcr;
         char mid_rid;
 };
 
 struct sh_dmae_channel {
@@ -110,4 +99,6 @@ struct sh_dmae_pdata {
 #define CHCR_TE 0x00000002
 #define CHCR_IE 0x00000004
 
+bool shdma_chan_filter(struct dma_chan *chan, void *arg);
+
 #endif
/*
* Dmaengine driver base library for DMA controllers, found on SH-based SoCs
*
* extracted from shdma.c and headers
*
* Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
* Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
* Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
* Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
*
* This is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*/
#ifndef SHDMA_BASE_H
#define SHDMA_BASE_H
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/types.h>
/**
* shdma_pm_state - DMA channel PM state
* SHDMA_PM_ESTABLISHED: either idle or during data transfer
* SHDMA_PM_BUSY: during the transfer preparation, when we have to
* drop the lock temporarily
* SHDMA_PM_PENDING: transfers pending
*/
enum shdma_pm_state {
SHDMA_PM_ESTABLISHED,
SHDMA_PM_BUSY,
SHDMA_PM_PENDING,
};
struct device;
/*
* Drivers, using this library are expected to embed struct shdma_dev,
* struct shdma_chan, struct shdma_desc, and struct shdma_slave
* in their respective device, channel, descriptor and slave objects.
*/
struct shdma_slave {
int slave_id;
};
struct shdma_desc {
struct list_head node;
struct dma_async_tx_descriptor async_tx;
enum dma_transfer_direction direction;
dma_cookie_t cookie;
int chunks;
int mark;
};
struct shdma_chan {
spinlock_t chan_lock; /* Channel operation lock */
struct list_head ld_queue; /* Link descriptors queue */
struct list_head ld_free; /* Free link descriptors */
struct dma_chan dma_chan; /* DMA channel */
struct device *dev; /* Channel device */
void *desc; /* buffer for descriptor array */
int desc_num; /* desc count */
size_t max_xfer_len; /* max transfer length */
int id; /* Raw id of this channel */
int irq; /* Channel IRQ */
int slave_id; /* Client ID for slave DMA */
enum shdma_pm_state pm_state;
};
/**
* struct shdma_ops - simple DMA driver operations
* desc_completed: return true, if this is the descriptor, that just has
* completed (atomic)
* halt_channel: stop DMA channel operation (atomic)
* channel_busy: return true, if the channel is busy (atomic)
* slave_addr: return slave DMA address
* desc_setup: set up the hardware specific descriptor portion (atomic)
* set_slave: bind channel to a slave
* setup_xfer: configure channel hardware for operation (atomic)
* start_xfer: start the DMA transfer (atomic)
* embedded_desc: return Nth struct shdma_desc pointer from the
* descriptor array
* chan_irq: process channel IRQ, return true if a transfer has
* completed (atomic)
*/
struct shdma_ops {
bool (*desc_completed)(struct shdma_chan *, struct shdma_desc *);
void (*halt_channel)(struct shdma_chan *);
bool (*channel_busy)(struct shdma_chan *);
dma_addr_t (*slave_addr)(struct shdma_chan *);
int (*desc_setup)(struct shdma_chan *, struct shdma_desc *,
dma_addr_t, dma_addr_t, size_t *);
int (*set_slave)(struct shdma_chan *, int, bool);
void (*setup_xfer)(struct shdma_chan *, int);
void (*start_xfer)(struct shdma_chan *, struct shdma_desc *);
struct shdma_desc *(*embedded_desc)(void *, int);
bool (*chan_irq)(struct shdma_chan *, int);
};
struct shdma_dev {
struct dma_device dma_dev;
struct shdma_chan **schan;
const struct shdma_ops *ops;
size_t desc_size;
};
#define shdma_for_each_chan(c, d, i) for (i = 0, c = (d)->schan[0]; \
i < (d)->dma_dev.chancnt; c = (d)->schan[++i])
int shdma_request_irq(struct shdma_chan *, int,
unsigned long, const char *);
void shdma_free_irq(struct shdma_chan *);
bool shdma_reset(struct shdma_dev *sdev);
void shdma_chan_probe(struct shdma_dev *sdev,
struct shdma_chan *schan, int id);
void shdma_chan_remove(struct shdma_chan *schan);
int shdma_init(struct device *dev, struct shdma_dev *sdev,
int chan_num);
void shdma_cleanup(struct shdma_dev *sdev);
#endif
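
To make the contract of the header above concrete, here is a hedged sketch of how a controller driver embeds the base-library objects and registers with the library, in the way the shdma.c conversion in this series does. All "my_" names are illustrative; only the shdma-base symbols declared above are real.

#include <linux/shdma-base.h>

struct my_desc {
        u32 sar, dar, tcr;              /* controller-specific transfer registers */
        struct shdma_desc shdma_desc;   /* embedded base descriptor */
};

struct my_chan {
        struct shdma_chan shdma_chan;   /* embedded base channel */
        void __iomem *base;
};

struct my_dev {
        struct shdma_dev shdma_dev;     /* embedded base device */
};

/* Hand the base library the Nth descriptor out of the buffer it allocated */
static struct shdma_desc *my_embedded_desc(void *buf, int i)
{
        return &((struct my_desc *)buf)[i].shdma_desc;
}

static const struct shdma_ops my_shdma_ops = {
        .embedded_desc  = my_embedded_desc,
        /* .set_slave, .desc_setup, .start_xfer, .halt_channel, .chan_irq, ...
         * would be filled in with controller-specific callbacks. */
};

static int my_register(struct device *dev, struct my_dev *md, int nr_channels)
{
        md->shdma_dev.ops = &my_shdma_ops;
        md->shdma_dev.desc_size = sizeof(struct my_desc);

        /* Set up the base-library state; the driver still probes its channels
         * and registers the embedded dma_device afterwards. */
        return shdma_init(dev, &md->shdma_dev, nr_channels);
}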
@@ -1631,8 +1631,8 @@ static void fsi_handler_init(struct fsi_priv *fsi)
         fsi->capture.priv       = fsi;
 
         if (fsi->info->tx_id) {
-                fsi->playback.slave.slave_id = fsi->info->tx_id;
+                fsi->playback.slave.shdma_slave.slave_id = fsi->info->tx_id;
                 fsi->playback.handler = &fsi_dma_push_handler;
         }
 }
...
@@ -330,12 +330,9 @@ static bool filter(struct dma_chan *chan, void *slave)
 {
         struct sh_dmae_slave *param = slave;
 
-        pr_debug("%s: slave ID %d\n", __func__, param->slave_id);
+        pr_debug("%s: slave ID %d\n", __func__, param->shdma_slave.slave_id);
 
-        if (unlikely(param->dma_dev != chan->device->dev))
-                return false;
-
-        chan->private = param;
+        chan->private = &param->shdma_slave;
         return true;
 }
 
@@ -360,16 +357,15 @@ static int siu_pcm_open(struct snd_pcm_substream *ss)
         if (ss->stream == SNDRV_PCM_STREAM_PLAYBACK) {
                 siu_stream = &port_info->playback;
                 param = &siu_stream->param;
-                param->slave_id = port ? pdata->dma_slave_tx_b :
+                param->shdma_slave.slave_id = port ? pdata->dma_slave_tx_b :
                         pdata->dma_slave_tx_a;
         } else {
                 siu_stream = &port_info->capture;
                 param = &siu_stream->param;
-                param->slave_id = port ? pdata->dma_slave_rx_b :
+                param->shdma_slave.slave_id = port ? pdata->dma_slave_rx_b :
                         pdata->dma_slave_rx_a;
         }
 
-        param->dma_dev = pdata->dma_dev;
         /* Get DMA channel */
         siu_stream->chan = dma_request_channel(mask, filter, param);
         if (!siu_stream->chan) {
...