Commit 6447f55d authored by Linus Torvalds

Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx: (66 commits)
  avr32: at32ap700x: fix typo in DMA master configuration
  dmaengine/dmatest: Pass timeout via module params
  dma: let IMX_DMA depend on IMX_HAVE_DMA_V1 instead of an explicit list of SoCs
  fsldma: make halt behave nicely on all supported controllers
  fsldma: reduce locking during descriptor cleanup
  fsldma: support async_tx dependencies and automatic unmapping
  fsldma: fix controller lockups
  fsldma: minor codingstyle and consistency fixes
  fsldma: improve link descriptor debugging
  fsldma: use channel name in printk output
  fsldma: move related helper functions near each other
  dmatest: fix automatic buffer unmap type
  drivers, pch_dma: Fix warning when CONFIG_PM=n.
  dmaengine/dw_dmac fix: use readl & writel instead of __raw_readl & __raw_writel
  avr32: at32ap700x: Specify DMA Flow Controller, Src and Dst msize
  dw_dmac: Setting Default Burst length for transfers as 16.
  dw_dmac: Allow src/dst msize & flow controller to be configured at runtime
  dw_dmac: Changing type of src_master and dest_master to u8.
  dw_dmac: Pass Channel Priority from platform_data
  dw_dmac: Pass Channel Allocation Order from platform_data
  ...
parents c50e3f51 3ea205c4
/*
 * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __MACH_MXS_DMA_H__
#define __MACH_MXS_DMA_H__

struct mxs_dma_data {
        int chan_irq;
};

static inline int mxs_dma_is_apbh(struct dma_chan *chan)
{
        return !strcmp(dev_name(chan->device->dev), "mxs-dma-apbh");
}

static inline int mxs_dma_is_apbx(struct dma_chan *chan)
{
        return !strcmp(dev_name(chan->device->dev), "mxs-dma-apbx");
}

#endif /* __MACH_MXS_DMA_H__ */
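Nothing in this merge shows a consumer of these two helpers, but the intended use is inside a dmaengine filter function. A minimal sketch, assuming a hypothetical client-side mxs_dma_filter() and the standard dma_request_channel() API; the chan->private hand-off of the per-channel IRQ is illustrative, not code from this merge:

static bool mxs_dma_filter(struct dma_chan *chan, void *param)
{
        struct mxs_dma_data *data = param;

        /* only take channels served by the APBH DMA block */
        if (!mxs_dma_is_apbh(chan))
                return false;

        /* hand the desired per-channel IRQ to the controller driver */
        chan->private = data;
        return true;
}

        /* caller side (sketch) */
        dma_cap_mask_t mask;
        struct mxs_dma_data data = { .chan_irq = irq };
        struct dma_chan *chan;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);
        chan = dma_request_channel(mask, mxs_dma_filter, &data);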
@@ -104,6 +104,8 @@ struct stedma40_half_channel_info {
 *
 * @dir: MEM 2 MEM, PERIPH 2 MEM , MEM 2 PERIPH, PERIPH 2 PERIPH
 * @high_priority: true if high-priority
+ * @realtime: true if realtime mode is to be enabled. Only available on DMA40
+ *            version 3+, i.e DB8500v2+
 * @mode: channel mode: physical, logical, or operation
 * @mode_opt: options for the chosen channel mode
 * @src_dev_type: Src device type
@@ -119,6 +121,7 @@ struct stedma40_half_channel_info {
 struct stedma40_chan_cfg {
        enum stedma40_xfer_dir          dir;
        bool                            high_priority;
+       bool                            realtime;
        enum stedma40_mode              mode;
        enum stedma40_mode_opt          mode_opt;
        int                             src_dev_type;
@@ -168,25 +171,6 @@ struct stedma40_platform_data {

 bool stedma40_filter(struct dma_chan *chan, void *data);

-/**
- * stedma40_memcpy_sg() - extension of the dma framework, memcpy to/from
- * scattergatter lists.
- *
- * @chan: dmaengine handle
- * @sgl_dst: Destination scatter list
- * @sgl_src: Source scatter list
- * @sgl_len: The length of each scatterlist. Both lists must be of equal length
- * and each element must match the corresponding element in the other scatter
- * list.
- * @flags: is actually enum dma_ctrl_flags. See dmaengine.h
- */
-struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
-                                                  struct scatterlist *sgl_dst,
-                                                  struct scatterlist *sgl_src,
-                                                  unsigned int sgl_len,
-                                                  unsigned long flags);
-
 /**
  * stedma40_slave_mem() - Transfers a raw data buffer to or from a slave
  * (=device)
...
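For context, the new realtime flag slots into an ordinary channel configuration. A sketch under stated assumptions: the direction/mode enum constants and the STEDMA40_DEV_DST_MEMORY define follow the existing ste_dma40.h naming but are assumptions here, and the src_dev_type number is a hypothetical platform event line:

static struct stedma40_chan_cfg sketch_rx_cfg = {
        .dir            = STEDMA40_PERIPH_TO_MEM,       /* assumed enum name */
        .high_priority  = true,
        .realtime       = true, /* honoured on DMA40 v3+ (DB8500v2+) only */
        .mode           = STEDMA40_MODE_LOGICAL,        /* assumed enum name */
        .src_dev_type   = 1,            /* hypothetical peripheral event line */
        .dst_dev_type   = STEDMA40_DEV_DST_MEMORY,      /* assumed define */
};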
@@ -2048,6 +2048,11 @@ at32_add_device_ac97c(unsigned int id, struct ac97c_platform_data *data,
                rx_dws->reg_width = DW_DMA_SLAVE_WIDTH_16BIT;
                rx_dws->cfg_hi = DWC_CFGH_SRC_PER(3);
                rx_dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL);
+               rx_dws->src_master = 0;
+               rx_dws->dst_master = 1;
+               rx_dws->src_msize = DW_DMA_MSIZE_1;
+               rx_dws->dst_msize = DW_DMA_MSIZE_1;
+               rx_dws->fc = DW_DMA_FC_D_P2M;
        }

        /* Check if DMA slave interface for playback should be configured. */
@@ -2056,6 +2061,11 @@ at32_add_device_ac97c(unsigned int id, struct ac97c_platform_data *data,
                tx_dws->reg_width = DW_DMA_SLAVE_WIDTH_16BIT;
                tx_dws->cfg_hi = DWC_CFGH_DST_PER(4);
                tx_dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL);
+               tx_dws->src_master = 0;
+               tx_dws->dst_master = 1;
+               tx_dws->src_msize = DW_DMA_MSIZE_1;
+               tx_dws->dst_msize = DW_DMA_MSIZE_1;
+               tx_dws->fc = DW_DMA_FC_D_M2P;
        }

        if (platform_device_add_data(pdev, data,
@@ -2128,6 +2138,11 @@ at32_add_device_abdac(unsigned int id, struct atmel_abdac_pdata *data)
        dws->reg_width = DW_DMA_SLAVE_WIDTH_32BIT;
        dws->cfg_hi = DWC_CFGH_DST_PER(2);
        dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL);
+       dws->src_master = 0;
+       dws->dst_master = 1;
+       dws->src_msize = DW_DMA_MSIZE_1;
+       dws->dst_msize = DW_DMA_MSIZE_1;
+       dws->fc = DW_DMA_FC_D_M2P;

        if (platform_device_add_data(pdev, data,
                                     sizeof(struct atmel_abdac_pdata)))
...
@@ -82,7 +82,7 @@ config INTEL_IOP_ADMA
 config DW_DMAC
        tristate "Synopsys DesignWare AHB DMA support"
-       depends on AVR32
+       depends on HAVE_CLK
        select DMA_ENGINE
        default y if CPU_AT32AP7000
        help
@@ -221,12 +221,20 @@ config IMX_SDMA
 config IMX_DMA
        tristate "i.MX DMA support"
-       depends on ARCH_MX1 || ARCH_MX21 || MACH_MX27
+       depends on IMX_HAVE_DMA_V1
        select DMA_ENGINE
        help
          Support the i.MX DMA engine. This engine is integrated into
          Freescale i.MX1/21/27 chips.

+config MXS_DMA
+       bool "MXS DMA support"
+       depends on SOC_IMX23 || SOC_IMX28
+       select DMA_ENGINE
+       help
+         Support the MXS DMA engine. This engine including APBH-DMA
+         and APBX-DMA is integrated into Freescale i.MX23/28 chips.
+
 config DMA_ENGINE
        bool
...
@@ -19,6 +19,7 @@ obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
 obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
 obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
 obj-$(CONFIG_IMX_DMA) += imx-dma.o
+obj-$(CONFIG_MXS_DMA) += mxs-dma.o
 obj-$(CONFIG_TIMB_DMA) += timb_dma.o
 obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
 obj-$(CONFIG_PL330_DMA) += pl330.o
...
@@ -54,6 +54,11 @@ module_param(pq_sources, uint, S_IRUGO);
 MODULE_PARM_DESC(pq_sources,
                "Number of p+q source buffers (default: 3)");

+static int timeout = 3000;
+module_param(timeout, uint, S_IRUGO);
+MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), \
+                Pass -1 for infinite timeout");
+
 /*
  * Initialization patterns. All bytes in the source buffer has bit 7
  * set, all bytes in the destination buffer has bit 7 cleared.
@@ -285,7 +290,12 @@ static int dmatest_func(void *data)
        set_user_nice(current, 10);

-       flags = DMA_CTRL_ACK | DMA_COMPL_SKIP_DEST_UNMAP | DMA_PREP_INTERRUPT;
+       /*
+        * src buffers are freed by the DMAEngine code with dma_unmap_single()
+        * dst buffers are freed by ourselves below
+        */
+       flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT
+             | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE;

        while (!kthread_should_stop()
               && !(iterations && total_tests >= iterations)) {
@@ -294,7 +304,7 @@ static int dmatest_func(void *data)
                dma_addr_t dma_srcs[src_cnt];
                dma_addr_t dma_dsts[dst_cnt];
                struct completion cmp;
-               unsigned long tmo = msecs_to_jiffies(3000);
+               unsigned long tmo = msecs_to_jiffies(timeout);
                u8 align = 0;

                total_tests++;
...
@@ -32,26 +32,30 @@
  * which does not support descriptor writeback.
  */

-/* NOTE:  DMS+SMS is system-specific. We should get this information
- * from the platform code somehow.
- */
-#define DWC_DEFAULT_CTLLO      (DWC_CTLL_DST_MSIZE(0)          \
-               | DWC_CTLL_SRC_MSIZE(0)                         \
-               | DWC_CTLL_DMS(0)                               \
-               | DWC_CTLL_SMS(1)                               \
-               | DWC_CTLL_LLP_D_EN                             \
-               | DWC_CTLL_LLP_S_EN)
+#define DWC_DEFAULT_CTLLO(private) ({                                  \
+               struct dw_dma_slave *__slave = (private);               \
+               int dms = __slave ? __slave->dst_master : 0;            \
+               int sms = __slave ? __slave->src_master : 1;            \
+               u8 smsize = __slave ? __slave->src_msize : DW_DMA_MSIZE_16; \
+               u8 dmsize = __slave ? __slave->dst_msize : DW_DMA_MSIZE_16; \
+                                                                       \
+               (DWC_CTLL_DST_MSIZE(dmsize)                             \
+                | DWC_CTLL_SRC_MSIZE(smsize)                           \
+                | DWC_CTLL_LLP_D_EN                                    \
+                | DWC_CTLL_LLP_S_EN                                    \
+                | DWC_CTLL_DMS(dms)                                    \
+                | DWC_CTLL_SMS(sms));                                  \
+       })

 /*
  * This is configuration-dependent and usually a funny size like 4095.
- * Let's round it down to the nearest power of two.
  *
  * Note that this is a transfer count, i.e. if we transfer 32-bit
- * words, we can do 8192 bytes per descriptor.
+ * words, we can do 16380 bytes per descriptor.
  *
  * This parameter is also system-specific.
  */
-#define DWC_MAX_COUNT  2048U
+#define DWC_MAX_COUNT  4095U

 /*
  * Number of descriptors to allocate for each channel. This should be
@@ -84,11 +88,6 @@ static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
        return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
 }

-static struct dw_desc *dwc_first_queued(struct dw_dma_chan *dwc)
-{
-       return list_entry(dwc->queue.next, struct dw_desc, desc_node);
-}
-
 static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
 {
        struct dw_desc *desc, *_desc;
@@ -201,6 +200,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
        dma_async_tx_callback           callback;
        void                            *param;
        struct dma_async_tx_descriptor  *txd = &desc->txd;
+       struct dw_desc                  *child;

        dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
@@ -209,6 +209,12 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
        param = txd->callback_param;

        dwc_sync_desc_for_cpu(dwc, desc);
+
+       /* async_tx_ack */
+       list_for_each_entry(child, &desc->tx_list, desc_node)
+               async_tx_ack(&child->txd);
+       async_tx_ack(&desc->txd);
+
        list_splice_init(&desc->tx_list, &dwc->free_list);
        list_move(&desc->desc_node, &dwc->free_list);
@@ -259,10 +265,11 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
         * Submit queued descriptors ASAP, i.e. before we go through
         * the completed ones.
         */
-       if (!list_empty(&dwc->queue))
-               dwc_dostart(dwc, dwc_first_queued(dwc));
        list_splice_init(&dwc->active_list, &list);
-       list_splice_init(&dwc->queue, &dwc->active_list);
+       if (!list_empty(&dwc->queue)) {
+               list_move(dwc->queue.next, &dwc->active_list);
+               dwc_dostart(dwc, dwc_first_active(dwc));
+       }

        list_for_each_entry_safe(desc, _desc, &list, desc_node)
                dwc_descriptor_complete(dwc, desc);
@@ -291,6 +298,9 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
                return;
        }

+       if (list_empty(&dwc->active_list))
+               return;
+
        dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);

        list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
@@ -319,8 +329,8 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
                cpu_relax();

        if (!list_empty(&dwc->queue)) {
-               dwc_dostart(dwc, dwc_first_queued(dwc));
-               list_splice_init(&dwc->queue, &dwc->active_list);
+               list_move(dwc->queue.next, &dwc->active_list);
+               dwc_dostart(dwc, dwc_first_active(dwc));
        }
 }
@@ -346,7 +356,7 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
         */
        bad_desc = dwc_first_active(dwc);
        list_del_init(&bad_desc->desc_node);
-       list_splice_init(&dwc->queue, dwc->active_list.prev);
+       list_move(dwc->queue.next, dwc->active_list.prev);

        /* Clear the error flag and try to restart the controller */
        dma_writel(dw, CLEAR.ERROR, dwc->mask);
@@ -541,8 +551,8 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
        if (list_empty(&dwc->active_list)) {
                dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
                                desc->txd.cookie);
-               dwc_dostart(dwc, desc);
                list_add_tail(&desc->desc_node, &dwc->active_list);
+               dwc_dostart(dwc, dwc_first_active(dwc));
        } else {
                dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
                                desc->txd.cookie);
@@ -581,14 +591,16 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
         * We can be a lot more clever here, but this should take care
         * of the most common optimization.
         */
-       if (!((src | dest | len) & 3))
+       if (!((src | dest | len) & 7))
+               src_width = dst_width = 3;
+       else if (!((src | dest | len) & 3))
                src_width = dst_width = 2;
        else if (!((src | dest | len) & 1))
                src_width = dst_width = 1;
        else
                src_width = dst_width = 0;

-       ctllo = DWC_DEFAULT_CTLLO
+       ctllo = DWC_DEFAULT_CTLLO(chan->private)
                        | DWC_CTLL_DST_WIDTH(dst_width)
                        | DWC_CTLL_SRC_WIDTH(src_width)
                        | DWC_CTLL_DST_INC
@@ -669,11 +681,11 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
        switch (direction) {
        case DMA_TO_DEVICE:
-               ctllo = (DWC_DEFAULT_CTLLO
+               ctllo = (DWC_DEFAULT_CTLLO(chan->private)
                                | DWC_CTLL_DST_WIDTH(reg_width)
                                | DWC_CTLL_DST_FIX
                                | DWC_CTLL_SRC_INC
-                               | DWC_CTLL_FC_M2P);
+                               | DWC_CTLL_FC(dws->fc));
                reg = dws->tx_reg;
                for_each_sg(sgl, sg, sg_len, i) {
                        struct dw_desc  *desc;
@@ -714,11 +726,11 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                }
                break;
        case DMA_FROM_DEVICE:
-               ctllo = (DWC_DEFAULT_CTLLO
+               ctllo = (DWC_DEFAULT_CTLLO(chan->private)
                                | DWC_CTLL_SRC_WIDTH(reg_width)
                                | DWC_CTLL_DST_INC
                                | DWC_CTLL_SRC_FIX
-                               | DWC_CTLL_FC_P2M);
+                               | DWC_CTLL_FC(dws->fc));
                reg = dws->rx_reg;
                for_each_sg(sgl, sg, sg_len, i) {
@@ -834,7 +846,9 @@ dwc_tx_status(struct dma_chan *chan,
        ret = dma_async_is_complete(cookie, last_complete, last_used);
        if (ret != DMA_SUCCESS) {
+               spin_lock_bh(&dwc->lock);
                dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
+               spin_unlock_bh(&dwc->lock);

                last_complete = dwc->completed;
                last_used = chan->cookie;
@@ -889,8 +903,11 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
                BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);

                cfghi = dws->cfg_hi;
-               cfglo = dws->cfg_lo;
+               cfglo = dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
        }

+       cfglo |= DWC_CFGL_CH_PRIOR(dwc->priority);
+
        channel_writel(dwc, CFG_LO, cfglo);
        channel_writel(dwc, CFG_HI, cfghi);
@@ -1126,23 +1143,23 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
                case DMA_TO_DEVICE:
                        desc->lli.dar = dws->tx_reg;
                        desc->lli.sar = buf_addr + (period_len * i);
-                       desc->lli.ctllo = (DWC_DEFAULT_CTLLO
+                       desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
                                        | DWC_CTLL_DST_WIDTH(reg_width)
                                        | DWC_CTLL_SRC_WIDTH(reg_width)
                                        | DWC_CTLL_DST_FIX
                                        | DWC_CTLL_SRC_INC
-                                       | DWC_CTLL_FC_M2P
+                                       | DWC_CTLL_FC(dws->fc)
                                        | DWC_CTLL_INT_EN);
                        break;
                case DMA_FROM_DEVICE:
                        desc->lli.dar = buf_addr + (period_len * i);
                        desc->lli.sar = dws->rx_reg;
-                       desc->lli.ctllo = (DWC_DEFAULT_CTLLO
+                       desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
                                        | DWC_CTLL_SRC_WIDTH(reg_width)
                                        | DWC_CTLL_DST_WIDTH(reg_width)
                                        | DWC_CTLL_DST_INC
                                        | DWC_CTLL_SRC_FIX
-                                       | DWC_CTLL_FC_P2M
+                                       | DWC_CTLL_FC(dws->fc)
                                        | DWC_CTLL_INT_EN);
                        break;
                default:
@@ -1307,7 +1324,17 @@ static int __init dw_probe(struct platform_device *pdev)
                dwc->chan.device = &dw->dma;
                dwc->chan.cookie = dwc->completed = 1;
                dwc->chan.chan_id = i;
-               list_add_tail(&dwc->chan.device_node, &dw->dma.channels);
+               if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
+                       list_add_tail(&dwc->chan.device_node,
+                                       &dw->dma.channels);
+               else
+                       list_add(&dwc->chan.device_node, &dw->dma.channels);
+
+               /* 7 is highest priority & 0 is lowest. */
+               if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
+                       dwc->priority = 7 - i;
+               else
+                       dwc->priority = i;
+
                dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
                spin_lock_init(&dwc->lock);
@@ -1335,6 +1362,8 @@ static int __init dw_probe(struct platform_device *pdev)
        dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
        dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
+       if (pdata->is_private)
+               dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
        dw->dma.dev = &pdev->dev;
        dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
        dw->dma.device_free_chan_resources = dwc_free_chan_resources;
@@ -1447,7 +1476,7 @@ static int __init dw_init(void)
 {
        return platform_driver_probe(&dw_driver, dw_probe);
 }
-module_init(dw_init);
+subsys_initcall(dw_init);

 static void __exit dw_exit(void)
 {
...
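The net effect of the DWC_DEFAULT_CTLLO() rework is that master, burst-size (msize) and flow-controller selection now follow whatever struct dw_dma_slave the client attached to chan->private; with no slave data it keeps the old dms=0/sms=1 masters while defaulting to the new burst length of 16. A minimal client-side sketch, mirroring the at32ap700x code above; the device pointer is a placeholder and the field names come from include/linux/dw_dmac.h as modified later in this diff:

        /* sketch: per-channel slave parameters handed to dw_dmac */
        struct dw_dma_slave dws = {
                .dma_dev    = &dmac_pdev->dev,  /* placeholder: the dw_dmac device */
                .reg_width  = DW_DMA_SLAVE_WIDTH_16BIT,
                .src_master = 0,                /* fetch from master 0 ... */
                .dst_master = 1,                /* ... write through master 1 */
                .src_msize  = DW_DMA_MSIZE_1,
                .dst_msize  = DW_DMA_MSIZE_1,
                .fc         = DW_DMA_FC_D_M2P,  /* DMAC as flow controller, mem->periph */
        };

        chan->private = &dws;   /* picked up by DWC_DEFAULT_CTLLO(chan->private) */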
@@ -86,6 +86,7 @@ struct dw_dma_regs {
 #define DWC_CTLL_SRC_MSIZE(n)  ((n)<<14)
 #define DWC_CTLL_S_GATH_EN     (1 << 17)       /* src gather, !FIX */
 #define DWC_CTLL_D_SCAT_EN     (1 << 18)       /* dst scatter, !FIX */
+#define DWC_CTLL_FC(n)         ((n) << 20)
 #define DWC_CTLL_FC_M2M        (0 << 20)       /* mem-to-mem */
 #define DWC_CTLL_FC_M2P        (1 << 20)       /* mem-to-periph */
 #define DWC_CTLL_FC_P2M        (2 << 20)       /* periph-to-mem */
@@ -101,6 +102,8 @@ struct dw_dma_regs {
 #define DWC_CTLH_BLOCK_TS_MASK 0x00000fff

 /* Bitfields in CFG_LO. Platform-configurable bits are in <linux/dw_dmac.h> */
+#define DWC_CFGL_CH_PRIOR_MASK (0x7 << 5)      /* priority mask */
+#define DWC_CFGL_CH_PRIOR(x)   ((x) << 5)      /* priority */
 #define DWC_CFGL_CH_SUSP       (1 << 8)        /* pause xfer */
 #define DWC_CFGL_FIFO_EMPTY    (1 << 9)        /* pause xfer */
 #define DWC_CFGL_HS_DST        (1 << 10)       /* handshake w/dst */
@@ -134,6 +137,7 @@ struct dw_dma_chan {
        struct dma_chan         chan;
        void __iomem            *ch_regs;
        u8                      mask;
+       u8                      priority;

        spinlock_t              lock;
@@ -155,9 +159,9 @@ __dwc_regs(struct dw_dma_chan *dwc)
 }

 #define channel_readl(dwc, name) \
-       __raw_readl(&(__dwc_regs(dwc)->name))
+       readl(&(__dwc_regs(dwc)->name))
 #define channel_writel(dwc, name, val) \
-       __raw_writel((val), &(__dwc_regs(dwc)->name))
+       writel((val), &(__dwc_regs(dwc)->name))

 static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
 {
@@ -181,9 +185,9 @@ static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
 }

 #define dma_readl(dw, name) \
-       __raw_readl(&(__dw_regs(dw)->name))
+       readl(&(__dw_regs(dw)->name))
 #define dma_writel(dw, name, val) \
-       __raw_writel((val), &(__dw_regs(dw)->name))
+       writel((val), &(__dw_regs(dw)->name))

 #define channel_set_bit(dw, reg, mask) \
        dma_writel(dw, reg, ((mask) << 8) | (mask))
...
@@ -102,8 +102,8 @@ struct fsl_desc_sw {
 } __attribute__((aligned(32)));

 struct fsldma_chan_regs {
-       u32 mr; /* 0x00 - Mode Register */
-       u32 sr; /* 0x04 - Status Register */
+       u32 mr;         /* 0x00 - Mode Register */
+       u32 sr;         /* 0x04 - Status Register */
        u64 cdar;       /* 0x08 - Current descriptor address register */
        u64 sar;        /* 0x10 - Source Address Register */
        u64 dar;        /* 0x18 - Destination Address Register */
@@ -135,6 +135,7 @@ struct fsldma_device {
 #define FSL_DMA_CHAN_START_EXT 0x00002000

 struct fsldma_chan {
+       char name[8];                   /* Channel name */
        struct fsldma_chan_regs __iomem *regs;
        dma_cookie_t completed_cookie;  /* The maximum cookie completed */
        spinlock_t desc_lock;           /* Descriptor operation lock */
@@ -147,6 +148,7 @@ struct fsldma_chan {
        int id;                         /* Raw id of this channel */
        struct tasklet_struct tasklet;
        u32 feature;
+       bool idle;                      /* DMA controller is idle */

        void (*toggle_ext_pause)(struct fsldma_chan *fsl_chan, int enable);
        void (*toggle_ext_start)(struct fsldma_chan *fsl_chan, int enable);
...
@@ -82,7 +82,7 @@ struct pch_dma_regs {
        u32     dma_sts1;
        u32     reserved2;
        u32     reserved3;
-       struct pch_dma_desc_regs desc[0];
+       struct pch_dma_desc_regs desc[MAX_CHAN_NR];
 };

 struct pch_dma_desc {
@@ -124,7 +124,7 @@ struct pch_dma {
        struct pci_pool         *pool;
        struct pch_dma_regs     regs;
        struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR];
-       struct pch_dma_chan     channels[0];
+       struct pch_dma_chan     channels[MAX_CHAN_NR];
 };

 #define PCH_DMA_CTL0   0x00
@@ -366,7 +366,7 @@ static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
        struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan);
        dma_cookie_t cookie;

-       spin_lock_bh(&pd_chan->lock);
+       spin_lock(&pd_chan->lock);
        cookie = pdc_assign_cookie(pd_chan, desc);

        if (list_empty(&pd_chan->active_list)) {
@@ -376,7 +376,7 @@ static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
                list_add_tail(&desc->desc_node, &pd_chan->queue);
        }

-       spin_unlock_bh(&pd_chan->lock);
+       spin_unlock(&pd_chan->lock);
        return 0;
 }
@@ -386,7 +386,7 @@ static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags)
        struct pch_dma *pd = to_pd(chan->device);
        dma_addr_t addr;

-       desc = pci_pool_alloc(pd->pool, GFP_KERNEL, &addr);
+       desc = pci_pool_alloc(pd->pool, flags, &addr);
        if (desc) {
                memset(desc, 0, sizeof(struct pch_dma_desc));
                INIT_LIST_HEAD(&desc->tx_list);
@@ -405,7 +405,7 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
        struct pch_dma_desc *ret = NULL;
        int i;

-       spin_lock_bh(&pd_chan->lock);
+       spin_lock(&pd_chan->lock);
        list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {
                i++;
                if (async_tx_test_ack(&desc->txd)) {
@@ -415,15 +415,15 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
                }
                dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc);
        }
-       spin_unlock_bh(&pd_chan->lock);
+       spin_unlock(&pd_chan->lock);
        dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);

        if (!ret) {
                ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO);
                if (ret) {
-                       spin_lock_bh(&pd_chan->lock);
+                       spin_lock(&pd_chan->lock);
                        pd_chan->descs_allocated++;
-                       spin_unlock_bh(&pd_chan->lock);
+                       spin_unlock(&pd_chan->lock);
                } else {
                        dev_err(chan2dev(&pd_chan->chan),
                                "failed to alloc desc\n");
@@ -437,10 +437,10 @@ static void pdc_desc_put(struct pch_dma_chan *pd_chan,
                         struct pch_dma_desc *desc)
 {
        if (desc) {
-               spin_lock_bh(&pd_chan->lock);
+               spin_lock(&pd_chan->lock);
                list_splice_init(&desc->tx_list, &pd_chan->free_list);
                list_add(&desc->desc_node, &pd_chan->free_list);
-               spin_unlock_bh(&pd_chan->lock);
+               spin_unlock(&pd_chan->lock);
        }
 }
@@ -530,9 +530,9 @@ static void pd_issue_pending(struct dma_chan *chan)
        struct pch_dma_chan *pd_chan = to_pd_chan(chan);

        if (pdc_is_idle(pd_chan)) {
-               spin_lock_bh(&pd_chan->lock);
+               spin_lock(&pd_chan->lock);
                pdc_advance_work(pd_chan);
-               spin_unlock_bh(&pd_chan->lock);
+               spin_unlock(&pd_chan->lock);
        }
 }
@@ -592,7 +592,6 @@ static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
                        goto err_desc_get;
                }

-
                if (!first) {
                        first = desc;
                } else {
@@ -641,13 +640,13 @@ static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
        spin_unlock_bh(&pd_chan->lock);

        return 0;
 }

 static void pdc_tasklet(unsigned long data)
 {
        struct pch_dma_chan *pd_chan = (struct pch_dma_chan *)data;
+       unsigned long flags;

        if (!pdc_is_idle(pd_chan)) {
                dev_err(chan2dev(&pd_chan->chan),
@@ -655,12 +654,12 @@ static void pdc_tasklet(unsigned long data)
                return;
        }

-       spin_lock_bh(&pd_chan->lock);
+       spin_lock_irqsave(&pd_chan->lock, flags);

        if (test_and_clear_bit(0, &pd_chan->err_status))
                pdc_handle_error(pd_chan);
        else
                pdc_advance_work(pd_chan);

-       spin_unlock_bh(&pd_chan->lock);
+       spin_unlock_irqrestore(&pd_chan->lock, flags);
 }

 static irqreturn_t pd_irq(int irq, void *devid)
@@ -694,6 +693,7 @@ static irqreturn_t pd_irq(int irq, void *devid)
        return ret;
 }

+#ifdef CONFIG_PM
 static void pch_dma_save_regs(struct pch_dma *pd)
 {
        struct pch_dma_chan *pd_chan;
@@ -771,6 +771,7 @@ static int pch_dma_resume(struct pci_dev *pdev)
        return 0;
 }
+#endif

 static int __devinit pch_dma_probe(struct pci_dev *pdev,
                                   const struct pci_device_id *id)
...
@@ -163,6 +163,22 @@
 #define D40_DREG_LCEIS1        0x0B4
 #define D40_DREG_LCEIS2        0x0B8
 #define D40_DREG_LCEIS3        0x0BC
+#define D40_DREG_PSEG1         0x110
+#define D40_DREG_PSEG2         0x114
+#define D40_DREG_PSEG3         0x118
+#define D40_DREG_PSEG4         0x11C
+#define D40_DREG_PCEG1         0x120
+#define D40_DREG_PCEG2         0x124
+#define D40_DREG_PCEG3         0x128
+#define D40_DREG_PCEG4         0x12C
+#define D40_DREG_RSEG1         0x130
+#define D40_DREG_RSEG2         0x134
+#define D40_DREG_RSEG3         0x138
+#define D40_DREG_RSEG4         0x13C
+#define D40_DREG_RCEG1         0x140
+#define D40_DREG_RCEG2         0x144
+#define D40_DREG_RCEG3         0x148
+#define D40_DREG_RCEG4         0x14C
 #define D40_DREG_STFU          0xFC8
 #define D40_DREG_ICFG          0xFCC
 #define D40_DREG_PERIPHID0     0xFE0
@@ -277,6 +293,13 @@ struct d40_def_lcsp {

 /* Physical channels */

+enum d40_lli_flags {
+       LLI_ADDR_INC    = 1 << 0,
+       LLI_TERM_INT    = 1 << 1,
+       LLI_CYCLIC      = 1 << 2,
+       LLI_LAST_LINK   = 1 << 3,
+};
+
 void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
                 u32 *src_cfg,
                 u32 *dst_cfg,
@@ -292,46 +315,15 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
                      struct d40_phy_lli *lli,
                      dma_addr_t lli_phys,
                      u32 reg_cfg,
-                     u32 data_width1,
-                     u32 data_width2,
-                     int psize);
-
-struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli,
-                                      dma_addr_t data,
-                                      u32 data_size,
-                                      int psize,
-                                      dma_addr_t next_lli,
-                                      u32 reg_cfg,
-                                      bool term_int,
-                                      u32 data_width1,
-                                      u32 data_width2,
-                                      bool is_device);
-
-void d40_phy_lli_write(void __iomem *virtbase,
-                      u32 phy_chan_num,
-                      struct d40_phy_lli *lli_dst,
-                      struct d40_phy_lli *lli_src);
+                     struct stedma40_half_channel_info *info,
+                     struct stedma40_half_channel_info *otherinfo,
+                     unsigned long flags);

 /* Logical channels */

-struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
-                                      dma_addr_t addr,
-                                      int size,
-                                      u32 lcsp13, /* src or dst*/
-                                      u32 data_width1, u32 data_width2,
-                                      bool addr_inc);
-
-int d40_log_sg_to_dev(struct scatterlist *sg,
-                     int sg_len,
-                     struct d40_log_lli_bidir *lli,
-                     struct d40_def_lcsp *lcsp,
-                     u32 src_data_width,
-                     u32 dst_data_width,
-                     enum dma_data_direction direction,
-                     dma_addr_t dev_addr);
-
 int d40_log_sg_to_lli(struct scatterlist *sg,
                      int sg_len,
+                     dma_addr_t dev_addr,
                      struct d40_log_lli *lli_sg,
                      u32 lcsp13, /* src or dst*/
                      u32 data_width1, u32 data_width2);
@@ -339,11 +331,11 @@ int d40_log_sg_to_lli(struct scatterlist *sg,
 void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
                            struct d40_log_lli *lli_dst,
                            struct d40_log_lli *lli_src,
-                           int next);
+                           int next, unsigned int flags);

 void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
                            struct d40_log_lli *lli_dst,
                            struct d40_log_lli *lli_src,
-                           int next);
+                           int next, unsigned int flags);

 #endif /* STE_DMA40_LLI_H */
@@ -16,9 +16,18 @@
 /**
  * struct dw_dma_platform_data - Controller configuration parameters
  * @nr_channels: Number of channels supported by hardware (max 8)
+ * @is_private: The device channels should be marked as private and not for
+ *     use by the general purpose DMA channel allocator.
  */
 struct dw_dma_platform_data {
        unsigned int    nr_channels;
+       bool            is_private;
+#define CHAN_ALLOCATION_ASCENDING      0       /* zero to seven */
+#define CHAN_ALLOCATION_DESCENDING     1       /* seven to zero */
+       unsigned char   chan_allocation_order;
+#define CHAN_PRIORITY_ASCENDING                0       /* chan0 highest */
+#define CHAN_PRIORITY_DESCENDING       1       /* chan7 highest */
+       unsigned char   chan_priority;
 };

 /**
@@ -33,6 +42,30 @@ enum dw_dma_slave_width {
        DW_DMA_SLAVE_WIDTH_32BIT,
 };

+/* burst size */
+enum dw_dma_msize {
+       DW_DMA_MSIZE_1,
+       DW_DMA_MSIZE_4,
+       DW_DMA_MSIZE_8,
+       DW_DMA_MSIZE_16,
+       DW_DMA_MSIZE_32,
+       DW_DMA_MSIZE_64,
+       DW_DMA_MSIZE_128,
+       DW_DMA_MSIZE_256,
+};
+
+/* flow controller */
+enum dw_dma_fc {
+       DW_DMA_FC_D_M2M,
+       DW_DMA_FC_D_M2P,
+       DW_DMA_FC_D_P2M,
+       DW_DMA_FC_D_P2P,
+       DW_DMA_FC_P_P2M,
+       DW_DMA_FC_SP_P2P,
+       DW_DMA_FC_P_M2P,
+       DW_DMA_FC_DP_P2P,
+};
+
 /**
  * struct dw_dma_slave - Controller-specific information about a slave
  *
@@ -44,6 +77,11 @@ enum dw_dma_slave_width {
  * @reg_width: peripheral register width
  * @cfg_hi: Platform-specific initializer for the CFG_HI register
  * @cfg_lo: Platform-specific initializer for the CFG_LO register
+ * @src_master: src master for transfers on allocated channel.
+ * @dst_master: dest master for transfers on allocated channel.
+ * @src_msize: src burst size.
+ * @dst_msize: dest burst size.
+ * @fc: flow controller for DMA transfer
  */
 struct dw_dma_slave {
        struct device           *dma_dev;
@@ -52,6 +90,11 @@ struct dw_dma_slave {
        enum dw_dma_slave_width reg_width;
        u32                     cfg_hi;
        u32                     cfg_lo;
+       u8                      src_master;
+       u8                      dst_master;
+       u8                      src_msize;
+       u8                      dst_msize;
+       u8                      fc;
 };

 /* Platform-configurable bits in CFG_HI */
@@ -62,7 +105,6 @@ struct dw_dma_slave {
 #define DWC_CFGH_DST_PER(x)    ((x) << 11)

 /* Platform-configurable bits in CFG_LO */
-#define DWC_CFGL_PRIO(x)       ((x) << 5)      /* priority */
 #define DWC_CFGL_LOCK_CH_XFER  (0 << 12)       /* scope of LOCK_CH */
 #define DWC_CFGL_LOCK_CH_BLOCK (1 << 12)
 #define DWC_CFGL_LOCK_CH_XACT  (2 << 12)
...
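Taken together, a platform that wants private channels handed out from the highest-numbered channel down, with channel 0 getting the highest priority, could now describe the controller roughly as below. This is a sketch using only the fields and constants defined above; the platform_device wiring and id are placeholders, though "dw_dmac" does match the driver name used in dw_dmac.c:

static struct dw_dma_platform_data dw_pdata = {
        .nr_channels            = 8,
        .is_private             = true,
        .chan_allocation_order  = CHAN_ALLOCATION_DESCENDING,
        .chan_priority          = CHAN_PRIORITY_ASCENDING,      /* chan0 highest */
};

static struct platform_device dw_dmac_device = {
        .name   = "dw_dmac",    /* must match the platform driver name */
        .id     = -1,
        .dev    = {
                .platform_data  = &dw_pdata,
        },
};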