Commit 4a63a8b3 authored by Andy Shevchenko's avatar Andy Shevchenko Committed by Vinod Koul

dw_dmac: autoconfigure block_size or use platform data

The maximum block size is a configurable parameter of the chip. The driver
will first try to read it from the encoded component parameters; otherwise
it will fall back to the value supplied in the platform data.
Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>
parent 482c67ea
...@@ -78,6 +78,7 @@ struct dw_dma_platform_data dmac_plat_data = { ...@@ -78,6 +78,7 @@ struct dw_dma_platform_data dmac_plat_data = {
.nr_channels = 8, .nr_channels = 8,
.chan_allocation_order = CHAN_ALLOCATION_DESCENDING, .chan_allocation_order = CHAN_ALLOCATION_DESCENDING,
.chan_priority = CHAN_PRIORITY_DESCENDING, .chan_priority = CHAN_PRIORITY_DESCENDING,
.block_size = 4095U,
}; };
void __init spear13xx_l2x0_init(void) void __init spear13xx_l2x0_init(void)
......
...@@ -605,6 +605,7 @@ static void __init genclk_init_parent(struct clk *clk) ...@@ -605,6 +605,7 @@ static void __init genclk_init_parent(struct clk *clk)
static struct dw_dma_platform_data dw_dmac0_data = { static struct dw_dma_platform_data dw_dmac0_data = {
.nr_channels = 3, .nr_channels = 3,
.block_size = 4095U,
}; };
static struct resource dw_dmac0_resource[] = { static struct resource dw_dmac0_resource[] = {
......
...@@ -55,16 +55,6 @@ ...@@ -55,16 +55,6 @@
| DWC_CTLL_SMS(_sms)); \ | DWC_CTLL_SMS(_sms)); \
}) })
/*
* This is configuration-dependent and usually a funny size like 4095.
*
* Note that this is a transfer count, i.e. if we transfer 32-bit
* words, we can do 16380 bytes per descriptor.
*
* This parameter is also system-specific.
*/
#define DWC_MAX_COUNT 4095U
/* /*
* Number of descriptors to allocate for each channel. This should be * Number of descriptors to allocate for each channel. This should be
* made configurable somehow; preferably, the clients (at least the * made configurable somehow; preferably, the clients (at least the
...@@ -672,7 +662,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, ...@@ -672,7 +662,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
for (offset = 0; offset < len; offset += xfer_count << src_width) { for (offset = 0; offset < len; offset += xfer_count << src_width) {
xfer_count = min_t(size_t, (len - offset) >> src_width, xfer_count = min_t(size_t, (len - offset) >> src_width,
DWC_MAX_COUNT); dwc->block_size);
desc = dwc_desc_get(dwc); desc = dwc_desc_get(dwc);
if (!desc) if (!desc)
...@@ -773,8 +763,8 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, ...@@ -773,8 +763,8 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
desc->lli.sar = mem; desc->lli.sar = mem;
desc->lli.dar = reg; desc->lli.dar = reg;
desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width); desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
if ((len >> mem_width) > DWC_MAX_COUNT) { if ((len >> mem_width) > dwc->block_size) {
dlen = DWC_MAX_COUNT << mem_width; dlen = dwc->block_size << mem_width;
mem += dlen; mem += dlen;
len -= dlen; len -= dlen;
} else { } else {
...@@ -833,8 +823,8 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, ...@@ -833,8 +823,8 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
desc->lli.sar = reg; desc->lli.sar = reg;
desc->lli.dar = mem; desc->lli.dar = mem;
desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width); desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
if ((len >> reg_width) > DWC_MAX_COUNT) { if ((len >> reg_width) > dwc->block_size) {
dlen = DWC_MAX_COUNT << reg_width; dlen = dwc->block_size << reg_width;
mem += dlen; mem += dlen;
len -= dlen; len -= dlen;
} else { } else {
...@@ -1217,7 +1207,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, ...@@ -1217,7 +1207,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
periods = buf_len / period_len; periods = buf_len / period_len;
/* Check for too big/unaligned periods and unaligned DMA buffer. */ /* Check for too big/unaligned periods and unaligned DMA buffer. */
if (period_len > (DWC_MAX_COUNT << reg_width)) if (period_len > (dwc->block_size << reg_width))
goto out_err; goto out_err;
if (unlikely(period_len & ((1 << reg_width) - 1))) if (unlikely(period_len & ((1 << reg_width) - 1)))
goto out_err; goto out_err;
...@@ -1383,6 +1373,7 @@ static int __devinit dw_probe(struct platform_device *pdev) ...@@ -1383,6 +1373,7 @@ static int __devinit dw_probe(struct platform_device *pdev)
bool autocfg; bool autocfg;
unsigned int dw_params; unsigned int dw_params;
unsigned int nr_channels; unsigned int nr_channels;
unsigned int max_blk_size = 0;
int irq; int irq;
int err; int err;
int i; int i;
...@@ -1423,6 +1414,10 @@ static int __devinit dw_probe(struct platform_device *pdev) ...@@ -1423,6 +1414,10 @@ static int __devinit dw_probe(struct platform_device *pdev)
dw->regs = regs; dw->regs = regs;
/* get hardware configuration parameters */
if (autocfg)
max_blk_size = dma_readl(dw, MAX_BLK_SIZE);
/* Calculate all channel mask before DMA setup */ /* Calculate all channel mask before DMA setup */
dw->all_chan_mask = (1 << nr_channels) - 1; dw->all_chan_mask = (1 << nr_channels) - 1;
...@@ -1468,6 +1463,16 @@ static int __devinit dw_probe(struct platform_device *pdev) ...@@ -1468,6 +1463,16 @@ static int __devinit dw_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&dwc->free_list); INIT_LIST_HEAD(&dwc->free_list);
channel_clear_bit(dw, CH_EN, dwc->mask); channel_clear_bit(dw, CH_EN, dwc->mask);
/* hardware configuration */
if (autocfg)
/* Decode maximum block size for given channel. The
* stored 4 bit value represents blocks from 0x00 for 3
* up to 0x0a for 4095. */
dwc->block_size =
(4 << ((max_blk_size >> 4 * i) & 0xf)) - 1;
else
dwc->block_size = pdata->block_size;
} }
/* Clear all interrupts on all channels. */ /* Clear all interrupts on all channels. */
......
...@@ -193,6 +193,9 @@ struct dw_dma_chan { ...@@ -193,6 +193,9 @@ struct dw_dma_chan {
unsigned int descs_allocated; unsigned int descs_allocated;
/* hardware configuration */
unsigned int block_size;
/* configuration passed via DMA_SLAVE_CONFIG */ /* configuration passed via DMA_SLAVE_CONFIG */
struct dma_slave_config dma_sconfig; struct dma_slave_config dma_sconfig;
}; };
......
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
* @nr_channels: Number of channels supported by hardware (max 8) * @nr_channels: Number of channels supported by hardware (max 8)
* @is_private: The device channels should be marked as private and not for * @is_private: The device channels should be marked as private and not for
* by the general purpose DMA channel allocator. * by the general purpose DMA channel allocator.
* @block_size: Maximum block size supported by the controller
*/ */
struct dw_dma_platform_data { struct dw_dma_platform_data {
unsigned int nr_channels; unsigned int nr_channels;
...@@ -29,6 +30,7 @@ struct dw_dma_platform_data { ...@@ -29,6 +30,7 @@ struct dw_dma_platform_data {
#define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */ #define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */
#define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */ #define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */
unsigned char chan_priority; unsigned char chan_priority;
unsigned short block_size;
}; };
/* bursts size */ /* bursts size */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment