Commit 1ae105aa authored by Vinod Koul

Merge branch 'next' into for-linus-3.0

parents 02f8c6ae 5a42fb93
......@@ -88,6 +88,16 @@ static void __init ts72xx_init_machine(void)
ARRAY_SIZE(ts72xx_spi_devices));
}
The driver can also use DMA for the transfers. In this case ts72xx_spi_info
becomes:
static struct ep93xx_spi_info ts72xx_spi_info = {
.num_chipselect = ARRAY_SIZE(ts72xx_spi_devices),
.use_dma = true,
};
Note that CONFIG_EP93XX_DMA should be enabled as well.
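Putting the pieces together, the board code registers the SPI master the same
way as before; a minimal sketch based on the ts72xx example above
(ts72xx_spi_devices is the board's spi_board_info table):

static struct ep93xx_spi_info ts72xx_spi_info = {
	.num_chipselect	= ARRAY_SIZE(ts72xx_spi_devices),
	.use_dma	= true,
};

static void __init ts72xx_init_machine(void)
{
	...
	ep93xx_register_spi(&ts72xx_spi_info, ts72xx_spi_devices,
			    ARRAY_SIZE(ts72xx_spi_devices));
}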
Thanks to
=========
Martin Guy, H. Hartley Sweeten and others who helped me during development of
......
#
# Makefile for the linux kernel.
#
obj-y := core.o clock.o dma-m2p.o gpio.o
obj-y := core.o clock.o gpio.o
obj-m :=
obj-n :=
obj- :=
obj-$(CONFIG_EP93XX_DMA) += dma.o
obj-$(CONFIG_MACH_ADSSPHERE) += adssphere.o
obj-$(CONFIG_MACH_EDB93XX) += edb93xx.o
obj-$(CONFIG_MACH_GESBC9312) += gesbc9312.o
......
......@@ -492,11 +492,15 @@ static struct resource ep93xx_spi_resources[] = {
},
};
static u64 ep93xx_spi_dma_mask = DMA_BIT_MASK(32);
static struct platform_device ep93xx_spi_device = {
.name = "ep93xx-spi",
.id = 0,
.dev = {
.platform_data = &ep93xx_spi_master_data,
.coherent_dma_mask = DMA_BIT_MASK(32),
.dma_mask = &ep93xx_spi_dma_mask,
},
.num_resources = ARRAY_SIZE(ep93xx_spi_resources),
.resource = ep93xx_spi_resources,
......
/*
* arch/arm/mach-ep93xx/dma.c
*
* Platform support code for the EP93xx dmaengine driver.
*
* Copyright (C) 2011 Mika Westerberg
*
* This work is based on the original dma-m2p implementation with
* following copyrights:
*
* Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
* Copyright (C) 2006 Applied Data Systems
* Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*/
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <mach/dma.h>
#include <mach/hardware.h>
#define DMA_CHANNEL(_name, _base, _irq) \
{ .name = (_name), .base = (_base), .irq = (_irq) }
/*
* DMA M2P channels.
*
* On the EP93xx chip the following peripherals may be allocated to the 10
* Memory to Internal Peripheral (M2P) channels (5 transmit + 5 receive).
*
* I2S contains 3 Tx and 3 Rx DMA channels
* AAC contains 3 Tx and 3 Rx DMA channels
* UART1 contains 1 Tx and 1 Rx DMA channel
* UART2 contains 1 Tx and 1 Rx DMA channel
* UART3 contains 1 Tx and 1 Rx DMA channel
* IrDA contains 1 Tx and 1 Rx DMA channel
*
* Registers are mapped statically in ep93xx_map_io().
*/
static struct ep93xx_dma_chan_data ep93xx_dma_m2p_channels[] = {
DMA_CHANNEL("m2p0", EP93XX_DMA_BASE + 0x0000, IRQ_EP93XX_DMAM2P0),
DMA_CHANNEL("m2p1", EP93XX_DMA_BASE + 0x0040, IRQ_EP93XX_DMAM2P1),
DMA_CHANNEL("m2p2", EP93XX_DMA_BASE + 0x0080, IRQ_EP93XX_DMAM2P2),
DMA_CHANNEL("m2p3", EP93XX_DMA_BASE + 0x00c0, IRQ_EP93XX_DMAM2P3),
DMA_CHANNEL("m2p4", EP93XX_DMA_BASE + 0x0240, IRQ_EP93XX_DMAM2P4),
DMA_CHANNEL("m2p5", EP93XX_DMA_BASE + 0x0200, IRQ_EP93XX_DMAM2P5),
DMA_CHANNEL("m2p6", EP93XX_DMA_BASE + 0x02c0, IRQ_EP93XX_DMAM2P6),
DMA_CHANNEL("m2p7", EP93XX_DMA_BASE + 0x0280, IRQ_EP93XX_DMAM2P7),
DMA_CHANNEL("m2p8", EP93XX_DMA_BASE + 0x0340, IRQ_EP93XX_DMAM2P8),
DMA_CHANNEL("m2p9", EP93XX_DMA_BASE + 0x0300, IRQ_EP93XX_DMAM2P9),
};
static struct ep93xx_dma_platform_data ep93xx_dma_m2p_data = {
.channels = ep93xx_dma_m2p_channels,
.num_channels = ARRAY_SIZE(ep93xx_dma_m2p_channels),
};
static struct platform_device ep93xx_dma_m2p_device = {
.name = "ep93xx-dma-m2p",
.id = -1,
.dev = {
.platform_data = &ep93xx_dma_m2p_data,
},
};
/*
* DMA M2M channels.
*
* There are 2 M2M channels which support memcpy/memset and in addition simple
* hardware requests from/to SSP and IDE. We do not implement external
* hardware requests.
*
* Registers are mapped statically in ep93xx_map_io().
*/
static struct ep93xx_dma_chan_data ep93xx_dma_m2m_channels[] = {
DMA_CHANNEL("m2m0", EP93XX_DMA_BASE + 0x0100, IRQ_EP93XX_DMAM2M0),
DMA_CHANNEL("m2m1", EP93XX_DMA_BASE + 0x0140, IRQ_EP93XX_DMAM2M1),
};
static struct ep93xx_dma_platform_data ep93xx_dma_m2m_data = {
.channels = ep93xx_dma_m2m_channels,
.num_channels = ARRAY_SIZE(ep93xx_dma_m2m_channels),
};
static struct platform_device ep93xx_dma_m2m_device = {
.name = "ep93xx-dma-m2m",
.id = -1,
.dev = {
.platform_data = &ep93xx_dma_m2m_data,
},
};
static int __init ep93xx_dma_init(void)
{
platform_device_register(&ep93xx_dma_m2p_device);
platform_device_register(&ep93xx_dma_m2m_device);
return 0;
}
arch_initcall(ep93xx_dma_init);
/**
* DOC: EP93xx DMA M2P memory to peripheral and peripheral to memory engine
*
* The EP93xx DMA M2P subsystem handles DMA transfers between memory and
* peripherals. DMA M2P channels are available for audio, UARTs and IrDA.
* See chapter 10 of the EP93xx users guide for full details on the DMA M2P
* engine.
*
* See sound/soc/ep93xx/ep93xx-pcm.c for an example use of the DMA M2P code.
*
*/
#ifndef __ASM_ARCH_DMA_H
#define __ASM_ARCH_DMA_H
#include <linux/list.h>
#include <linux/types.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
/**
 * struct ep93xx_dma_buffer - Information about a buffer to be transferred
 * using the DMA M2P engine
 *
 * @list: Entry in DMA buffer list
 * @bus_addr: Physical address of the buffer
 * @size: Size of the buffer in bytes
 */
struct ep93xx_dma_buffer {
	struct list_head list;
	u32 bus_addr;
	u16 size;
};

/*
 * M2P channels.
 *
 * Note that these values are also directly used for setting the PPALLOC
 * register.
 */
#define EP93XX_DMA_I2S1 0
#define EP93XX_DMA_I2S2 1
#define EP93XX_DMA_AAC1 2
#define EP93XX_DMA_AAC2 3
#define EP93XX_DMA_AAC3 4
#define EP93XX_DMA_I2S3 5
#define EP93XX_DMA_UART1 6
#define EP93XX_DMA_UART2 7
#define EP93XX_DMA_UART3 8
#define EP93XX_DMA_IRDA 9
/* M2M channels */
#define EP93XX_DMA_SSP 10
#define EP93XX_DMA_IDE 11
/**
 * struct ep93xx_dma_m2p_client - Information about a DMA M2P client
 *
 * @name: Unique name for this client
 * @flags: Client flags
 * @cookie: User data to pass to callback functions
 * @buffer_started: Non-NULL function to call when a transfer is started.
 *                  The arguments are the user data cookie and the DMA
 *                  buffer which is starting.
 * @buffer_finished: Non-NULL function to call when a transfer is completed.
 *                   The arguments are the user data cookie, the DMA buffer
 *                   which has completed, and a boolean flag indicating if
 *                   the transfer had an error.
 */
struct ep93xx_dma_m2p_client {
	char *name;
	u8 flags;
	void *cookie;
	void (*buffer_started)(void *cookie,
			       struct ep93xx_dma_buffer *buf);
	void (*buffer_finished)(void *cookie,
				struct ep93xx_dma_buffer *buf,
				int bytes, int error);

	/* private: Internal use only */
	void *channel;
};

/**
 * struct ep93xx_dma_data - configuration data for the EP93xx dmaengine
 * @port: peripheral which is requesting the channel
 * @direction: TX/RX channel
 * @name: optional name for the channel; this is displayed in /proc/interrupts
 *
 * This information is passed as the private channel parameter in a filter
 * function. Note that this is only needed for slave/cyclic channels. For
 * memcpy channels, %NULL data should be passed.
 */
struct ep93xx_dma_data {
	int port;
	enum dma_data_direction direction;
	const char *name;
};
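To make the filter contract concrete, here is a minimal sketch of such a
filter function (illustrative only, not part of the patch; the PCM conversion
at the end of this merge uses essentially the same shape, and
ep93xx_dma_chan_direction() is defined later in this header). It accepts a
channel only if it can move data in the requested direction and hands the
configuration to the driver through chan->private:

static bool ep93xx_dma_filter(struct dma_chan *chan, void *filter_param)
{
	struct ep93xx_dma_data *data = filter_param;

	/* M2P channels only work in one direction; check that it matches */
	if (data->direction == ep93xx_dma_chan_direction(chan)) {
		chan->private = data;
		return true;
	}
	return false;
}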
/* DMA M2P ports */
#define EP93XX_DMA_M2P_PORT_I2S1 0x00
#define EP93XX_DMA_M2P_PORT_I2S2 0x01
#define EP93XX_DMA_M2P_PORT_AAC1 0x02
#define EP93XX_DMA_M2P_PORT_AAC2 0x03
#define EP93XX_DMA_M2P_PORT_AAC3 0x04
#define EP93XX_DMA_M2P_PORT_I2S3 0x05
#define EP93XX_DMA_M2P_PORT_UART1 0x06
#define EP93XX_DMA_M2P_PORT_UART2 0x07
#define EP93XX_DMA_M2P_PORT_UART3 0x08
#define EP93XX_DMA_M2P_PORT_IRDA 0x09
#define EP93XX_DMA_M2P_PORT_MASK 0x0f
/* DMA M2P client flags */
#define EP93XX_DMA_M2P_TX 0x00 /* Memory to peripheral */
#define EP93XX_DMA_M2P_RX 0x10 /* Peripheral to memory */
/*
* DMA M2P client error handling flags. See the EP93xx users guide
* documentation on the DMA M2P CONTROL register for more details
*/
#define EP93XX_DMA_M2P_ABORT_ON_ERROR 0x20 /* Abort on peripheral error */
#define EP93XX_DMA_M2P_IGNORE_ERROR 0x40 /* Ignore peripheral errors */
#define EP93XX_DMA_M2P_ERROR_MASK 0x60 /* Mask of error bits */
/**
 * ep93xx_dma_m2p_client_register - Register a client with the DMA M2P
 * subsystem
 *
 * @m2p: Client information to register
 * Returns 0 on success.
 *
 * The DMA M2P subsystem allocates a channel and an interrupt line for the
 * DMA client.
 */
int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *m2p);

/**
 * struct ep93xx_dma_chan_data - platform specific data for a DMA channel
 * @name: name of the channel, used for getting the right clock for the channel
 * @base: mapped registers
 * @irq: interrupt number used by this channel
 */
struct ep93xx_dma_chan_data {
	const char *name;
	void __iomem *base;
	int irq;
};
/**
 * ep93xx_dma_m2p_client_unregister - Unregister a client from the DMA M2P
 * subsystem
 *
 * @m2p: Client to unregister
 *
 * Any transfers currently in progress will be completed in hardware, but
 * ignored in software.
 */
void ep93xx_dma_m2p_client_unregister(struct ep93xx_dma_m2p_client *m2p);

/**
 * struct ep93xx_dma_platform_data - platform data for the dmaengine driver
 * @channels: array of channels which are passed to the driver
 * @num_channels: number of channels in the array
 *
 * This structure is passed to the DMA engine driver via platform data. For
 * M2P channels, the contract is that even channels are for TX and odd ones
 * for RX. There is no such requirement for the M2M channels.
 */
struct ep93xx_dma_platform_data {
	struct ep93xx_dma_chan_data *channels;
	size_t num_channels;
};
/**
* ep93xx_dma_m2p_submit - Submit a DMA M2P transfer
*
* @m2p: DMA Client to submit the transfer on
* @buf: DMA Buffer to submit
*
* If the current or next transfer positions are free on the M2P client then
* the transfer is started immediately. If not, the transfer is added to the
* list of pending transfers. This function must not be called from the
* buffer_finished callback for an M2P channel.
*
*/
void ep93xx_dma_m2p_submit(struct ep93xx_dma_m2p_client *m2p,
struct ep93xx_dma_buffer *buf);
/**
 * ep93xx_dma_m2p_submit_recursive - Put a DMA transfer on the pending list
 * for an M2P channel
 *
 * @m2p: DMA Client to submit the transfer on
 * @buf: DMA Buffer to submit
 *
 * This function must only be called from the buffer_finished callback for an
 * M2P channel. It is commonly used to add the next transfer in a chained list
 * of DMA transfers.
 */
void ep93xx_dma_m2p_submit_recursive(struct ep93xx_dma_m2p_client *m2p,
				     struct ep93xx_dma_buffer *buf);

/**
 * ep93xx_dma_m2p_flush - Flush all pending transfers on a DMA M2P client
 *
 * @m2p: DMA client to flush transfers on
 *
 * Any transfers currently in progress will be completed in hardware, but
 * ignored in software.
 */
void ep93xx_dma_m2p_flush(struct ep93xx_dma_m2p_client *m2p);

static inline bool ep93xx_dma_chan_is_m2p(struct dma_chan *chan)
{
	return !strcmp(dev_name(chan->device->dev), "ep93xx-dma-m2p");
}

/**
 * ep93xx_dma_chan_direction - returns the direction the channel can be used
 * @chan: channel
 *
 * This function can be used in filter functions to find out whether the
 * channel supports a given DMA direction. Only M2P channels have such a
 * limitation; for M2M channels the direction is configurable.
 */
static inline enum dma_data_direction
ep93xx_dma_chan_direction(struct dma_chan *chan)
{
	if (!ep93xx_dma_chan_is_m2p(chan))
		return DMA_NONE;

	/* even channels are for TX, odd for RX */
	return (chan->chan_id % 2 == 0) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}
#endif /* __ASM_ARCH_DMA_H */
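For completeness, a hedged sketch of how a client could request a slave
channel with the helpers above (the port and name values are illustrative;
the filter is the one sketched earlier):

static struct dma_chan *ep93xx_request_uart_tx_chan(void)
{
	static struct ep93xx_dma_data data = {
		.port		= EP93XX_DMA_UART1,
		.direction	= DMA_TO_DEVICE,
		.name		= "uart1-tx",
	};
	dma_cap_mask_t mask;

	/* ask the dmaengine core for any DMA_SLAVE-capable channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return dma_request_channel(mask, ep93xx_dma_filter, &data);
}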
......@@ -7,9 +7,11 @@ struct spi_device;
* struct ep93xx_spi_info - EP93xx specific SPI descriptor
* @num_chipselect: number of chip selects on this board, must be
* at least one
* @use_dma: use DMA for the transfers
*/
struct ep93xx_spi_info {
int num_chipselect;
bool use_dma;
};
/**
......
......@@ -237,6 +237,13 @@ config MXS_DMA
Support the MXS DMA engine. This engine, including APBH-DMA
and APBX-DMA, is integrated into the Freescale i.MX23/28 chips.
config EP93XX_DMA
bool "Cirrus Logic EP93xx DMA support"
depends on ARCH_EP93XX
select DMA_ENGINE
help
Enable support for the Cirrus Logic EP93xx M2P/M2M DMA controller.
config DMA_ENGINE
bool
......
......@@ -25,3 +25,4 @@ obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
obj-$(CONFIG_PL330_DMA) += pl330.o
obj-$(CONFIG_PCH_DMA) += pch_dma.o
obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
......@@ -9,6 +9,5 @@ TODO for slave dma
- mxs-dma.c
- dw_dmac
- intel_mid_dma
- ste_dma40
4. Check other subsystems for dma drivers and merge/move to dmaengine
5. Remove dma_slave_config's dma direction.
......@@ -1216,7 +1216,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
atdma->dma_common.cap_mask = pdata->cap_mask;
atdma->all_chan_mask = (1 << pdata->nr_channels) - 1;
size = io->end - io->start + 1;
size = resource_size(io);
if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
err = -EBUSY;
goto err_kfree;
......@@ -1362,7 +1362,7 @@ static int __exit at_dma_remove(struct platform_device *pdev)
atdma->regs = NULL;
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(io->start, io->end - io->start + 1);
release_mem_region(io->start, resource_size(io));
kfree(atdma);
......
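Both hunks above replace open-coded size arithmetic with the resource_size()
helper; for reference, it is defined in <linux/ioport.h> as:

static inline resource_size_t resource_size(const struct resource *res)
{
	return res->end - res->start + 1;
}

The conversion is therefore behavior-neutral and avoids the recurring
off-by-one mistake.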
......@@ -40,6 +40,8 @@ struct coh901318_desc {
struct coh901318_lli *lli;
enum dma_data_direction dir;
unsigned long flags;
u32 head_config;
u32 head_ctrl;
};
struct coh901318_base {
......@@ -660,6 +662,9 @@ static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc)
coh901318_desc_submit(cohc, cohd);
/* Program the transaction head */
coh901318_set_conf(cohc, cohd->head_config);
coh901318_set_ctrl(cohc, cohd->head_ctrl);
coh901318_prep_linked_list(cohc, cohd->lli);
/* start dma job on this channel */
......@@ -1090,8 +1095,6 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
} else
goto err_direction;
coh901318_set_conf(cohc, config);
/* The dma only supports transmitting packages up to
* MAX_DMA_PACKET_SIZE. Calculate to total number of
* dma elemts required to send the entire sg list
......@@ -1128,16 +1131,18 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (ret)
goto err_lli_fill;
/*
* Set the default ctrl for the channel to the one from the lli,
* things may have changed due to odd buffer alignment etc.
*/
coh901318_set_ctrl(cohc, lli->control);
COH_DBG(coh901318_list_print(cohc, lli));
/* Pick a descriptor to handle this transfer */
cohd = coh901318_desc_get(cohc);
cohd->head_config = config;
/*
* Set the default head ctrl for the channel to the one from the
* lli, things may have changed due to odd buffer alignment
* etc.
*/
cohd->head_ctrl = lli->control;
cohd->dir = direction;
cohd->flags = flags;
cohd->desc.tx_submit = coh901318_tx_submit;
......
......@@ -509,8 +509,8 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
dma_chan_name(chan));
list_del_rcu(&device->global_node);
} else if (err)
pr_err("dmaengine: failed to get %s: (%d)\n",
dma_chan_name(chan), err);
pr_debug("dmaengine: failed to get %s: (%d)\n",
dma_chan_name(chan), err);
else
break;
if (--device->privatecnt == 0)
......
......@@ -1281,8 +1281,10 @@ static int __init sdma_probe(struct platform_device *pdev)
goto err_request_irq;
sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
if (!sdma->script_addrs)
if (!sdma->script_addrs) {
ret = -ENOMEM;
goto err_alloc;
}
sdma->version = pdata->sdma_version;
......
......@@ -1351,7 +1351,6 @@ int dma_suspend(struct pci_dev *pci, pm_message_t state)
return -EAGAIN;
}
device->state = SUSPENDED;
pci_set_drvdata(pci, device);
pci_save_state(pci);
pci_disable_device(pci);
pci_set_power_state(pci, PCI_D3hot);
......@@ -1380,7 +1379,6 @@ int dma_resume(struct pci_dev *pci)
}
device->state = RUNNING;
iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
pci_set_drvdata(pci, device);
return 0;
}
......
......@@ -1705,16 +1705,14 @@ static int __init ipu_probe(struct platform_device *pdev)
ipu_data.irq_fn, ipu_data.irq_err, ipu_data.irq_base);
/* Remap IPU common registers */
ipu_data.reg_ipu = ioremap(mem_ipu->start,
mem_ipu->end - mem_ipu->start + 1);
ipu_data.reg_ipu = ioremap(mem_ipu->start, resource_size(mem_ipu));
if (!ipu_data.reg_ipu) {
ret = -ENOMEM;
goto err_ioremap_ipu;
}
/* Remap Image Converter and Image DMA Controller registers */
ipu_data.reg_ic = ioremap(mem_ic->start,
mem_ic->end - mem_ic->start + 1);
ipu_data.reg_ic = ioremap(mem_ic->start, resource_size(mem_ic));
if (!ipu_data.reg_ic) {
ret = -ENOMEM;
goto err_ioremap_ic;
......
......@@ -1305,7 +1305,7 @@ static int mv_xor_shared_probe(struct platform_device *pdev)
return -ENODEV;
msp->xor_base = devm_ioremap(&pdev->dev, res->start,
res->end - res->start + 1);
resource_size(res));
if (!msp->xor_base)
return -EBUSY;
......@@ -1314,7 +1314,7 @@ static int mv_xor_shared_probe(struct platform_device *pdev)
return -ENODEV;
msp->xor_high_base = devm_ioremap(&pdev->dev, res->start,
res->end - res->start + 1);
resource_size(res));
if (!msp->xor_high_base)
return -EBUSY;
......
......@@ -327,10 +327,12 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
memset(mxs_chan->ccw, 0, PAGE_SIZE);
ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler,
0, "mxs-dma", mxs_dma);
if (ret)
goto err_irq;
if (mxs_chan->chan_irq != NO_IRQ) {
ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler,
0, "mxs-dma", mxs_dma);
if (ret)
goto err_irq;
}
ret = clk_enable(mxs_dma->clk);
if (ret)
......@@ -535,6 +537,7 @@ static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
switch (cmd) {
case DMA_TERMINATE_ALL:
mxs_dma_disable_chan(mxs_chan);
mxs_dma_reset_chan(mxs_chan);
break;
case DMA_PAUSE:
mxs_dma_pause_chan(mxs_chan);
......@@ -707,6 +710,8 @@ static struct platform_device_id mxs_dma_type[] = {
}, {
.name = "mxs-dma-apbx",
.driver_data = MXS_DMA_APBX,
}, {
/* end of list */
}
};
......
......@@ -45,7 +45,8 @@
#define DMA_STATUS_MASK_BITS 0x3
#define DMA_STATUS_SHIFT_BITS 16
#define DMA_STATUS_IRQ(x) (0x1 << (x))
#define DMA_STATUS_ERR(x) (0x1 << ((x) + 8))
#define DMA_STATUS0_ERR(x) (0x1 << ((x) + 8))
#define DMA_STATUS2_ERR(x) (0x1 << (x))
#define DMA_DESC_WIDTH_SHIFT_BITS 12
#define DMA_DESC_WIDTH_1_BYTE (0x3 << DMA_DESC_WIDTH_SHIFT_BITS)
......@@ -61,6 +62,9 @@
#define MAX_CHAN_NR 8
#define DMA_MASK_CTL0_MODE 0x33333333
#define DMA_MASK_CTL2_MODE 0x00003333
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
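init_nr_desc_per_channel is a writable module parameter, so the descriptor
pool size can be tuned without recompiling; assuming the driver is built as
the pch_dma module (see the Makefile hunk earlier), for example:

	modprobe pch_dma init_nr_desc_per_channel=128

or pch_dma.init_nr_desc_per_channel=128 on the kernel command line when the
driver is built in.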
......@@ -133,6 +137,7 @@ struct pch_dma {
#define PCH_DMA_CTL3 0x0C
#define PCH_DMA_STS0 0x10
#define PCH_DMA_STS1 0x14
#define PCH_DMA_STS2 0x18
#define dma_readl(pd, name) \
readl((pd)->membase + PCH_DMA_##name)
......@@ -183,13 +188,19 @@ static void pdc_enable_irq(struct dma_chan *chan, int enable)
{
struct pch_dma *pd = to_pd(chan->device);
u32 val;
int pos;
if (chan->chan_id < 8)
pos = chan->chan_id;
else
pos = chan->chan_id + 8;
val = dma_readl(pd, CTL2);
if (enable)
val |= 0x1 << chan->chan_id;
val |= 0x1 << pos;
else
val &= ~(0x1 << chan->chan_id);
val &= ~(0x1 << pos);
dma_writel(pd, CTL2, val);
......@@ -202,10 +213,17 @@ static void pdc_set_dir(struct dma_chan *chan)
struct pch_dma_chan *pd_chan = to_pd_chan(chan);
struct pch_dma *pd = to_pd(chan->device);
u32 val;
u32 mask_mode;
u32 mask_ctl;
if (chan->chan_id < 8) {
val = dma_readl(pd, CTL0);
mask_mode = DMA_CTL0_MODE_MASK_BITS <<
(DMA_CTL0_BITS_PER_CH * chan->chan_id);
mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
(DMA_CTL0_BITS_PER_CH * chan->chan_id));
val &= mask_mode;
if (pd_chan->dir == DMA_TO_DEVICE)
val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
DMA_CTL0_DIR_SHIFT_BITS);
......@@ -213,18 +231,24 @@ static void pdc_set_dir(struct dma_chan *chan)
val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
DMA_CTL0_DIR_SHIFT_BITS));
val |= mask_ctl;
dma_writel(pd, CTL0, val);
} else {
int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */
val = dma_readl(pd, CTL3);
mask_mode = DMA_CTL0_MODE_MASK_BITS <<
(DMA_CTL0_BITS_PER_CH * ch);
mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
(DMA_CTL0_BITS_PER_CH * ch));
val &= mask_mode;
if (pd_chan->dir == DMA_TO_DEVICE)
val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch +
DMA_CTL0_DIR_SHIFT_BITS);
else
val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch +
DMA_CTL0_DIR_SHIFT_BITS));
val |= mask_ctl;
dma_writel(pd, CTL3, val);
}
......@@ -236,33 +260,37 @@ static void pdc_set_mode(struct dma_chan *chan, u32 mode)
{
struct pch_dma *pd = to_pd(chan->device);
u32 val;
u32 mask_ctl;
u32 mask_dir;
if (chan->chan_id < 8) {
mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
(DMA_CTL0_BITS_PER_CH * chan->chan_id));
mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +\
DMA_CTL0_DIR_SHIFT_BITS);
val = dma_readl(pd, CTL0);
val &= ~(DMA_CTL0_MODE_MASK_BITS <<
(DMA_CTL0_BITS_PER_CH * chan->chan_id));
val &= mask_dir;
val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);
val |= mask_ctl;
dma_writel(pd, CTL0, val);
} else {
int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */
mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
(DMA_CTL0_BITS_PER_CH * ch));
mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * ch +\
DMA_CTL0_DIR_SHIFT_BITS);
val = dma_readl(pd, CTL3);
val &= ~(DMA_CTL0_MODE_MASK_BITS <<
(DMA_CTL0_BITS_PER_CH * ch));
val &= mask_dir;
val |= mode << (DMA_CTL0_BITS_PER_CH * ch);
val |= mask_ctl;
dma_writel(pd, CTL3, val);
}
dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n",
chan->chan_id, val);
}
static u32 pdc_get_status(struct pch_dma_chan *pd_chan)
static u32 pdc_get_status0(struct pch_dma_chan *pd_chan)
{
struct pch_dma *pd = to_pd(pd_chan->chan.device);
u32 val;
......@@ -272,9 +300,27 @@ static u32 pdc_get_status(struct pch_dma_chan *pd_chan)
DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id));
}
static u32 pdc_get_status2(struct pch_dma_chan *pd_chan)
{
struct pch_dma *pd = to_pd(pd_chan->chan.device);
u32 val;
val = dma_readl(pd, STS2);
return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
DMA_STATUS_BITS_PER_CH * (pd_chan->chan.chan_id - 8)));
}
static bool pdc_is_idle(struct pch_dma_chan *pd_chan)
{
if (pdc_get_status(pd_chan) == DMA_STATUS_IDLE)
u32 sts;
if (pd_chan->chan.chan_id < 8)
sts = pdc_get_status0(pd_chan);
else
sts = pdc_get_status2(pd_chan);
if (sts == DMA_STATUS_IDLE)
return true;
else
return false;
......@@ -495,11 +541,11 @@ static int pd_alloc_chan_resources(struct dma_chan *chan)
list_add_tail(&desc->desc_node, &tmp_list);
}
spin_lock_bh(&pd_chan->lock);
spin_lock_irq(&pd_chan->lock);
list_splice(&tmp_list, &pd_chan->free_list);
pd_chan->descs_allocated = i;
pd_chan->completed_cookie = chan->cookie = 1;
spin_unlock_bh(&pd_chan->lock);
spin_unlock_irq(&pd_chan->lock);
pdc_enable_irq(chan, 1);
......@@ -517,10 +563,10 @@ static void pd_free_chan_resources(struct dma_chan *chan)
BUG_ON(!list_empty(&pd_chan->active_list));
BUG_ON(!list_empty(&pd_chan->queue));
spin_lock_bh(&pd_chan->lock);
spin_lock_irq(&pd_chan->lock);
list_splice_init(&pd_chan->free_list, &tmp_list);
pd_chan->descs_allocated = 0;
spin_unlock_bh(&pd_chan->lock);
spin_unlock_irq(&pd_chan->lock);
list_for_each_entry_safe(desc, _d, &tmp_list, desc_node)
pci_pool_free(pd->pool, desc, desc->txd.phys);
......@@ -536,10 +582,10 @@ static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
dma_cookie_t last_completed;
int ret;
spin_lock_bh(&pd_chan->lock);
spin_lock_irq(&pd_chan->lock);
last_completed = pd_chan->completed_cookie;
last_used = chan->cookie;
spin_unlock_bh(&pd_chan->lock);
spin_unlock_irq(&pd_chan->lock);
ret = dma_async_is_complete(cookie, last_completed, last_used);
......@@ -654,7 +700,7 @@ static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
if (cmd != DMA_TERMINATE_ALL)
return -ENXIO;
spin_lock_bh(&pd_chan->lock);
spin_lock_irq(&pd_chan->lock);
pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE);
......@@ -664,7 +710,7 @@ static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
list_for_each_entry_safe(desc, _d, &list, desc_node)
pdc_chain_complete(pd_chan, desc);
spin_unlock_bh(&pd_chan->lock);
spin_unlock_irq(&pd_chan->lock);
return 0;
}
......@@ -693,30 +739,45 @@ static irqreturn_t pd_irq(int irq, void *devid)
struct pch_dma *pd = (struct pch_dma *)devid;
struct pch_dma_chan *pd_chan;
u32 sts0;
u32 sts2;
int i;
int ret = IRQ_NONE;
int ret0 = IRQ_NONE;
int ret2 = IRQ_NONE;
sts0 = dma_readl(pd, STS0);
sts2 = dma_readl(pd, STS2);
dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0);
for (i = 0; i < pd->dma.chancnt; i++) {
pd_chan = &pd->channels[i];
if (sts0 & DMA_STATUS_IRQ(i)) {
if (sts0 & DMA_STATUS_ERR(i))
set_bit(0, &pd_chan->err_status);
if (i < 8) {
if (sts0 & DMA_STATUS_IRQ(i)) {
if (sts0 & DMA_STATUS0_ERR(i))
set_bit(0, &pd_chan->err_status);
tasklet_schedule(&pd_chan->tasklet);
ret = IRQ_HANDLED;
}
tasklet_schedule(&pd_chan->tasklet);
ret0 = IRQ_HANDLED;
}
} else {
if (sts2 & DMA_STATUS_IRQ(i - 8)) {
if (sts2 & DMA_STATUS2_ERR(i))
set_bit(0, &pd_chan->err_status);
tasklet_schedule(&pd_chan->tasklet);
ret2 = IRQ_HANDLED;
}
}
}
/* clear interrupt bits in status register */
dma_writel(pd, STS0, sts0);
if (ret0)
dma_writel(pd, STS0, sts0);
if (ret2)
dma_writel(pd, STS2, sts2);
return ret;
return ret0 | ret2;
}
#ifdef CONFIG_PM
......
......@@ -82,7 +82,7 @@ struct dma_pl330_dmac {
spinlock_t pool_lock;
/* Peripheral channels connected to this DMAC */
struct dma_pl330_chan peripherals[0]; /* keep at end */
struct dma_pl330_chan *peripherals; /* keep at end */
};
struct dma_pl330_desc {
......@@ -451,8 +451,13 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
desc->txd.cookie = 0;
async_tx_ack(&desc->txd);
desc->req.rqtype = peri->rqtype;
desc->req.peri = peri->peri_id;
if (peri) {
desc->req.rqtype = peri->rqtype;
desc->req.peri = peri->peri_id;
} else {
desc->req.rqtype = MEMTOMEM;
desc->req.peri = 0;
}
dma_async_tx_descriptor_init(&desc->txd, &pch->chan);
......@@ -529,10 +534,10 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
struct pl330_info *pi;
int burst;
if (unlikely(!pch || !len || !peri))
if (unlikely(!pch || !len))
return NULL;
if (peri->rqtype != MEMTOMEM)
if (peri && peri->rqtype != MEMTOMEM)
return NULL;
pi = &pch->dmac->pif;
......@@ -577,7 +582,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
int i, burst_size;
dma_addr_t addr;
if (unlikely(!pch || !sgl || !sg_len))
if (unlikely(!pch || !sgl || !sg_len || !peri))
return NULL;
/* Make sure the direction is consistent */
......@@ -666,17 +671,12 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
struct dma_device *pd;
struct resource *res;
int i, ret, irq;
int num_chan;
pdat = adev->dev.platform_data;
if (!pdat || !pdat->nr_valid_peri) {
dev_err(&adev->dev, "platform data missing\n");
return -ENODEV;
}
/* Allocate a new DMAC and its Channels */
pdmac = kzalloc(pdat->nr_valid_peri * sizeof(*pch)
+ sizeof(*pdmac), GFP_KERNEL);
pdmac = kzalloc(sizeof(*pdmac), GFP_KERNEL);
if (!pdmac) {
dev_err(&adev->dev, "unable to allocate mem\n");
return -ENOMEM;
......@@ -685,7 +685,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pi = &pdmac->pif;
pi->dev = &adev->dev;
pi->pl330_data = NULL;
pi->mcbufsz = pdat->mcbuf_sz;
pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;
res = &adev->res;
request_mem_region(res->start, resource_size(res), "dma-pl330");
......@@ -717,27 +717,35 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
INIT_LIST_HEAD(&pd->channels);
/* Initialize channel parameters */
for (i = 0; i < pdat->nr_valid_peri; i++) {
	struct dma_pl330_peri *peri = &pdat->peri[i];
	pch = &pdmac->peripherals[i];

	switch (peri->rqtype) {
	case MEMTOMEM:
		dma_cap_set(DMA_MEMCPY, pd->cap_mask);
		break;
	case MEMTODEV:
	case DEVTOMEM:
		dma_cap_set(DMA_SLAVE, pd->cap_mask);
		break;
	default:
		dev_err(&adev->dev, "DEVTODEV Not Supported\n");
		continue;
	}

num_chan = max(pdat ? pdat->nr_valid_peri : 0, (u8)pi->pcfg.num_chan);
pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);

for (i = 0; i < num_chan; i++) {
	pch = &pdmac->peripherals[i];
	if (pdat) {
		struct dma_pl330_peri *peri = &pdat->peri[i];

		switch (peri->rqtype) {
		case MEMTOMEM:
			dma_cap_set(DMA_MEMCPY, pd->cap_mask);
			break;
		case MEMTODEV:
		case DEVTOMEM:
			dma_cap_set(DMA_SLAVE, pd->cap_mask);
			break;
		default:
			dev_err(&adev->dev, "DEVTODEV Not Supported\n");
			continue;
		}
		pch->chan.private = peri;
	} else {
		dma_cap_set(DMA_MEMCPY, pd->cap_mask);
		pch->chan.private = NULL;
	}
INIT_LIST_HEAD(&pch->work_list);
spin_lock_init(&pch->lock);
pch->pl330_chid = NULL;
pch->chan.private = peri;
pch->chan.device = pd;
pch->chan.chan_id = i;
pch->dmac = pdmac;
......
......@@ -184,9 +184,6 @@
#define D40_DREG_PERIPHID0 0xFE0
#define D40_DREG_PERIPHID1 0xFE4
#define D40_DREG_PERIPHID2 0xFE8
#define D40_DREG_PERIPHID2_REV_POS 4
#define D40_DREG_PERIPHID2_REV_MASK (0xf << D40_DREG_PERIPHID2_REV_POS)
#define D40_DREG_PERIPHID2_DESIGNER_MASK 0xf
#define D40_DREG_PERIPHID3 0xFEC
#define D40_DREG_CELLID0 0xFF0
#define D40_DREG_CELLID1 0xFF4
......
......@@ -172,8 +172,11 @@ struct pl08x_dma_chan {
int phychan_hold;
struct tasklet_struct tasklet;
char *name;
struct pl08x_channel_data *cd;
dma_addr_t runtime_addr;
const struct pl08x_channel_data *cd;
dma_addr_t src_addr;
dma_addr_t dst_addr;
u32 src_cctl;
u32 dst_cctl;
enum dma_data_direction runtime_direction;
dma_cookie_t lc;
struct list_head pend_list;
......@@ -202,7 +205,7 @@ struct pl08x_dma_chan {
* @mem_buses: buses which memory can be accessed from: PL08X_AHB1 | PL08X_AHB2
*/
struct pl08x_platform_data {
struct pl08x_channel_data *slave_channels;
const struct pl08x_channel_data *slave_channels;
unsigned int num_slave_channels;
struct pl08x_channel_data memcpy_channel;
int (*get_signal)(struct pl08x_dma_chan *);
......
......@@ -106,12 +106,12 @@ static struct ep93xx_ac97_info *ep93xx_ac97_info;
static struct ep93xx_pcm_dma_params ep93xx_ac97_pcm_out = {
.name = "ac97-pcm-out",
.dma_port = EP93XX_DMA_M2P_PORT_AAC1,
.dma_port = EP93XX_DMA_AAC1,
};
static struct ep93xx_pcm_dma_params ep93xx_ac97_pcm_in = {
.name = "ac97-pcm-in",
.dma_port = EP93XX_DMA_M2P_PORT_AAC1,
.dma_port = EP93XX_DMA_AAC1,
};
static inline unsigned ep93xx_ac97_read_reg(struct ep93xx_ac97_info *info,
......
......@@ -70,11 +70,11 @@ struct ep93xx_i2s_info {
struct ep93xx_pcm_dma_params ep93xx_i2s_dma_params[] = {
[SNDRV_PCM_STREAM_PLAYBACK] = {
.name = "i2s-pcm-out",
.dma_port = EP93XX_DMA_M2P_PORT_I2S1,
.dma_port = EP93XX_DMA_I2S1,
},
[SNDRV_PCM_STREAM_CAPTURE] = {
.name = "i2s-pcm-in",
.dma_port = EP93XX_DMA_M2P_PORT_I2S1,
.dma_port = EP93XX_DMA_I2S1,
},
};
......
......@@ -16,6 +16,7 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <sound/core.h>
......@@ -53,43 +54,34 @@ static const struct snd_pcm_hardware ep93xx_pcm_hardware = {
struct ep93xx_runtime_data
{
struct ep93xx_dma_m2p_client cl;
struct ep93xx_pcm_dma_params *params;
int pointer_bytes;
struct tasklet_struct period_tasklet;
int periods;
struct ep93xx_dma_buffer buf[32];
int period_bytes;
struct dma_chan *dma_chan;
struct ep93xx_dma_data dma_data;
};
static void ep93xx_pcm_period_elapsed(unsigned long data)
{
	struct snd_pcm_substream *substream = (struct snd_pcm_substream *)data;
	snd_pcm_period_elapsed(substream);
}

static void ep93xx_pcm_buffer_started(void *cookie,
				      struct ep93xx_dma_buffer *buf)
{
}

static void ep93xx_pcm_buffer_finished(void *cookie,
				       struct ep93xx_dma_buffer *buf,
				       int bytes, int error)
{
	struct snd_pcm_substream *substream = cookie;
	struct ep93xx_runtime_data *rtd = substream->runtime->private_data;

	if (buf == rtd->buf + rtd->periods - 1)
		rtd->pointer_bytes = 0;
	else
		rtd->pointer_bytes += buf->size;

	if (!error) {
		ep93xx_dma_m2p_submit_recursive(&rtd->cl, buf);
		tasklet_schedule(&rtd->period_tasklet);
	} else {
		snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
	}
}

static void ep93xx_pcm_dma_callback(void *data)
{
	struct snd_pcm_substream *substream = data;
	struct ep93xx_runtime_data *rtd = substream->runtime->private_data;

	rtd->pointer_bytes += rtd->period_bytes;
	rtd->pointer_bytes %= rtd->period_bytes * rtd->periods;

	snd_pcm_period_elapsed(substream);
}

static bool ep93xx_pcm_dma_filter(struct dma_chan *chan, void *filter_param)
{
	struct ep93xx_dma_data *data = filter_param;

	if (data->direction == ep93xx_dma_chan_direction(chan)) {
		chan->private = data;
		return true;
	}

	return false;
}
static int ep93xx_pcm_open(struct snd_pcm_substream *substream)
......@@ -98,30 +90,38 @@ static int ep93xx_pcm_open(struct snd_pcm_substream *substream)
struct snd_soc_dai *cpu_dai = soc_rtd->cpu_dai;
struct ep93xx_pcm_dma_params *dma_params;
struct ep93xx_runtime_data *rtd;
dma_cap_mask_t mask;
int ret;
dma_params = snd_soc_dai_get_dma_data(cpu_dai, substream);
ret = snd_pcm_hw_constraint_integer(substream->runtime,
SNDRV_PCM_HW_PARAM_PERIODS);
if (ret < 0)
return ret;
snd_soc_set_runtime_hwparams(substream, &ep93xx_pcm_hardware);
rtd = kmalloc(sizeof(*rtd), GFP_KERNEL);
if (!rtd)
return -ENOMEM;
memset(&rtd->period_tasklet, 0, sizeof(rtd->period_tasklet));
rtd->period_tasklet.func = ep93xx_pcm_period_elapsed;
rtd->period_tasklet.data = (unsigned long)substream;
rtd->cl.name = dma_params->name;
rtd->cl.flags = dma_params->dma_port | EP93XX_DMA_M2P_IGNORE_ERROR |
((substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
EP93XX_DMA_M2P_TX : EP93XX_DMA_M2P_RX);
rtd->cl.cookie = substream;
rtd->cl.buffer_started = ep93xx_pcm_buffer_started;
rtd->cl.buffer_finished = ep93xx_pcm_buffer_finished;
ret = ep93xx_dma_m2p_client_register(&rtd->cl);
if (ret < 0) {
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
dma_cap_set(DMA_CYCLIC, mask);
dma_params = snd_soc_dai_get_dma_data(cpu_dai, substream);
rtd->dma_data.port = dma_params->dma_port;
rtd->dma_data.name = dma_params->name;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
rtd->dma_data.direction = DMA_TO_DEVICE;
else
rtd->dma_data.direction = DMA_FROM_DEVICE;
rtd->dma_chan = dma_request_channel(mask, ep93xx_pcm_dma_filter,
&rtd->dma_data);
if (!rtd->dma_chan) {
kfree(rtd);
return ret;
return -EINVAL;
}
substream->runtime->private_data = rtd;
......@@ -132,31 +132,52 @@ static int ep93xx_pcm_close(struct snd_pcm_substream *substream)
{
struct ep93xx_runtime_data *rtd = substream->runtime->private_data;
ep93xx_dma_m2p_client_unregister(&rtd->cl);
dma_release_channel(rtd->dma_chan);
kfree(rtd);
return 0;
}
static int ep93xx_pcm_dma_submit(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct ep93xx_runtime_data *rtd = runtime->private_data;
struct dma_chan *chan = rtd->dma_chan;
struct dma_device *dma_dev = chan->device;
struct dma_async_tx_descriptor *desc;
rtd->pointer_bytes = 0;
desc = dma_dev->device_prep_dma_cyclic(chan, runtime->dma_addr,
rtd->period_bytes * rtd->periods,
rtd->period_bytes,
rtd->dma_data.direction);
if (!desc)
return -EINVAL;
desc->callback = ep93xx_pcm_dma_callback;
desc->callback_param = substream;
dmaengine_submit(desc);
return 0;
}
static void ep93xx_pcm_dma_flush(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct ep93xx_runtime_data *rtd = runtime->private_data;
dmaengine_terminate_all(rtd->dma_chan);
}
static int ep93xx_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct ep93xx_runtime_data *rtd = runtime->private_data;
size_t totsize = params_buffer_bytes(params);
size_t period = params_period_bytes(params);
int i;
snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
runtime->dma_bytes = totsize;
rtd->periods = (totsize + period - 1) / period;
for (i = 0; i < rtd->periods; i++) {
rtd->buf[i].bus_addr = runtime->dma_addr + (i * period);
rtd->buf[i].size = period;
if ((i + 1) * period > totsize)
rtd->buf[i].size = totsize - (i * period);
}
rtd->periods = params_periods(params);
rtd->period_bytes = params_period_bytes(params);
return 0;
}
......@@ -168,24 +189,20 @@ static int ep93xx_pcm_hw_free(struct snd_pcm_substream *substream)
static int ep93xx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
struct ep93xx_runtime_data *rtd = substream->runtime->private_data;
int ret;
int i;
ret = 0;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
rtd->pointer_bytes = 0;
for (i = 0; i < rtd->periods; i++)
ep93xx_dma_m2p_submit(&rtd->cl, rtd->buf + i);
ret = ep93xx_pcm_dma_submit(substream);
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
ep93xx_dma_m2p_flush(&rtd->cl);
ep93xx_pcm_dma_flush(substream);
break;
default:
......