Commit 95335f1f authored by Maxime Ripard, committed by Vinod Koul

dmaengine: mpc512x: Split device_control

Split the device_control callback of the Freescale MPC512x DMA driver to make
use of the newly introduced callbacks, which will eventually be used to retrieve
slave capabilities.
Signed-off-by: Maxime Ripard <maxime.ripard@free-electrons.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
parent b7f7552b
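For context (not part of this patch): a minimal, hedged sketch of how a slave client drives the two paths being split here. dmaengine_slave_config() ends up in mpc_dma_device_config() and dmaengine_terminate_all() in mpc_dma_device_terminate_all(); the example_setup() helper, the "rx" channel name and the FIFO address are hypothetical.

#include <linux/device.h>
#include <linux/dmaengine.h>

static int example_setup(struct device *dev, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction      = DMA_DEV_TO_MEM,
		/* this driver validates both widths and both address alignments */
		.src_addr       = fifo_addr,	/* must be 4-byte aligned */
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst   = 16,
	};
	struct dma_chan *chan;
	int ret;

	chan = dma_request_slave_channel(dev, "rx");	/* hypothetical channel name */
	if (!chan)
		return -ENODEV;

	/* routed to the new device_config callback (mpc_dma_device_config) */
	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		goto out;

	/* ... prepare and submit slave descriptors here ... */

	/* routed to the new device_terminate_all callback */
	dmaengine_terminate_all(chan);
out:
	dma_release_channel(chan);
	return ret;
}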
@@ -800,79 +800,69 @@ mpc_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	return NULL;
 }
 
-static int mpc_dma_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-							unsigned long arg)
+static int mpc_dma_device_config(struct dma_chan *chan,
+				 struct dma_slave_config *cfg)
 {
-	struct mpc_dma_chan *mchan;
-	struct mpc_dma *mdma;
-	struct dma_slave_config *cfg;
+	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
 	unsigned long flags;
 
-	mchan = dma_chan_to_mpc_dma_chan(chan);
-	switch (cmd) {
-	case DMA_TERMINATE_ALL:
-		/* Disable channel requests */
-		mdma = dma_chan_to_mpc_dma(chan);
-
-		spin_lock_irqsave(&mchan->lock, flags);
-
-		out_8(&mdma->regs->dmacerq, chan->chan_id);
-		list_splice_tail_init(&mchan->prepared, &mchan->free);
-		list_splice_tail_init(&mchan->queued, &mchan->free);
-		list_splice_tail_init(&mchan->active, &mchan->free);
-
-		spin_unlock_irqrestore(&mchan->lock, flags);
-
-		return 0;
-
-	case DMA_SLAVE_CONFIG:
-		/*
-		 * Software constraints:
-		 *  - only transfers between a peripheral device and
-		 *     memory are supported;
-		 *  - only peripheral devices with 4-byte FIFO access register
-		 *     are supported;
-		 *  - minimal transfer chunk is 4 bytes and consequently
-		 *     source and destination addresses must be 4-byte aligned
-		 *     and transfer size must be aligned on (4 * maxburst)
-		 *     boundary;
-		 *  - during the transfer RAM address is being incremented by
-		 *     the size of minimal transfer chunk;
-		 *  - peripheral port's address is constant during the transfer.
-		 */
-
-		cfg = (void *)arg;
-
-		if (cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
-		    cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
-		    !IS_ALIGNED(cfg->src_addr, 4) ||
-		    !IS_ALIGNED(cfg->dst_addr, 4))
-			return -EINVAL;
-
-		spin_lock_irqsave(&mchan->lock, flags);
-
-		mchan->src_per_paddr = cfg->src_addr;
-		mchan->src_tcd_nunits = cfg->src_maxburst;
-		mchan->dst_per_paddr = cfg->dst_addr;
-		mchan->dst_tcd_nunits = cfg->dst_maxburst;
-
-		/* Apply defaults */
-		if (mchan->src_tcd_nunits == 0)
-			mchan->src_tcd_nunits = 1;
-		if (mchan->dst_tcd_nunits == 0)
-			mchan->dst_tcd_nunits = 1;
-
-		spin_unlock_irqrestore(&mchan->lock, flags);
+	/*
+	 * Software constraints:
+	 *  - only transfers between a peripheral device and
+	 *     memory are supported;
+	 *  - only peripheral devices with 4-byte FIFO access register
+	 *     are supported;
+	 *  - minimal transfer chunk is 4 bytes and consequently
+	 *     source and destination addresses must be 4-byte aligned
+	 *     and transfer size must be aligned on (4 * maxburst)
+	 *     boundary;
+	 *  - during the transfer RAM address is being incremented by
+	 *     the size of minimal transfer chunk;
+	 *  - peripheral port's address is constant during the transfer.
+	 */
+	if (cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
+	    cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
+	    !IS_ALIGNED(cfg->src_addr, 4) ||
+	    !IS_ALIGNED(cfg->dst_addr, 4)) {
+		return -EINVAL;
+	}
 
-		return 0;
+	spin_lock_irqsave(&mchan->lock, flags);
 
-	default:
-		/* Unknown command */
-		break;
-	}
+	mchan->src_per_paddr = cfg->src_addr;
+	mchan->src_tcd_nunits = cfg->src_maxburst;
+	mchan->dst_per_paddr = cfg->dst_addr;
+	mchan->dst_tcd_nunits = cfg->dst_maxburst;
+
+	/* Apply defaults */
+	if (mchan->src_tcd_nunits == 0)
+		mchan->src_tcd_nunits = 1;
+	if (mchan->dst_tcd_nunits == 0)
+		mchan->dst_tcd_nunits = 1;
+
+	spin_unlock_irqrestore(&mchan->lock, flags);
+
+	return 0;
+}
+
+static int mpc_dma_device_terminate_all(struct dma_chan *chan)
+{
+	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
+	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
+	unsigned long flags;
+
+	/* Disable channel requests */
+	spin_lock_irqsave(&mchan->lock, flags);
+
+	out_8(&mdma->regs->dmacerq, chan->chan_id);
+	list_splice_tail_init(&mchan->prepared, &mchan->free);
+	list_splice_tail_init(&mchan->queued, &mchan->free);
+	list_splice_tail_init(&mchan->active, &mchan->free);
+
+	spin_unlock_irqrestore(&mchan->lock, flags);
 
-	return -ENXIO;
+	return 0;
 }
 
 static int mpc_dma_probe(struct platform_device *op)
@@ -963,7 +953,8 @@ static int mpc_dma_probe(struct platform_device *op)
 	dma->device_tx_status = mpc_dma_tx_status;
 	dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;
 	dma->device_prep_slave_sg = mpc_dma_prep_slave_sg;
-	dma->device_control = mpc_dma_device_control;
+	dma->device_config = mpc_dma_device_config;
+	dma->device_terminate_all = mpc_dma_device_terminate_all;
 	INIT_LIST_HEAD(&dma->channels);
 	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
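A hedged sketch of the capability-retrieval side the commit message alludes to: once the dmaengine core can derive slave capabilities from these split callbacks, a client may query a channel generically with the existing dma_get_slave_caps() helper rather than probing with trial configurations. The example_chan_fits() helper and the exact checks are illustrative assumptions, not part of this commit.

#include <linux/bitops.h>
#include <linux/dmaengine.h>

static bool example_chan_fits(struct dma_chan *chan)
{
	struct dma_slave_caps caps;

	if (dma_get_slave_caps(chan, &caps))
		return false;

	/* MPC512x only supports 4-byte accesses on the peripheral side */
	return (caps.src_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) &&
	       caps.cmd_terminate;
}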