Commit 6fc4573c authored by Daniel Mack, committed by Vinod Koul

dma: mmp_pdma: add support for byte-aligned transfers

The PXA DMA controller has a DALGN register which allows for
byte-aligned DMA transfers. Use it in case any of the transfer
descriptors is not aligned to a mask of ~0x7.
Signed-off-by: Daniel Mack <zonque@gmail.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
parent 8fd6aac3
...@@ -109,6 +109,7 @@ struct mmp_pdma_chan { ...@@ -109,6 +109,7 @@ struct mmp_pdma_chan {
struct list_head chain_pending; /* Link descriptors queue for pending */ struct list_head chain_pending; /* Link descriptors queue for pending */
struct list_head chain_running; /* Link descriptors queue for running */ struct list_head chain_running; /* Link descriptors queue for running */
bool idle; /* channel statue machine */ bool idle; /* channel statue machine */
bool byte_align;
struct dma_pool *desc_pool; /* Descriptors pool */ struct dma_pool *desc_pool; /* Descriptors pool */
}; };
...@@ -142,7 +143,7 @@ static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr) ...@@ -142,7 +143,7 @@ static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
static void enable_chan(struct mmp_pdma_phy *phy) static void enable_chan(struct mmp_pdma_phy *phy)
{ {
u32 reg; u32 reg, dalgn;
if (!phy->vchan) if (!phy->vchan)
return; return;
...@@ -150,6 +151,13 @@ static void enable_chan(struct mmp_pdma_phy *phy) ...@@ -150,6 +151,13 @@ static void enable_chan(struct mmp_pdma_phy *phy)
reg = DRCMR(phy->vchan->drcmr); reg = DRCMR(phy->vchan->drcmr);
writel(DRCMR_MAPVLD | phy->idx, phy->base + reg); writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);
dalgn = readl(phy->base + DALGN);
if (phy->vchan->byte_align)
dalgn |= 1 << phy->idx;
else
dalgn &= ~(1 << phy->idx);
writel(dalgn, phy->base + DALGN);
reg = (phy->idx << 2) + DCSR; reg = (phy->idx << 2) + DCSR;
writel(readl(phy->base + reg) | DCSR_RUN, writel(readl(phy->base + reg) | DCSR_RUN,
phy->base + reg); phy->base + reg);
...@@ -455,6 +463,7 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan, ...@@ -455,6 +463,7 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan,
return NULL; return NULL;
chan = to_mmp_pdma_chan(dchan); chan = to_mmp_pdma_chan(dchan);
chan->byte_align = false;
if (!chan->dir) { if (!chan->dir) {
chan->dir = DMA_MEM_TO_MEM; chan->dir = DMA_MEM_TO_MEM;
...@@ -471,6 +480,8 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan, ...@@ -471,6 +480,8 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan,
} }
copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES); copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
if (dma_src & 0x7 || dma_dst & 0x7)
chan->byte_align = true;
new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy); new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
new->desc.dsadr = dma_src; new->desc.dsadr = dma_src;
...@@ -530,12 +541,16 @@ mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, ...@@ -530,12 +541,16 @@ mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
if ((sgl == NULL) || (sg_len == 0)) if ((sgl == NULL) || (sg_len == 0))
return NULL; return NULL;
chan->byte_align = false;
for_each_sg(sgl, sg, sg_len, i) { for_each_sg(sgl, sg, sg_len, i) {
addr = sg_dma_address(sg); addr = sg_dma_address(sg);
avail = sg_dma_len(sgl); avail = sg_dma_len(sgl);
do { do {
len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES); len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
if (addr & 0x7)
chan->byte_align = true;
/* allocate and populate the descriptor */ /* allocate and populate the descriptor */
new = mmp_pdma_alloc_descriptor(chan); new = mmp_pdma_alloc_descriptor(chan);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment