Commit b8349172 authored by Andrea Merello, committed by Vinod Koul

dmaengine: xilinx_dma: Drop SG support for VDMA IP

xilinx_vdma_start_transfer() is used only for the VDMA IP, yet it still
contains code conditional on the has_sg variable. has_sg is set only when
the HW supports SG mode, which is never the case for the VDMA IP.

This patch drops the never-taken branches.
Signed-off-by: Andrea Merello <andrea.merello@gmail.com>
Reviewed-by: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>
Signed-off-by: Vinod Koul <vkoul@kernel.org>
parent 29b9ee4a
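
Why the dropped CIRC_EN branch is dead code: with chan->has_sg always false on the VDMA path, the old "set, then maybe clear" sequence computes exactly the same DMACR value as the new plain if/else on config->park. A minimal standalone sketch checking that reduction (not driver code; the CIRC_EN bit position is assumed to mirror the driver's DMACR layout, and old_logic/new_logic are illustrative names):

/*
 * Standalone sketch: verify that, when has_sg == false, the old and new
 * circular-mode logic agree for any starting register value.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define XILINX_DMA_DMACR_CIRC_EN (1U << 1) /* assumed bit position */

static uint32_t old_logic(bool has_sg, bool park, uint32_t reg)
{
        if (has_sg || !park)
                reg |= XILINX_DMA_DMACR_CIRC_EN;
        if (park)
                reg &= ~XILINX_DMA_DMACR_CIRC_EN;
        return reg;
}

static uint32_t new_logic(bool park, uint32_t reg)
{
        /* If not parking, enable circular mode */
        if (park)
                reg &= ~XILINX_DMA_DMACR_CIRC_EN;
        else
                reg |= XILINX_DMA_DMACR_CIRC_EN;
        return reg;
}

int main(void)
{
        uint32_t reg;

        /* has_sg == false is the only case the VDMA IP can hit */
        for (reg = 0; reg < 8; reg++) {
                assert(old_logic(false, false, reg) == new_logic(false, reg));
                assert(old_logic(false, true, reg) == new_logic(true, reg));
        }
        return 0;
}

Compiled with a plain C compiler, the asserts pass for every starting register value, which is the justification for the simpler if/else in the hunk below.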
@@ -1102,6 +1102,8 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 	struct xilinx_dma_tx_descriptor *desc, *tail_desc;
 	u32 reg, j;
 	struct xilinx_vdma_tx_segment *tail_segment;
+	struct xilinx_vdma_tx_segment *segment, *last = NULL;
+	int i = 0;
 
 	/* This function was invoked with lock held */
 	if (chan->err)
@@ -1121,14 +1123,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 	tail_segment = list_last_entry(&tail_desc->segments,
 				       struct xilinx_vdma_tx_segment, node);
 
-	/*
-	 * If hardware is idle, then all descriptors on the running lists are
-	 * done, start new transfers
-	 */
-	if (chan->has_sg)
-		dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
-			       desc->async_tx.phys);
-
 	/* Configure the hardware using info in the config structure */
 	if (chan->has_vflip) {
 		reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
@@ -1145,15 +1139,11 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 	else
 		reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
 
-	/*
-	 * With SG, start with circular mode, so that BDs can be fetched.
-	 * In direct register mode, if not parking, enable circular mode
-	 */
-	if (chan->has_sg || !config->park)
-		reg |= XILINX_DMA_DMACR_CIRC_EN;
-
+	/* If not parking, enable circular mode */
 	if (config->park)
 		reg &= ~XILINX_DMA_DMACR_CIRC_EN;
+	else
+		reg |= XILINX_DMA_DMACR_CIRC_EN;
 
 	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
 
@@ -1175,48 +1165,38 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 		return;
 
 	/* Start the transfer */
-	if (chan->has_sg) {
-		dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
-				tail_segment->phys);
-		list_splice_tail_init(&chan->pending_list, &chan->active_list);
-		chan->desc_pendingcount = 0;
-	} else {
-		struct xilinx_vdma_tx_segment *segment, *last = NULL;
-		int i = 0;
-
-		if (chan->desc_submitcount < chan->num_frms)
-			i = chan->desc_submitcount;
-
-		list_for_each_entry(segment, &desc->segments, node) {
-			if (chan->ext_addr)
-				vdma_desc_write_64(chan,
-					XILINX_VDMA_REG_START_ADDRESS_64(i++),
-					segment->hw.buf_addr,
-					segment->hw.buf_addr_msb);
-			else
-				vdma_desc_write(chan,
+	if (chan->desc_submitcount < chan->num_frms)
+		i = chan->desc_submitcount;
+
+	list_for_each_entry(segment, &desc->segments, node) {
+		if (chan->ext_addr)
+			vdma_desc_write_64(chan,
+				XILINX_VDMA_REG_START_ADDRESS_64(i++),
+				segment->hw.buf_addr,
+				segment->hw.buf_addr_msb);
+		else
+			vdma_desc_write(chan,
 					XILINX_VDMA_REG_START_ADDRESS(i++),
 					segment->hw.buf_addr);
 
-			last = segment;
-		}
-
-		if (!last)
-			return;
-
-		/* HW expects these parameters to be same for one transaction */
-		vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
-		vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
-				last->hw.stride);
-		vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
-
-		chan->desc_submitcount++;
-		chan->desc_pendingcount--;
-		list_del(&desc->node);
-		list_add_tail(&desc->node, &chan->active_list);
-		if (chan->desc_submitcount == chan->num_frms)
-			chan->desc_submitcount = 0;
-	}
+		last = segment;
+	}
+
+	if (!last)
+		return;
+
+	/* HW expects these parameters to be same for one transaction */
+	vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
+	vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
+			last->hw.stride);
+	vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
+
+	chan->desc_submitcount++;
+	chan->desc_pendingcount--;
+	list_del(&desc->node);
+	list_add_tail(&desc->node, &chan->active_list);
+	if (chan->desc_submitcount == chan->num_frms)
+		chan->desc_submitcount = 0;
 
 	chan->idle = false;
 }
...
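
The surviving direct-register path above writes each segment's buffer address into a per-frame start-address slot, beginning at desc_submitcount and wrapping to slot 0 once num_frms frames have been submitted. A standalone sketch of that round-robin bookkeeping (NUM_FRMS, submit_one_frame and the printf are hypothetical stand-ins for the driver's channel state and vdma_desc_write(); none of these names come from the driver):

/*
 * Standalone sketch: model the VDMA round-robin frame-store indexing.
 */
#include <stdio.h>
#include <stdint.h>

#define NUM_FRMS 3 /* hypothetical frame-store depth */

static unsigned int desc_submitcount;

static void submit_one_frame(uint64_t buf_addr)
{
        unsigned int i = 0;

        /* Start writing at the next free frame-store slot */
        if (desc_submitcount < NUM_FRMS)
                i = desc_submitcount;

        /* Stand-in for vdma_desc_write(chan, START_ADDRESS(i), buf_addr) */
        printf("frame slot %u <- buf 0x%llx\n", i,
               (unsigned long long)buf_addr);

        /* One descriptor submitted; wrap when all slots are filled */
        desc_submitcount++;
        if (desc_submitcount == NUM_FRMS)
                desc_submitcount = 0;
}

int main(void)
{
        int n;

        for (n = 0; n < 5; n++)
                submit_one_frame(0x1000 + 0x100 * n);
        return 0;
}

Running this submits five frames into a hypothetical three-slot frame store and prints slots 0, 1, 2, 0, 1, mirroring how desc_submitcount wraps in the driver.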