Commit f0579c8c authored by Andy Shevchenko, committed by Vinod Koul

dmaengine: hsu: speed up residue calculation

There is no need to calculate the overall length of the descriptor each time we
query the DMA transfer status. Instead, compute it once at descriptor allocation
time and keep the stored length for later use.
Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
parent f94cf9f4
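
In outline, the patch trades a per-query walk of the scatterlist for a single
accumulation at prep time. Below is a minimal, self-contained sketch of that
pattern; the structures and helpers (struct desc, prep_desc(), residue()) are
simplified stand-ins for illustration, not the hsu_dma definitions in the diff
that follows, and the hardware-register part of the driver's real residue
calculation is left out.

	#include <stddef.h>
	#include <stdio.h>

	struct sg_entry {
		size_t len;
	};

	struct desc {
		struct sg_entry sg[4];
		unsigned int nents;
		unsigned int active;	/* segments before this index are done */
		size_t length;		/* cached total, filled at prep time */
	};

	/* Prep time: accumulate the total once, as the patch does in
	 * hsu_dma_prep_slave_sg(). */
	static void prep_desc(struct desc *d, const size_t *lens, unsigned int n)
	{
		unsigned int i;

		d->nents = n;
		d->active = 0;
		d->length = 0;
		for (i = 0; i < n; i++) {
			d->sg[i].len = lens[i];
			d->length += lens[i];
		}
	}

	/* Status time: a not-yet-started descriptor's residue is simply the
	 * cached total (the vdesc branch of hsu_dma_tx_status()); for a running
	 * one, start from the total and subtract the completed segments.
	 * The real driver additionally reads hardware transfer-size registers
	 * for the in-flight segment, which is omitted here. */
	static size_t residue(const struct desc *d)
	{
		size_t bytes = d->length;
		unsigned int i;

		for (i = 0; i < d->active; i++)
			bytes -= d->sg[i].len;
		return bytes;
	}

	int main(void)
	{
		const size_t lens[] = { 4096, 4096, 2048 };
		struct desc d;

		prep_desc(&d, lens, 3);
		printf("residue before start: %zu\n", residue(&d));	/* 10240 */

		d.active = 1;	/* pretend the first segment has completed */
		printf("residue after one segment: %zu\n", residue(&d));	/* 6144 */
		return 0;
	}

The point of the design is that the residue query runs in the hot path of
dmaengine_tx_status(), while the total length is already known when the
scatterlist is walked at prep time, so caching it there costs nothing extra.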
@@ -228,6 +228,8 @@ static struct dma_async_tx_descriptor *hsu_dma_prep_slave_sg(
 	for_each_sg(sgl, sg, sg_len, i) {
 		desc->sg[i].addr = sg_dma_address(sg);
 		desc->sg[i].len = sg_dma_len(sg);
+
+		desc->length += sg_dma_len(sg);
 	}
 
 	desc->nents = sg_len;
@@ -249,21 +251,10 @@ static void hsu_dma_issue_pending(struct dma_chan *chan)
 	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
 }
 
-static size_t hsu_dma_desc_size(struct hsu_dma_desc *desc)
-{
-	size_t bytes = 0;
-	unsigned int i;
-
-	for (i = desc->active; i < desc->nents; i++)
-		bytes += desc->sg[i].len;
-
-	return bytes;
-}
-
 static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
 {
 	struct hsu_dma_desc *desc = hsuc->desc;
-	size_t bytes = hsu_dma_desc_size(desc);
+	size_t bytes = desc->length;
 	int i;
 
 	i = desc->active % HSU_DMA_CHAN_NR_DESC;
@@ -294,7 +285,7 @@ static enum dma_status hsu_dma_tx_status(struct dma_chan *chan,
 		dma_set_residue(state, bytes);
 		status = hsuc->desc->status;
 	} else if (vdesc) {
-		bytes = hsu_dma_desc_size(to_hsu_dma_desc(vdesc));
+		bytes = to_hsu_dma_desc(vdesc)->length;
 		dma_set_residue(state, bytes);
 	}
 	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
@@ -65,6 +65,7 @@ struct hsu_dma_desc {
 	enum dma_transfer_direction direction;
 	struct hsu_dma_sg *sg;
 	unsigned int nents;
+	size_t length;
 	unsigned int active;
 	enum dma_status status;
 };