Commit 9324fdf5 authored by Vinod Koul

Merge branch 'topic/core' into for-linus

parents 4983a501 5f88d970
--- a/Documentation/dmaengine/provider.txt
+++ b/Documentation/dmaengine/provider.txt
@@ -345,11 +345,12 @@ where to put them)
     that abstracts it away.
 
   * DMA_CTRL_ACK
-    - Undocumented feature
-    - No one really has an idea of what it's about, besides being
-      related to reusing the DMA transaction descriptors or having
-      additional transactions added to it in the async-tx API
-    - Useless in the case of the slave API
+    - If set, the transfer can be reused after being completed.
+    - There is a guarantee the transfer won't be freed until it is acked
+      by async_tx_ack().
+    - As a consequence, if a device driver wants to skip the dma_map_sg() and
+      dma_unmap_sg() in between 2 transfers, because the DMA'd data wasn't used,
+      it can resubmit the transfer right after its completion.
 
 General Design Notes
 --------------------
...
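To make the documented behaviour concrete, here is a minimal, hypothetical slave-client sketch (not part of this commit). It assumes the channel was requested and the scatterlist mapped with dma_map_sg() beforehand; reuse_transfer() and my_xfer_complete() are invented names.

#include <linux/completion.h>
#include <linux/dmaengine.h>

/* hypothetical completion callback: just signal the waiter */
static void my_xfer_complete(void *param)
{
	complete(param);
}

static int reuse_transfer(struct dma_chan *chan, struct scatterlist *sgl,
			  unsigned int sg_len, struct completion *xfer_done)
{
	struct dma_async_tx_descriptor *desc;
	int i;

	/* DMA_CTRL_ACK marks the descriptor as reusable once completed */
	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -ENOMEM;

	desc->callback = my_xfer_complete;
	desc->callback_param = xfer_done;

	for (i = 0; i < 2; i++) {
		/* no dma_unmap_sg()/dma_map_sg() between the two rounds */
		dmaengine_submit(desc);
		dma_async_issue_pending(chan);
		wait_for_completion(xfer_done);
		reinit_completion(xfer_done);
	}

	return 0;
}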
--- a/drivers/dma/virt-dma.c
+++ b/drivers/dma/virt-dma.c
@@ -29,7 +29,7 @@ dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
 
 	spin_lock_irqsave(&vc->lock, flags);
 	cookie = dma_cookie_assign(tx);
-	list_add_tail(&vd->node, &vc->desc_submitted);
+	list_move_tail(&vd->node, &vc->desc_submitted);
 	spin_unlock_irqrestore(&vc->lock, flags);
 
 	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
@@ -83,7 +83,9 @@ static void vchan_complete(unsigned long arg)
 		cb_data = vd->tx.callback_param;
 
 		list_del(&vd->node);
-		vc->desc_free(vd);
+		if (async_tx_test_ack(&vd->tx))
+			list_add(&vd->node, &vc->desc_allocated);
+		else
+			vc->desc_free(vd);
 
 		if (cb)
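Note how these hunks fit together: vchan_tx_prep() (see the virt-dma.h changes below) now parks every new descriptor on desc_allocated, so vchan_tx_submit() must migrate it with list_move_tail() instead of inserting it with list_add_tail(), and on completion an acked descriptor is returned to desc_allocated for reuse rather than being freed.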
@@ -96,10 +98,14 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
 	while (!list_empty(head)) {
 		struct virt_dma_desc *vd = list_first_entry(head,
 			struct virt_dma_desc, node);
-		list_del(&vd->node);
-		dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
-		vc->desc_free(vd);
+		if (async_tx_test_ack(&vd->tx)) {
+			list_move_tail(&vd->node, &vc->desc_allocated);
+		} else {
+			dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
+			list_del(&vd->node);
+			vc->desc_free(vd);
+		}
 	}
 }
 EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
@@ -108,6 +114,7 @@ void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
 	dma_cookie_init(&vc->chan);
 
 	spin_lock_init(&vc->lock);
+	INIT_LIST_HEAD(&vc->desc_allocated);
 	INIT_LIST_HEAD(&vc->desc_submitted);
 	INIT_LIST_HEAD(&vc->desc_issued);
 	INIT_LIST_HEAD(&vc->desc_completed);
...
--- a/drivers/dma/virt-dma.h
+++ b/drivers/dma/virt-dma.h
@@ -29,6 +29,7 @@ struct virt_dma_chan {
 	spinlock_t lock;
 
 	/* protected by vc.lock */
+	struct list_head desc_allocated;
 	struct list_head desc_submitted;
 	struct list_head desc_issued;
 	struct list_head desc_completed;
@@ -55,11 +56,16 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan
 	struct virt_dma_desc *vd, unsigned long tx_flags)
 {
 	extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
+	unsigned long flags;
 
 	dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
 	vd->tx.flags = tx_flags;
 	vd->tx.tx_submit = vchan_tx_submit;
 
+	spin_lock_irqsave(&vc->lock, flags);
+	list_add_tail(&vd->node, &vc->desc_allocated);
+	spin_unlock_irqrestore(&vc->lock, flags);
+
 	return &vd->tx;
 }
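For context, a rough sketch of how a virt-dma based driver's prep callback hands a freshly allocated descriptor to vchan_tx_prep(); the foo_* names are hypothetical, while vchan_tx_prep() and to_virt_chan() are the real virt-dma helpers. After this hunk, the returned descriptor is already tracked on desc_allocated by the time the client calls dmaengine_submit().

#include <linux/slab.h>
#include <linux/dmaengine.h>
#include "virt-dma.h"

struct foo_desc {
	struct virt_dma_desc vd;	/* embeds the virt-dma descriptor */
	dma_addr_t src, dst;
	size_t len;
};

static struct dma_async_tx_descriptor *
foo_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
		    size_t len, unsigned long flags)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	struct foo_desc *d = kzalloc(sizeof(*d), GFP_NOWAIT);

	if (!d)
		return NULL;

	d->src = src;
	d->dst = dst;
	d->len = len;

	/* initialises d->vd.tx and now also queues d->vd on desc_allocated */
	return vchan_tx_prep(vc, &d->vd, flags);
}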
@@ -122,7 +128,8 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
 }
 
 /**
- * vchan_get_all_descriptors - obtain all submitted and issued descriptors
+ * vchan_get_all_descriptors - obtain all allocated, submitted and issued
+ * descriptors
  * vc: virtual channel to get descriptors from
  * head: list of descriptors found
  *
@@ -134,6 +141,7 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
 static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
 	struct list_head *head)
 {
+	list_splice_tail_init(&vc->desc_allocated, head);
 	list_splice_tail_init(&vc->desc_submitted, head);
 	list_splice_tail_init(&vc->desc_issued, head);
 	list_splice_tail_init(&vc->desc_completed, head);
@@ -141,11 +149,14 @@ static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
 static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
 {
+	struct virt_dma_desc *vd;
 	unsigned long flags;
 	LIST_HEAD(head);
 
 	spin_lock_irqsave(&vc->lock, flags);
 	vchan_get_all_descriptors(vc, &head);
+	list_for_each_entry(vd, &head, node)
+		async_tx_clear_ack(&vd->tx);
 	spin_unlock_irqrestore(&vc->lock, flags);
 
 	vchan_dma_desc_free_list(vc, &head);
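Because completed-but-acked descriptors now survive on desc_allocated, channel teardown first clears the ACK bit on everything gathered by vchan_get_all_descriptors(), so that vchan_dma_desc_free_list() really frees them instead of recycling them back onto desc_allocated.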
...
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -123,10 +123,18 @@ enum dma_transfer_direction {
  *	 chunk and before first src/dst address for next chunk.
  *	 Ignored for dst(assumed 0), if dst_inc is true and dst_sgl is false.
  *	 Ignored for src(assumed 0), if src_inc is true and src_sgl is false.
+ * @dst_icg: Number of bytes to jump after last dst address of this
+ *	 chunk and before the first dst address for next chunk.
+ *	 Ignored if dst_inc is true and dst_sgl is false.
+ * @src_icg: Number of bytes to jump after last src address of this
+ *	 chunk and before the first src address for next chunk.
+ *	 Ignored if src_inc is true and src_sgl is false.
  */
 struct data_chunk {
 	size_t size;
 	size_t icg;
+	size_t dst_icg;
+	size_t src_icg;
 };
 
 /**
...
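As an illustration of the new fields (names and parameters invented, not part of this commit), a 2D copy whose source lines are packed but whose destination lines are strided could be described like this:

#include <linux/slab.h>
#include <linux/dmaengine.h>

static struct dma_async_tx_descriptor *
prep_strided_copy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
		  size_t line_len, size_t dst_stride, size_t height)
{
	struct dma_interleaved_template *xt;
	struct dma_async_tx_descriptor *desc;

	/* one data_chunk describing a single line of the frame */
	xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
	if (!xt)
		return NULL;

	xt->src_start = src;
	xt->dst_start = dst;
	xt->dir = DMA_MEM_TO_MEM;
	xt->src_inc = true;
	xt->dst_inc = true;
	xt->src_sgl = false;		/* src_icg ignored: lines are packed */
	xt->dst_sgl = true;		/* dst_icg below is honoured */
	xt->numf = height;		/* number of frames (lines) */
	xt->frame_size = 1;		/* one chunk per frame */
	xt->sgl[0].size = line_len;
	xt->sgl[0].icg = 0;		/* legacy shared gap, unused here */
	xt->sgl[0].src_icg = 0;
	xt->sgl[0].dst_icg = dst_stride - line_len;

	desc = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
	kfree(xt);	/* assumes the provider has consumed the template */
	return desc;
}

Per the kerneldoc above, dst_icg would be ignored here if dst_sgl were false while dst_inc is true.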