Commit 5eec9438 authored by Vinod Koul

Merge branch 'topic/desc_reuse' into for-linus

parents 0c328de7 d3651b8e
@@ -493,6 +493,7 @@ int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
 	caps->dst_addr_widths = device->dst_addr_widths;
 	caps->directions = device->directions;
 	caps->residue_granularity = device->residue_granularity;
+	caps->descriptor_reuse = device->descriptor_reuse;

 	/*
 	 * Some devices implement only pause (e.g. to get residuum) but no
...
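With this capability exported, a DMA client can ask a channel whether descriptor reuse is supported before relying on it. A minimal sketch of such a check, assuming "chan" is a slave channel obtained elsewhere; the helper name is illustrative and not part of this patch:

#include <linux/dmaengine.h>

/* Illustrative helper: returns true only if the channel advertises
 * descriptor reuse through the new dma_slave_caps field.
 */
static bool chan_supports_desc_reuse(struct dma_chan *chan)
{
	struct dma_slave_caps caps;

	if (dma_get_slave_caps(chan, &caps))
		return false;	/* capability query unsupported or failed */

	return caps.descriptor_reuse;
}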
@@ -1414,6 +1414,7 @@ static int pxad_probe(struct platform_device *op)
 	pdev->slave.dst_addr_widths = widths;
 	pdev->slave.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
 	pdev->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+	pdev->slave.descriptor_reuse = true;
 	pdev->slave.dev = &op->dev;
 	ret = pxad_init_dmadev(op, pdev, dma_channels);
...
@@ -29,7 +29,7 @@ dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
 	spin_lock_irqsave(&vc->lock, flags);
 	cookie = dma_cookie_assign(tx);
-	list_add_tail(&vd->node, &vc->desc_submitted);
+	list_move_tail(&vd->node, &vc->desc_submitted);
 	spin_unlock_irqrestore(&vc->lock, flags);

 	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
@@ -39,6 +39,33 @@ dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
 }
 EXPORT_SYMBOL_GPL(vchan_tx_submit);

+/**
+ * vchan_tx_desc_free - free a reusable descriptor
+ * @tx: the transfer
+ *
+ * This function frees a previously allocated reusable descriptor. The only
+ * other way to free it is to clear the DMA_CTRL_REUSE flag and submit the
+ * transfer one last time.
+ *
+ * Returns 0 upon success.
+ */
+int vchan_tx_desc_free(struct dma_async_tx_descriptor *tx)
+{
+	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
+	struct virt_dma_desc *vd = to_virt_desc(tx);
+	unsigned long flags;
+
+	spin_lock_irqsave(&vc->lock, flags);
+	list_del(&vd->node);
+	spin_unlock_irqrestore(&vc->lock, flags);
+
+	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: freeing\n",
+		vc, vd, vd->tx.cookie);
+	vc->desc_free(vd);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vchan_tx_desc_free);
+
 struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc,
 	dma_cookie_t cookie)
 {
@@ -83,8 +110,10 @@ static void vchan_complete(unsigned long arg)
 		cb_data = vd->tx.callback_param;

 		list_del(&vd->node);
-		vc->desc_free(vd);
+		if (dmaengine_desc_test_reuse(&vd->tx))
+			list_add(&vd->node, &vc->desc_allocated);
+		else
+			vc->desc_free(vd);

 		if (cb)
 			cb(cb_data);
@@ -96,9 +125,13 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
 	while (!list_empty(head)) {
 		struct virt_dma_desc *vd = list_first_entry(head,
 			struct virt_dma_desc, node);
-		list_del(&vd->node);
-		dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
-		vc->desc_free(vd);
+		if (dmaengine_desc_test_reuse(&vd->tx)) {
+			list_move_tail(&vd->node, &vc->desc_allocated);
+		} else {
+			dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
+			list_del(&vd->node);
+			vc->desc_free(vd);
+		}
 	}
 }
 EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
@@ -108,6 +141,7 @@ void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
 	dma_cookie_init(&vc->chan);

 	spin_lock_init(&vc->lock);
+	INIT_LIST_HEAD(&vc->desc_allocated);
 	INIT_LIST_HEAD(&vc->desc_submitted);
 	INIT_LIST_HEAD(&vc->desc_issued);
 	INIT_LIST_HEAD(&vc->desc_completed);
...
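Taken together, the virt-dma changes let a client mark a transfer as reusable, resubmit the very same descriptor after each completion, and release it through the desc_free hook installed at prep time. A hedged sketch of that client-side lifecycle follows; the function name start_reusable_rx and the channel/scatterlist setup are illustrative placeholders, while dmaengine_desc_set_reuse() and dmaengine_desc_free() are the generic helpers added earlier in this topic branch:

#include <linux/dmaengine.h>

/* Illustrative client flow: prepare one slave descriptor, mark it reusable
 * and submit it; after completion the vchan core parks it on desc_allocated
 * so the same descriptor can be submitted again without a new prep call.
 */
static int start_reusable_rx(struct dma_chan *chan,
			     struct scatterlist *sgl, unsigned int sg_len)
{
	struct dma_async_tx_descriptor *tx;

	tx = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
				     DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	if (dmaengine_desc_set_reuse(tx))
		return -EPERM;	/* channel lacks descriptor_reuse; the vchan
				 * descriptor stays on desc_allocated and is
				 * reclaimed when channel resources are freed */

	dmaengine_submit(tx);		/* vchan_tx_submit() moves it to desc_submitted */
	dma_async_issue_pending(chan);

	/*
	 * After each completion the descriptor is back on desc_allocated and
	 * may simply be submitted again.  Once it is no longer needed, either
	 * call dmaengine_desc_free(tx), which ends up in vchan_tx_desc_free(),
	 * or clear the reuse flag and let the final completion free it.
	 */
	return 0;
}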
@@ -29,6 +29,7 @@ struct virt_dma_chan {
 	spinlock_t lock;

 	/* protected by vc.lock */
+	struct list_head desc_allocated;
 	struct list_head desc_submitted;
 	struct list_head desc_issued;
 	struct list_head desc_completed;
@@ -55,10 +56,17 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan
 	struct virt_dma_desc *vd, unsigned long tx_flags)
 {
 	extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
+	extern int vchan_tx_desc_free(struct dma_async_tx_descriptor *);
+	unsigned long flags;

 	dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
 	vd->tx.flags = tx_flags;
 	vd->tx.tx_submit = vchan_tx_submit;
+	vd->tx.desc_free = vchan_tx_desc_free;
+
+	spin_lock_irqsave(&vc->lock, flags);
+	list_add_tail(&vd->node, &vc->desc_allocated);
+	spin_unlock_irqrestore(&vc->lock, flags);

 	return &vd->tx;
 }
@@ -134,6 +142,7 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
 static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
 	struct list_head *head)
 {
+	list_splice_tail_init(&vc->desc_allocated, head);
 	list_splice_tail_init(&vc->desc_submitted, head);
 	list_splice_tail_init(&vc->desc_issued, head);
 	list_splice_tail_init(&vc->desc_completed, head);
@@ -141,11 +150,14 @@ static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
 static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
 {
+	struct virt_dma_desc *vd;
 	unsigned long flags;
 	LIST_HEAD(head);

 	spin_lock_irqsave(&vc->lock, flags);
 	vchan_get_all_descriptors(vc, &head);
+	list_for_each_entry(vd, &head, node)
+		dmaengine_desc_clear_reuse(&vd->tx);
 	spin_unlock_irqrestore(&vc->lock, flags);

 	vchan_dma_desc_free_list(vc, &head);
...
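Because descriptors are now tracked on desc_allocated from prep time and vchan_free_chan_resources() strips the reuse flag before freeing, the usual vchan cleanup paths keep working unchanged. For illustration, a typical terminate_all in a vchan-based driver looks roughly like the sketch below (my_terminate_all and the hardware-stop step are placeholders, not from this patch); reusable descriptors collected here are simply parked back on desc_allocated by vchan_dma_desc_free_list():

#include "virt-dma.h"

/* Illustrative terminate_all for a vchan-based driver: collect every
 * descriptor (allocated, submitted, issued, completed) and hand the list
 * to vchan_dma_desc_free_list(), which now keeps reusable ones around.
 */
static int my_terminate_all(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&vc->lock, flags);
	/* ... stop the hardware channel here ... */
	vchan_get_all_descriptors(vc, &head);
	spin_unlock_irqrestore(&vc->lock, flags);

	vchan_dma_desc_free_list(vc, &head);
	return 0;
}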
@@ -659,6 +659,7 @@ enum dmaengine_alignment {
  *	struct with auxiliary transfer status information, otherwise the call
  *	will just return a simple status code
  * @device_issue_pending: push pending transactions to hardware
+ * @descriptor_reuse: a submitted transfer can be resubmitted after completion
  */
 struct dma_device {
@@ -681,6 +682,7 @@ struct dma_device {
 	u32 src_addr_widths;
 	u32 dst_addr_widths;
 	u32 directions;
+	bool descriptor_reuse;
 	enum dma_residue_granularity residue_granularity;

 	int (*device_alloc_chan_resources)(struct dma_chan *chan);
...