Commit edf10919 authored by Sylvain Lesne's avatar Sylvain Lesne Committed by Vinod Koul

dmaengine: altera: fix spinlock usage

Since this lock is acquired in both process and IRQ context, failing to
disable IRQs when trying to acquire the lock in process context can
lead to deadlocks.
Signed-off-by: Sylvain Lesne <lesne@alse-fr.com>
Reviewed-by: Stefan Roese <sr@denx.de>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
parent d9ec4641
...@@ -212,11 +212,12 @@ struct msgdma_device { ...@@ -212,11 +212,12 @@ struct msgdma_device {
static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev) static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev)
{ {
struct msgdma_sw_desc *desc; struct msgdma_sw_desc *desc;
unsigned long flags;
spin_lock_bh(&mdev->lock); spin_lock_irqsave(&mdev->lock, flags);
desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node); desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node);
list_del(&desc->node); list_del(&desc->node);
spin_unlock_bh(&mdev->lock); spin_unlock_irqrestore(&mdev->lock, flags);
INIT_LIST_HEAD(&desc->tx_list); INIT_LIST_HEAD(&desc->tx_list);
...@@ -306,13 +307,14 @@ static dma_cookie_t msgdma_tx_submit(struct dma_async_tx_descriptor *tx) ...@@ -306,13 +307,14 @@ static dma_cookie_t msgdma_tx_submit(struct dma_async_tx_descriptor *tx)
struct msgdma_device *mdev = to_mdev(tx->chan); struct msgdma_device *mdev = to_mdev(tx->chan);
struct msgdma_sw_desc *new; struct msgdma_sw_desc *new;
dma_cookie_t cookie; dma_cookie_t cookie;
unsigned long flags;
new = tx_to_desc(tx); new = tx_to_desc(tx);
spin_lock_bh(&mdev->lock); spin_lock_irqsave(&mdev->lock, flags);
cookie = dma_cookie_assign(tx); cookie = dma_cookie_assign(tx);
list_add_tail(&new->node, &mdev->pending_list); list_add_tail(&new->node, &mdev->pending_list);
spin_unlock_bh(&mdev->lock); spin_unlock_irqrestore(&mdev->lock, flags);
return cookie; return cookie;
} }
...@@ -336,17 +338,18 @@ msgdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst, ...@@ -336,17 +338,18 @@ msgdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
struct msgdma_extended_desc *desc; struct msgdma_extended_desc *desc;
size_t copy; size_t copy;
u32 desc_cnt; u32 desc_cnt;
unsigned long irqflags;
desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN); desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN);
spin_lock_bh(&mdev->lock); spin_lock_irqsave(&mdev->lock, irqflags);
if (desc_cnt > mdev->desc_free_cnt) { if (desc_cnt > mdev->desc_free_cnt) {
spin_unlock_bh(&mdev->lock); spin_unlock_bh(&mdev->lock);
dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev); dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
return NULL; return NULL;
} }
mdev->desc_free_cnt -= desc_cnt; mdev->desc_free_cnt -= desc_cnt;
spin_unlock_bh(&mdev->lock); spin_unlock_irqrestore(&mdev->lock, irqflags);
do { do {
/* Allocate and populate the descriptor */ /* Allocate and populate the descriptor */
...@@ -397,18 +400,19 @@ msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, ...@@ -397,18 +400,19 @@ msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
u32 desc_cnt = 0, i; u32 desc_cnt = 0, i;
struct scatterlist *sg; struct scatterlist *sg;
u32 stride; u32 stride;
unsigned long irqflags;
for_each_sg(sgl, sg, sg_len, i) for_each_sg(sgl, sg, sg_len, i)
desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN); desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN);
spin_lock_bh(&mdev->lock); spin_lock_irqsave(&mdev->lock, irqflags);
if (desc_cnt > mdev->desc_free_cnt) { if (desc_cnt > mdev->desc_free_cnt) {
spin_unlock_bh(&mdev->lock); spin_unlock_bh(&mdev->lock);
dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev); dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
return NULL; return NULL;
} }
mdev->desc_free_cnt -= desc_cnt; mdev->desc_free_cnt -= desc_cnt;
spin_unlock_bh(&mdev->lock); spin_unlock_irqrestore(&mdev->lock, irqflags);
avail = sg_dma_len(sgl); avail = sg_dma_len(sgl);
...@@ -566,10 +570,11 @@ static void msgdma_start_transfer(struct msgdma_device *mdev) ...@@ -566,10 +570,11 @@ static void msgdma_start_transfer(struct msgdma_device *mdev)
static void msgdma_issue_pending(struct dma_chan *chan) static void msgdma_issue_pending(struct dma_chan *chan)
{ {
struct msgdma_device *mdev = to_mdev(chan); struct msgdma_device *mdev = to_mdev(chan);
unsigned long flags;
spin_lock_bh(&mdev->lock); spin_lock_irqsave(&mdev->lock, flags);
msgdma_start_transfer(mdev); msgdma_start_transfer(mdev);
spin_unlock_bh(&mdev->lock); spin_unlock_irqrestore(&mdev->lock, flags);
} }
/** /**
...@@ -634,10 +639,11 @@ static void msgdma_free_descriptors(struct msgdma_device *mdev) ...@@ -634,10 +639,11 @@ static void msgdma_free_descriptors(struct msgdma_device *mdev)
static void msgdma_free_chan_resources(struct dma_chan *dchan) static void msgdma_free_chan_resources(struct dma_chan *dchan)
{ {
struct msgdma_device *mdev = to_mdev(dchan); struct msgdma_device *mdev = to_mdev(dchan);
unsigned long flags;
spin_lock_bh(&mdev->lock); spin_lock_irqsave(&mdev->lock, flags);
msgdma_free_descriptors(mdev); msgdma_free_descriptors(mdev);
spin_unlock_bh(&mdev->lock); spin_unlock_irqrestore(&mdev->lock, flags);
kfree(mdev->sw_desq); kfree(mdev->sw_desq);
} }
...@@ -682,8 +688,9 @@ static void msgdma_tasklet(unsigned long data) ...@@ -682,8 +688,9 @@ static void msgdma_tasklet(unsigned long data)
u32 count; u32 count;
u32 __maybe_unused size; u32 __maybe_unused size;
u32 __maybe_unused status; u32 __maybe_unused status;
unsigned long flags;
spin_lock(&mdev->lock); spin_lock_irqsave(&mdev->lock, flags);
/* Read number of responses that are available */ /* Read number of responses that are available */
count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL); count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
...@@ -704,7 +711,7 @@ static void msgdma_tasklet(unsigned long data) ...@@ -704,7 +711,7 @@ static void msgdma_tasklet(unsigned long data)
msgdma_chan_desc_cleanup(mdev); msgdma_chan_desc_cleanup(mdev);
} }
spin_unlock(&mdev->lock); spin_unlock_irqrestore(&mdev->lock, flags);
} }
/** /**
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment