Commit a5b21a8b authored by Michael Tretter, committed by Vinod Koul

dmaengine: zynqmp_dma: replace spin_lock_bh with spin_lock_irqsave

All device_prep_dma_* functions and device_issue_pending can be called
from an interrupt context. As this includes hard IRQs, we must use
spin_lock_irqsave() instead of spin_lock_bh() to access chan->lock.
Signed-off-by: Michael Tretter <m.tretter@pengutronix.de>
Signed-off-by: Vinod Koul <vkoul@kernel.org>
parent 65102238
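
For readers unfamiliar with the distinction, the sketch below (hypothetical foo_chan type and foo_* functions, not code from this driver) shows why the change matters: spin_lock_bh() only disables softirqs, so a hard IRQ arriving on the same CPU can preempt the critical section and deadlock if its handler takes the same lock, while spin_lock_irqsave() disables local interrupts and restores the saved state on unlock, making the section safe to enter from any context.

#include <linux/spinlock.h>

/* Hypothetical channel, for illustration only. */
struct foo_chan {
	spinlock_t lock;
	int pending;
};

/* Unsafe if reachable from hard-IRQ context: spin_lock_bh() leaves
 * hard IRQs enabled, so an IRQ handler that also takes chan->lock
 * can deadlock against this critical section on the same CPU. */
static void foo_issue_pending_bh(struct foo_chan *chan)
{
	spin_lock_bh(&chan->lock);
	chan->pending++;
	spin_unlock_bh(&chan->lock);
}

/* Safe from any context: local interrupts are disabled and the
 * previous interrupt state is restored on unlock. This is the
 * pattern the patch below applies at every chan->lock site. */
static void foo_issue_pending_irqsave(struct foo_chan *chan)
{
	unsigned long irqflags;

	spin_lock_irqsave(&chan->lock, irqflags);
	chan->pending++;
	spin_unlock_irqrestore(&chan->lock, irqflags);
}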
@@ -375,9 +375,10 @@ static dma_cookie_t zynqmp_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	struct zynqmp_dma_chan *chan = to_chan(tx->chan);
 	struct zynqmp_dma_desc_sw *desc, *new;
 	dma_cookie_t cookie;
+	unsigned long irqflags;
 
 	new = tx_to_desc(tx);
-	spin_lock_bh(&chan->lock);
+	spin_lock_irqsave(&chan->lock, irqflags);
 	cookie = dma_cookie_assign(tx);
 
 	if (!list_empty(&chan->pending_list)) {
@@ -393,7 +394,7 @@ static dma_cookie_t zynqmp_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	}
 
 	list_add_tail(&new->node, &chan->pending_list);
-	spin_unlock_bh(&chan->lock);
+	spin_unlock_irqrestore(&chan->lock, irqflags);
 
 	return cookie;
 }
@@ -408,12 +409,13 @@ static struct zynqmp_dma_desc_sw *
 zynqmp_dma_get_descriptor(struct zynqmp_dma_chan *chan)
 {
 	struct zynqmp_dma_desc_sw *desc;
+	unsigned long irqflags;
 
-	spin_lock_bh(&chan->lock);
+	spin_lock_irqsave(&chan->lock, irqflags);
 	desc = list_first_entry(&chan->free_list,
 				struct zynqmp_dma_desc_sw, node);
 	list_del(&desc->node);
-	spin_unlock_bh(&chan->lock);
+	spin_unlock_irqrestore(&chan->lock, irqflags);
 
 	INIT_LIST_HEAD(&desc->tx_list);
 	/* Clear the src and dst descriptor memory */
@@ -643,10 +645,11 @@ static void zynqmp_dma_complete_descriptor(struct zynqmp_dma_chan *chan)
 static void zynqmp_dma_issue_pending(struct dma_chan *dchan)
 {
 	struct zynqmp_dma_chan *chan = to_chan(dchan);
+	unsigned long irqflags;
 
-	spin_lock_bh(&chan->lock);
+	spin_lock_irqsave(&chan->lock, irqflags);
 	zynqmp_dma_start_transfer(chan);
-	spin_unlock_bh(&chan->lock);
+	spin_unlock_irqrestore(&chan->lock, irqflags);
 }
 
 /**
@@ -667,10 +670,11 @@ static void zynqmp_dma_free_descriptors(struct zynqmp_dma_chan *chan)
 static void zynqmp_dma_free_chan_resources(struct dma_chan *dchan)
 {
 	struct zynqmp_dma_chan *chan = to_chan(dchan);
+	unsigned long irqflags;
 
-	spin_lock_bh(&chan->lock);
+	spin_lock_irqsave(&chan->lock, irqflags);
 	zynqmp_dma_free_descriptors(chan);
-	spin_unlock_bh(&chan->lock);
+	spin_unlock_irqrestore(&chan->lock, irqflags);
 	dma_free_coherent(chan->dev,
 		(2 * ZYNQMP_DMA_DESC_SIZE(chan) * ZYNQMP_DMA_NUM_DESCS),
 		chan->desc_pool_v, chan->desc_pool_p);
@@ -743,8 +747,9 @@ static void zynqmp_dma_do_tasklet(unsigned long data)
 {
 	struct zynqmp_dma_chan *chan = (struct zynqmp_dma_chan *)data;
 	u32 count;
+	unsigned long irqflags;
 
-	spin_lock(&chan->lock);
+	spin_lock_irqsave(&chan->lock, irqflags);
 	if (chan->err) {
 		zynqmp_dma_reset(chan);
@@ -764,7 +769,7 @@ static void zynqmp_dma_do_tasklet(unsigned long data)
 	zynqmp_dma_start_transfer(chan);
 
 unlock:
-	spin_unlock(&chan->lock);
+	spin_unlock_irqrestore(&chan->lock, irqflags);
 }
 
 /**
@@ -776,11 +781,12 @@ static void zynqmp_dma_do_tasklet(unsigned long data)
 static int zynqmp_dma_device_terminate_all(struct dma_chan *dchan)
 {
 	struct zynqmp_dma_chan *chan = to_chan(dchan);
+	unsigned long irqflags;
 
-	spin_lock_bh(&chan->lock);
+	spin_lock_irqsave(&chan->lock, irqflags);
 	writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS);
 	zynqmp_dma_free_descriptors(chan);
-	spin_unlock_bh(&chan->lock);
+	spin_unlock_irqrestore(&chan->lock, irqflags);
 	return 0;
 }
@@ -804,19 +810,20 @@ static struct dma_async_tx_descriptor *zynqmp_dma_prep_memcpy(
 	void *desc = NULL, *prev = NULL;
 	size_t copy;
 	u32 desc_cnt;
+	unsigned long irqflags;
 
 	chan = to_chan(dchan);
 
 	desc_cnt = DIV_ROUND_UP(len, ZYNQMP_DMA_MAX_TRANS_LEN);
 
-	spin_lock_bh(&chan->lock);
+	spin_lock_irqsave(&chan->lock, irqflags);
 	if (desc_cnt > chan->desc_free_cnt) {
-		spin_unlock_bh(&chan->lock);
+		spin_unlock_irqrestore(&chan->lock, irqflags);
 		dev_dbg(chan->dev, "chan %p descs are not available\n", chan);
 		return NULL;
 	}
 	chan->desc_free_cnt = chan->desc_free_cnt - desc_cnt;
-	spin_unlock_bh(&chan->lock);
+	spin_unlock_irqrestore(&chan->lock, irqflags);
 
 	do {
 		/* Allocate and populate the descriptor */
...