Commit b9138523 authored by Linus Torvalds

Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx:
  fsldma: allow Freescale Elo DMA driver to be compiled as a module
  fsldma: remove internal self-test from Freescale Elo DMA driver
  drivers/dma/dmatest.c: switch a GFP_ATOMIC to GFP_KERNEL
  dmatest: properly handle duplicate DMA channels
  drivers/dma/ioat_dma.c: drop code after return
  async_tx: make async_tx_run_dependencies() easier to read
parents d67ae206 77cd62e8
...@@ -115,34 +115,32 @@ EXPORT_SYMBOL_GPL(dma_wait_for_async_tx); ...@@ -115,34 +115,32 @@ EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
* (start) dependent operations on their target channel * (start) dependent operations on their target channel
* @tx: transaction with dependencies * @tx: transaction with dependencies
*/ */
void void async_tx_run_dependencies(struct dma_async_tx_descriptor *tx)
async_tx_run_dependencies(struct dma_async_tx_descriptor *tx)
{ {
struct dma_async_tx_descriptor *next = tx->next; struct dma_async_tx_descriptor *dep = tx->next;
struct dma_async_tx_descriptor *dep_next;
struct dma_chan *chan; struct dma_chan *chan;
if (!next) if (!dep)
return; return;
tx->next = NULL; chan = dep->chan;
chan = next->chan;
/* keep submitting up until a channel switch is detected /* keep submitting up until a channel switch is detected
* in that case we will be called again as a result of * in that case we will be called again as a result of
* processing the interrupt from async_tx_channel_switch * processing the interrupt from async_tx_channel_switch
*/ */
while (next && next->chan == chan) { for (; dep; dep = dep_next) {
struct dma_async_tx_descriptor *_next; spin_lock_bh(&dep->lock);
dep->parent = NULL;
spin_lock_bh(&next->lock); dep_next = dep->next;
next->parent = NULL; if (dep_next && dep_next->chan == chan)
_next = next->next; dep->next = NULL; /* ->next will be submitted */
if (_next && _next->chan == chan) else
next->next = NULL; dep_next = NULL; /* submit current dep and terminate */
spin_unlock_bh(&next->lock); spin_unlock_bh(&dep->lock);
next->tx_submit(next); dep->tx_submit(dep);
next = _next;
} }
chan->device->device_issue_pending(chan); chan->device->device_issue_pending(chan);
......
...@@ -48,13 +48,13 @@ config DW_DMAC ...@@ -48,13 +48,13 @@ config DW_DMAC
can be integrated in chips such as the Atmel AT32ap7000. can be integrated in chips such as the Atmel AT32ap7000.
config FSL_DMA config FSL_DMA
bool "Freescale MPC85xx/MPC83xx DMA support" tristate "Freescale Elo and Elo Plus DMA support"
depends on PPC depends on FSL_SOC
select DMA_ENGINE select DMA_ENGINE
---help--- ---help---
Enable support for the Freescale DMA engine. Now, it support Enable support for the Freescale Elo and Elo Plus DMA controllers.
MPC8560/40, MPC8555, MPC8548 and MPC8641 processors. The Elo is the DMA controller on some 82xx and 83xx parts, and the
The MPC8349, MPC8360 is also supported. Elo Plus is the DMA controller on 85xx and 86xx parts.
config MV_XOR config MV_XOR
bool "Marvell XOR engine support" bool "Marvell XOR engine support"
......
...@@ -325,7 +325,12 @@ static enum dma_state_client dmatest_add_channel(struct dma_chan *chan) ...@@ -325,7 +325,12 @@ static enum dma_state_client dmatest_add_channel(struct dma_chan *chan)
struct dmatest_thread *thread; struct dmatest_thread *thread;
unsigned int i; unsigned int i;
dtc = kmalloc(sizeof(struct dmatest_chan), GFP_ATOMIC); /* Have we already been told about this channel? */
list_for_each_entry(dtc, &dmatest_channels, node)
if (dtc->chan == chan)
return DMA_DUP;
dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
if (!dtc) { if (!dtc) {
pr_warning("dmatest: No memory for %s\n", chan->dev.bus_id); pr_warning("dmatest: No memory for %s\n", chan->dev.bus_id);
return DMA_NAK; return DMA_NAK;
......
This diff is collapsed.
...@@ -114,6 +114,7 @@ struct fsl_dma_device { ...@@ -114,6 +114,7 @@ struct fsl_dma_device {
struct dma_device common; struct dma_device common;
struct fsl_dma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE]; struct fsl_dma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE];
u32 feature; /* The same as DMA channels */ u32 feature; /* The same as DMA channels */
int irq; /* Channel IRQ */
}; };
/* Define macros for fsl_dma_chan->feature property */ /* Define macros for fsl_dma_chan->feature property */
......
...@@ -971,11 +971,9 @@ static struct ioat_desc_sw *ioat_dma_get_next_descriptor( ...@@ -971,11 +971,9 @@ static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
switch (ioat_chan->device->version) { switch (ioat_chan->device->version) {
case IOAT_VER_1_2: case IOAT_VER_1_2:
return ioat1_dma_get_next_descriptor(ioat_chan); return ioat1_dma_get_next_descriptor(ioat_chan);
break;
case IOAT_VER_2_0: case IOAT_VER_2_0:
case IOAT_VER_3_0: case IOAT_VER_3_0:
return ioat2_dma_get_next_descriptor(ioat_chan); return ioat2_dma_get_next_descriptor(ioat_chan);
break;
} }
return NULL; return NULL;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment