Commit 9136291f authored by Lior Amsalem, committed by Vinod Koul

dmaengine: mv_xor: bug fix for racing condition in descriptors cleanup

This patch fixes a bug in the XOR driver where the cleanup function could be
called and free descriptors that had never been processed by the engine
(which results in data errors).

The cleanup function now frees descriptors based on the ownership bit in
each descriptor; a simplified sketch of this rule follows the commit
metadata below.

Fixes: ff7b0479 ("dmaengine: DMA engine driver for Marvell XOR engine")
Signed-off-by: Lior Amsalem <alior@marvell.com>
Signed-off-by: Maxime Ripard <maxime.ripard@free-electrons.com>
Reviewed-by: Ofer Heifetz <oferh@marvell.com>
Cc: stable@vger.kernel.org
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
parent e5489d5e
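
To make the cleanup rule concrete, here is a minimal userspace sketch, not
the driver code: `fake_desc` and `reap_completed` are hypothetical names,
and the "engine" is simulated. It shows the policy the patch adopts: a
descriptor may be reaped only once the hardware has written its success
bit, and the walk stops at the first descriptor still in flight.

/*
 * Minimal userspace sketch (hypothetical names) of the cleanup rule:
 * only reap descriptors whose status word carries the hardware-written
 * success bit, and stop at the first one that does not.
 */
#include <stdio.h>
#include <stdint.h>

#define DESC_SUCCESS 0x40000000u	/* mirrors XOR_DESC_SUCCESS */

struct fake_desc {
	uint32_t status;	/* written by the "engine" on completion */
	int id;
};

/* Reap descriptors in submission order; return how many were safe to free. */
static int reap_completed(struct fake_desc *ring, int n)
{
	int freed = 0;

	for (int i = 0; i < n; i++) {
		if (!(ring[i].status & DESC_SUCCESS))
			break;		/* engine has not finished this one */
		printf("freeing desc %d\n", ring[i].id);
		freed++;
	}
	return freed;
}

int main(void)
{
	/* desc 2 is still in flight: its success bit is not set yet */
	struct fake_desc ring[] = {
		{ DESC_SUCCESS, 0 }, { DESC_SUCCESS, 1 }, { 0, 2 },
	};

	printf("reaped %d of 3\n", reap_completed(ring, 3));
	return 0;
}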
drivers/dma/mv_xor.c
@@ -273,7 +273,8 @@ static void mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
 	dma_cookie_t cookie = 0;
 	int busy = mv_chan_is_busy(mv_chan);
 	u32 current_desc = mv_chan_get_current_desc(mv_chan);
-	int seen_current = 0;
+	int current_cleaned = 0;
+	struct mv_xor_desc *hw_desc;
 
 	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
 	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
@@ -285,38 +286,57 @@ static void mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
 	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
 					chain_node) {
-		prefetch(_iter);
-		prefetch(&_iter->async_tx);
 
-		/* do not advance past the current descriptor loaded into the
-		 * hardware channel, subsequent descriptors are either in
-		 * process or have not been submitted
-		 */
-		if (seen_current)
-			break;
+		/* clean finished descriptors */
+		hw_desc = iter->hw_desc;
+		if (hw_desc->status & XOR_DESC_SUCCESS) {
+			cookie = mv_xor_run_tx_complete_actions(iter, mv_chan,
+								cookie);
 
-		/* stop the search if we reach the current descriptor and the
-		 * channel is busy
-		 */
-		if (iter->async_tx.phys == current_desc) {
-			seen_current = 1;
-			if (busy)
-				break;
+			/* done processing desc, clean slot */
+			mv_xor_clean_slot(iter, mv_chan);
+
+			/* break if we did cleaned the current */
+			if (iter->async_tx.phys == current_desc) {
+				current_cleaned = 1;
+				break;
+			}
+		} else {
+			if (iter->async_tx.phys == current_desc) {
+				current_cleaned = 0;
+				break;
+			}
 		}
-
-		cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);
-
-		if (mv_xor_clean_slot(iter, mv_chan))
-			break;
 	}
 
 	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
-		struct mv_xor_desc_slot *chain_head;
-
-		chain_head = list_entry(mv_chan->chain.next,
-					struct mv_xor_desc_slot,
-					chain_node);
-
-		mv_xor_start_new_chain(mv_chan, chain_head);
+		if (current_cleaned) {
+			/*
+			 * current descriptor cleaned and removed, run
+			 * from list head
+			 */
+			iter = list_entry(mv_chan->chain.next,
+					  struct mv_xor_desc_slot,
+					  chain_node);
+			mv_xor_start_new_chain(mv_chan, iter);
+		} else {
+			if (!list_is_last(&iter->chain_node, &mv_chan->chain)) {
+				/*
+				 * descriptors are still waiting after
+				 * current, trigger them
+				 */
+				iter = list_entry(iter->chain_node.next,
+						  struct mv_xor_desc_slot,
+						  chain_node);
+				mv_xor_start_new_chain(mv_chan, iter);
+			} else {
+				/*
+				 * some descriptors are still waiting
+				 * to be cleaned
+				 */
+				tasklet_schedule(&mv_chan->irq_tasklet);
+			}
+		}
 	}
 
 	if (cookie > 0)
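
After the walk, the restart logic distinguishes three cases. Below is a
hedged sketch of that decision; `pick_restart` and `stopped_at_tail` are
hypothetical names standing in for the list checks the driver performs
directly, and the outer guard (channel idle, chain non-empty) is assumed.

#include <stdbool.h>
#include <stdio.h>

enum restart_action { RESTART_FROM_HEAD, TRIGGER_NEXT, RESCHEDULE_TASKLET };

/*
 * Decide how to restart the channel after the cleanup walk.
 * current_cleaned: the descriptor the engine was executing was reaped.
 * stopped_at_tail: the walk stopped on the last descriptor in the chain.
 */
static enum restart_action pick_restart(bool current_cleaned,
					bool stopped_at_tail)
{
	if (current_cleaned)
		return RESTART_FROM_HEAD;	/* run from the new list head */
	if (!stopped_at_tail)
		return TRIGGER_NEXT;		/* kick the descriptor after current */
	return RESCHEDULE_TASKLET;		/* nothing ready: retry cleanup later */
}

int main(void)
{
	printf("%d\n", pick_restart(true, false));	/* RESTART_FROM_HEAD */
	return 0;
}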
drivers/dma/mv_xor.h
@@ -31,6 +31,7 @@
 #define XOR_OPERATION_MODE_XOR		0
 #define XOR_OPERATION_MODE_MEMCPY	2
 #define XOR_DESCRIPTOR_SWAP		BIT(14)
+#define XOR_DESC_SUCCESS		0x40000000
 
 #define XOR_DESC_DMA_OWNED		BIT(31)
 #define XOR_DESC_EOD_INT_EN		BIT(31)
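
A quick sanity check (a userspace assumption, not kernel code) that the new
flag occupies bit 30 of the descriptor status word, immediately below
XOR_DESC_DMA_OWNED at bit 31, so the two never alias:

#include <assert.h>

#define BIT(n)			(1u << (n))
#define XOR_DESC_SUCCESS	0x40000000u
#define XOR_DESC_DMA_OWNED	BIT(31)

int main(void)
{
	assert(XOR_DESC_SUCCESS == BIT(30));		/* bit 30 */
	assert(!(XOR_DESC_SUCCESS & XOR_DESC_DMA_OWNED));	/* disjoint bits */
	return 0;
}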