Commit d16695a7 authored by Ezequiel Garcia, committed by Dan Williams

dma: mv_xor: Use dmaengine_unmap_data for the self-tests

The driver-specific unmap code was removed in:

  commit 54f8d501
  Author: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
  Date:   Fri Oct 18 19:35:32 2013 +0200

      dmaengine: remove DMA unmap from drivers

which had the side-effect of not unmapping the self-test mappings.
Fix this by using dmaengine_unmap_data in the self-test routines.

In addition, since dmaengine_unmap() assumes that all mappings were created
with dma_map_page, this commit changes the single mapping to a page mapping
to avoid an incorrect unmapping of the memcpy self-test.

The allocation could be changed to be alloc_page(), but sticking to kmalloc
results in a less intrusive patch. The size of the test buffer is increased,
since dma_map_page() seems to fail when the source and destination pages are
the same page.
Signed-off-by: Ezequiel Garcia <ezequiel.garcia@free-electrons.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent 3cc377b9
...@@ -781,7 +781,6 @@ static void mv_xor_issue_pending(struct dma_chan *chan) ...@@ -781,7 +781,6 @@ static void mv_xor_issue_pending(struct dma_chan *chan)
/* /*
* Perform a transaction to verify the HW works. * Perform a transaction to verify the HW works.
*/ */
#define MV_XOR_TEST_SIZE 2000
static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
{ {
...@@ -791,20 +790,21 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) ...@@ -791,20 +790,21 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
struct dma_chan *dma_chan; struct dma_chan *dma_chan;
dma_cookie_t cookie; dma_cookie_t cookie;
struct dma_async_tx_descriptor *tx; struct dma_async_tx_descriptor *tx;
struct dmaengine_unmap_data *unmap;
int err = 0; int err = 0;
src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL); src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
if (!src) if (!src)
return -ENOMEM; return -ENOMEM;
dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL); dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
if (!dest) { if (!dest) {
kfree(src); kfree(src);
return -ENOMEM; return -ENOMEM;
} }
/* Fill in src buffer */ /* Fill in src buffer */
for (i = 0; i < MV_XOR_TEST_SIZE; i++) for (i = 0; i < PAGE_SIZE; i++)
((u8 *) src)[i] = (u8)i; ((u8 *) src)[i] = (u8)i;
dma_chan = &mv_chan->dmachan; dma_chan = &mv_chan->dmachan;
...@@ -813,14 +813,26 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) ...@@ -813,14 +813,26 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
goto out; goto out;
} }
dest_dma = dma_map_single(dma_chan->device->dev, dest, unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
MV_XOR_TEST_SIZE, DMA_FROM_DEVICE); if (!unmap) {
err = -ENOMEM;
goto free_resources;
}
src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
PAGE_SIZE, DMA_TO_DEVICE);
unmap->to_cnt = 1;
unmap->addr[0] = src_dma;
src_dma = dma_map_single(dma_chan->device->dev, src, dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
MV_XOR_TEST_SIZE, DMA_TO_DEVICE); PAGE_SIZE, DMA_FROM_DEVICE);
unmap->from_cnt = 1;
unmap->addr[1] = dest_dma;
unmap->len = PAGE_SIZE;
tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma, tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
MV_XOR_TEST_SIZE, 0); PAGE_SIZE, 0);
cookie = mv_xor_tx_submit(tx); cookie = mv_xor_tx_submit(tx);
mv_xor_issue_pending(dma_chan); mv_xor_issue_pending(dma_chan);
async_tx_ack(tx); async_tx_ack(tx);
...@@ -835,8 +847,8 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) ...@@ -835,8 +847,8 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
} }
dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma, dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
MV_XOR_TEST_SIZE, DMA_FROM_DEVICE); PAGE_SIZE, DMA_FROM_DEVICE);
if (memcmp(src, dest, MV_XOR_TEST_SIZE)) { if (memcmp(src, dest, PAGE_SIZE)) {
dev_err(dma_chan->device->dev, dev_err(dma_chan->device->dev,
"Self-test copy failed compare, disabling\n"); "Self-test copy failed compare, disabling\n");
err = -ENODEV; err = -ENODEV;
...@@ -844,6 +856,7 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan) ...@@ -844,6 +856,7 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
} }
free_resources: free_resources:
dmaengine_unmap_put(unmap);
mv_xor_free_chan_resources(dma_chan); mv_xor_free_chan_resources(dma_chan);
out: out:
kfree(src); kfree(src);
...@@ -861,13 +874,15 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan) ...@@ -861,13 +874,15 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST]; dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
dma_addr_t dest_dma; dma_addr_t dest_dma;
struct dma_async_tx_descriptor *tx; struct dma_async_tx_descriptor *tx;
struct dmaengine_unmap_data *unmap;
struct dma_chan *dma_chan; struct dma_chan *dma_chan;
dma_cookie_t cookie; dma_cookie_t cookie;
u8 cmp_byte = 0; u8 cmp_byte = 0;
u32 cmp_word; u32 cmp_word;
int err = 0; int err = 0;
int src_count = MV_XOR_NUM_SRC_TEST;
for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) { for (src_idx = 0; src_idx < src_count; src_idx++) {
xor_srcs[src_idx] = alloc_page(GFP_KERNEL); xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
if (!xor_srcs[src_idx]) { if (!xor_srcs[src_idx]) {
while (src_idx--) while (src_idx--)
...@@ -884,13 +899,13 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan) ...@@ -884,13 +899,13 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
} }
/* Fill in src buffers */ /* Fill in src buffers */
for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) { for (src_idx = 0; src_idx < src_count; src_idx++) {
u8 *ptr = page_address(xor_srcs[src_idx]); u8 *ptr = page_address(xor_srcs[src_idx]);
for (i = 0; i < PAGE_SIZE; i++) for (i = 0; i < PAGE_SIZE; i++)
ptr[i] = (1 << src_idx); ptr[i] = (1 << src_idx);
} }
for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) for (src_idx = 0; src_idx < src_count; src_idx++)
cmp_byte ^= (u8) (1 << src_idx); cmp_byte ^= (u8) (1 << src_idx);
cmp_word = (cmp_byte << 24) | (cmp_byte << 16) | cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
...@@ -904,16 +919,29 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan) ...@@ -904,16 +919,29 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
goto out; goto out;
} }
/* test xor */ unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE, GFP_KERNEL);
DMA_FROM_DEVICE); if (!unmap) {
err = -ENOMEM;
goto free_resources;
}
for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++) /* test xor */
dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i], for (i = 0; i < src_count; i++) {
unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
0, PAGE_SIZE, DMA_TO_DEVICE); 0, PAGE_SIZE, DMA_TO_DEVICE);
dma_srcs[i] = unmap->addr[i];
unmap->to_cnt++;
}
unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
DMA_FROM_DEVICE);
dest_dma = unmap->addr[src_count];
unmap->from_cnt = 1;
unmap->len = PAGE_SIZE;
tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs, tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0); src_count, PAGE_SIZE, 0);
cookie = mv_xor_tx_submit(tx); cookie = mv_xor_tx_submit(tx);
mv_xor_issue_pending(dma_chan); mv_xor_issue_pending(dma_chan);
...@@ -942,9 +970,10 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan) ...@@ -942,9 +970,10 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
} }
free_resources: free_resources:
dmaengine_unmap_put(unmap);
mv_xor_free_chan_resources(dma_chan); mv_xor_free_chan_resources(dma_chan);
out: out:
src_idx = MV_XOR_NUM_SRC_TEST; src_idx = src_count;
while (src_idx--) while (src_idx--)
__free_page(xor_srcs[src_idx]); __free_page(xor_srcs[src_idx]);
__free_page(dest); __free_page(dest);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment