Commit 56ea27fd authored by Dan Williams

dmaengine: consolidate memcpy apis

Copying from page to page (dma_async_memcpy_pg_to_pg) is the superset,
so make the other two APIs use it in preparation for providing a common
dma unmap implementation.  The common implementation just wants to
assume all buffers are mapped with dma_map_page().

Cc: Vinod Koul <vinod.koul@intel.com>
Cc: Tomasz Figa <t.figa@samsung.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent d1cab34c
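
The consolidation in the diff below relies on the fact that any lowmem
kernel virtual address can be decomposed into its backing struct page
plus a byte offset within that page, which is exactly the (page, offset)
pair that dma_async_memcpy_pg_to_pg() takes. A minimal sketch of that
decomposition, assuming kernel context; virt_to_page() and PAGE_MASK are
the real kernel facilities used by the patch, while the wrapper name
below is purely illustrative and not part of this change:

    /*
     * Sketch only: how a lowmem kernel virtual address maps onto the
     * (page, offset) pair expected by dma_async_memcpy_pg_to_pg().
     * Requires <linux/mm.h> for virt_to_page() and PAGE_MASK.
     * addr_to_page_off() is a hypothetical helper for illustration.
     */
    static inline struct page *addr_to_page_off(void *addr, unsigned int *off)
    {
            *off = (unsigned long) addr & ~PAGE_MASK; /* offset inside the page */
            return virt_to_page(addr);                /* backing struct page */
    }

This is the same arithmetic the new wrapper bodies in the diff perform
inline for their @dest/@src and @kdata arguments.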
@@ -902,20 +902,23 @@ void dma_async_device_unregister(struct dma_device *device)
 EXPORT_SYMBOL(dma_async_device_unregister);
 
 /**
- * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
+ * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
  * @chan: DMA channel to offload copy to
- * @dest: destination address (virtual)
- * @src: source address (virtual)
+ * @dest_pg: destination page
+ * @dest_off: offset in page to copy to
+ * @src_pg: source page
+ * @src_off: offset in page to copy from
  * @len: length
  *
- * Both @dest and @src must be mappable to a bus address according to the
- * DMA mapping API rules for streaming mappings.
- * Both @dest and @src must stay memory resident (kernel memory or locked
- * user space pages).
+ * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
+ * address according to the DMA mapping API rules for streaming mappings.
+ * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
+ * (kernel memory or locked user space pages).
  */
 dma_cookie_t
-dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
-			void *src, size_t len)
+dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
+	unsigned int dest_off, struct page *src_pg, unsigned int src_off,
+	size_t len)
 {
 	struct dma_device *dev = chan->device;
 	struct dma_async_tx_descriptor *tx;
@@ -923,16 +926,15 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
 	dma_cookie_t cookie;
 	unsigned long flags;
 
-	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
-	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
-	flags = DMA_CTRL_ACK |
-		DMA_COMPL_SRC_UNMAP_SINGLE |
-		DMA_COMPL_DEST_UNMAP_SINGLE;
+	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
+	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
+				DMA_FROM_DEVICE);
+	flags = DMA_CTRL_ACK;
 	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
 
 	if (!tx) {
-		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
-		dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
+		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
+		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
 		return -ENOMEM;
 	}
 
@@ -946,6 +948,29 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
 
 	return cookie;
 }
+EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
+
+/**
+ * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
+ * @chan: DMA channel to offload copy to
+ * @dest: destination address (virtual)
+ * @src: source address (virtual)
+ * @len: length
+ *
+ * Both @dest and @src must be mappable to a bus address according to the
+ * DMA mapping API rules for streaming mappings.
+ * Both @dest and @src must stay memory resident (kernel memory or locked
+ * user space pages).
+ */
+dma_cookie_t
+dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
+			void *src, size_t len)
+{
+	return dma_async_memcpy_pg_to_pg(chan, virt_to_page(dest),
+					 (unsigned long) dest & ~PAGE_MASK,
+					 virt_to_page(src),
+					 (unsigned long) src & ~PAGE_MASK, len);
+}
 EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
 
 /**
@@ -963,86 +988,14 @@ EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
  */
 dma_cookie_t
 dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
 			unsigned int offset, void *kdata, size_t len)
 {
-	struct dma_device *dev = chan->device;
-	struct dma_async_tx_descriptor *tx;
-	dma_addr_t dma_dest, dma_src;
-	dma_cookie_t cookie;
-	unsigned long flags;
-
-	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
-	dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
-	flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE;
-	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
-
-	if (!tx) {
-		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
-		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
-		return -ENOMEM;
-	}
-
-	tx->callback = NULL;
-	cookie = tx->tx_submit(tx);
-
-	preempt_disable();
-	__this_cpu_add(chan->local->bytes_transferred, len);
-	__this_cpu_inc(chan->local->memcpy_count);
-	preempt_enable();
-
-	return cookie;
+	return dma_async_memcpy_pg_to_pg(chan, page, offset,
+					 virt_to_page(kdata),
+					 (unsigned long) kdata & ~PAGE_MASK, len);
 }
 EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
 
-/**
- * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
- * @chan: DMA channel to offload copy to
- * @dest_pg: destination page
- * @dest_off: offset in page to copy to
- * @src_pg: source page
- * @src_off: offset in page to copy from
- * @len: length
- *
- * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
- * address according to the DMA mapping API rules for streaming mappings.
- * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
- * (kernel memory or locked user space pages).
- */
-dma_cookie_t
-dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
-	unsigned int dest_off, struct page *src_pg, unsigned int src_off,
-	size_t len)
-{
-	struct dma_device *dev = chan->device;
-	struct dma_async_tx_descriptor *tx;
-	dma_addr_t dma_dest, dma_src;
-	dma_cookie_t cookie;
-	unsigned long flags;
-
-	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
-	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
-				DMA_FROM_DEVICE);
-	flags = DMA_CTRL_ACK;
-	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
-
-	if (!tx) {
-		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
-		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
-		return -ENOMEM;
-	}
-
-	tx->callback = NULL;
-	cookie = tx->tx_submit(tx);
-
-	preempt_disable();
-	__this_cpu_add(chan->local->bytes_transferred, len);
-	__this_cpu_inc(chan->local->memcpy_count);
-	preempt_enable();
-
-	return cookie;
-}
-EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
-
 void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
 				  struct dma_chan *chan)
 {
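
For reference, a hedged usage sketch of the consolidated entry point,
assuming the caller has already obtained a channel with the DMA_MEMCPY
capability and mapped-in pages; the variable names are illustrative and
not taken from this patch:

    /* Illustrative caller, not from this patch. Assumes chan, dst_page,
     * src_page and len are already set up and chan supports DMA_MEMCPY. */
    dma_cookie_t cookie;

    cookie = dma_async_memcpy_pg_to_pg(chan, dst_page, 0, src_page, 0, len);
    if (dma_submit_error(cookie))
            pr_err("failed to queue DMA memcpy\n");
    else
            dma_async_issue_pending(chan);

On failure the helper returns -ENOMEM (as the diff shows), which
dma_submit_error() reports as an error cookie.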