Commit d909b347 authored by Dan Williams

async_tx: kill ASYNC_TX_ASSUME_COHERENT

Remove the unused ASYNC_TX_ASSUME_COHERENT flag.  Async_tx is meant
to hide the difference between asynchronous hardware and synchronous
software operations; this flag required clients to understand the
cache-coherency consequences of the async path.
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Reviewed-by: Haavard Skinnemoen <hskinnemoen@atmel.com>
parent e73ef9ac
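
For context, the pattern removed below is the same in every hunk: with the flag
set, a mapping site passed DMA_NONE instead of the real DMA direction to
dma_map_page(), which per the flag's (now removed) kerneldoc meant skipping
cache maintenance. A condensed before/after sketch, using the variable names
from the async_memcpy hunk (the surrounding function body is elided):

	/* Before: the client-visible flag chose between a real direction
	 * and DMA_NONE, so callers had to know whether the platform was
	 * cache coherent. */
	enum dma_data_direction dir;

	dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
		DMA_NONE : DMA_FROM_DEVICE;
	addr = dma_map_page(device->dev, dest, dest_offset, len, dir);

	/* After: destinations are always mapped DMA_FROM_DEVICE (and
	 * sources DMA_TO_DEVICE), keeping the coherency decision inside
	 * the async_tx/dmaengine layer. */
	addr = dma_map_page(device->dev, dest, dest_offset, len,
			    DMA_FROM_DEVICE);
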
@@ -35,7 +35,7 @@
  * @src: src page
  * @offset: offset in pages to start transaction
  * @len: length in bytes
- * @flags: ASYNC_TX_ASSUME_COHERENT, ASYNC_TX_ACK, ASYNC_TX_DEP_ACK,
+ * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK,
  * @depend_tx: memcpy depends on the result of this transaction
  * @cb_fn: function to call when the memcpy completes
  * @cb_param: parameter to pass to the callback routine
@@ -55,20 +55,15 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 
 	if (tx) { /* run the memcpy asynchronously */
 		dma_addr_t addr;
-		enum dma_data_direction dir;
 
 		pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
 
-		dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
-			DMA_NONE : DMA_FROM_DEVICE;
-
-		addr = dma_map_page(device->dev, dest, dest_offset, len, dir);
+		addr = dma_map_page(device->dev, dest, dest_offset, len,
+				    DMA_FROM_DEVICE);
 		tx->tx_set_dest(addr, tx, 0);
 
-		dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
-			DMA_NONE : DMA_TO_DEVICE;
-
-		addr = dma_map_page(device->dev, src, src_offset, len, dir);
+		addr = dma_map_page(device->dev, src, src_offset, len,
+				    DMA_TO_DEVICE);
 		tx->tx_set_src(addr, tx, 0);
 
 		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
...
@@ -35,7 +35,7 @@
  * @val: fill value
  * @offset: offset in pages to start transaction
  * @len: length in bytes
- * @flags: ASYNC_TX_ASSUME_COHERENT, ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
+ * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
  * @depend_tx: memset depends on the result of this transaction
  * @cb_fn: function to call when the memcpy completes
  * @cb_param: parameter to pass to the callback routine
@@ -55,13 +55,11 @@ async_memset(struct page *dest, int val, unsigned int offset,
 
 	if (tx) { /* run the memset asynchronously */
 		dma_addr_t dma_addr;
-		enum dma_data_direction dir;
 
 		pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
 
-		dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
-			DMA_NONE : DMA_FROM_DEVICE;
-		dma_addr = dma_map_page(device->dev, dest, offset, len, dir);
+		dma_addr = dma_map_page(device->dev, dest, offset, len,
+					DMA_FROM_DEVICE);
 		tx->tx_set_dest(dma_addr, tx, 0);
 
 		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
...
@@ -42,23 +42,17 @@ do_async_xor(struct dma_async_tx_descriptor *tx, struct dma_device *device,
 	dma_async_tx_callback cb_fn, void *cb_param)
 {
 	dma_addr_t dma_addr;
-	enum dma_data_direction dir;
 	int i;
 
 	pr_debug("%s: len: %zu\n", __FUNCTION__, len);
 
-	dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
-		DMA_NONE : DMA_FROM_DEVICE;
-
-	dma_addr = dma_map_page(device->dev, dest, offset, len, dir);
+	dma_addr = dma_map_page(device->dev, dest, offset, len,
+				DMA_FROM_DEVICE);
 	tx->tx_set_dest(dma_addr, tx, 0);
 
-	dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
-		DMA_NONE : DMA_TO_DEVICE;
-
 	for (i = 0; i < src_cnt; i++) {
 		dma_addr = dma_map_page(device->dev, src_list[i],
-					offset, len, dir);
+					offset, len, DMA_TO_DEVICE);
 		tx->tx_set_src(dma_addr, tx, i);
 	}
 
@@ -106,7 +100,7 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
  * @src_cnt: number of source pages
  * @len: length in bytes
  * @flags: ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DEST,
- *	ASYNC_TX_ASSUME_COHERENT, ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
+ *	ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
  * @depend_tx: xor depends on the result of this transaction.
  * @cb_fn: function to call when the xor completes
  * @cb_param: parameter to pass to the callback routine
@@ -246,7 +240,7 @@ static int page_is_zero(struct page *p, unsigned int offset, size_t len)
  * @src_cnt: number of source pages
  * @len: length in bytes
  * @result: 0 if sum == 0 else non-zero
- * @flags: ASYNC_TX_ASSUME_COHERENT, ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
+ * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
  * @depend_tx: xor depends on the result of this transaction.
  * @cb_fn: function to call when the xor completes
  * @cb_param: parameter to pass to the callback routine
@@ -270,16 +264,12 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
 
 	if (tx) {
 		dma_addr_t dma_addr;
-		enum dma_data_direction dir;
 
 		pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
 
-		dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
-			DMA_NONE : DMA_TO_DEVICE;
-
 		for (i = 0; i < src_cnt; i++) {
 			dma_addr = dma_map_page(device->dev, src_list[i],
-						offset, len, dir);
+						offset, len, DMA_TO_DEVICE);
 			tx->tx_set_src(dma_addr, tx, i);
 		}
 
...
@@ -47,7 +47,6 @@ struct dma_chan_ref {
  * address is an implied source, whereas the asynchronous case it must be listed
  * as a source. The destination address must be the first address in the source
  * array.
- * @ASYNC_TX_ASSUME_COHERENT: skip cache maintenance operations
  * @ASYNC_TX_ACK: immediately ack the descriptor, precludes setting up a
  * dependency chain
  * @ASYNC_TX_DEP_ACK: ack the dependency descriptor. Useful for chaining.
@@ -55,7 +54,6 @@ struct dma_chan_ref {
 enum async_tx_flags {
 	ASYNC_TX_XOR_ZERO_DST	 = (1 << 0),
 	ASYNC_TX_XOR_DROP_DST	 = (1 << 1),
-	ASYNC_TX_ASSUME_COHERENT = (1 << 2),
 	ASYNC_TX_ACK		 = (1 << 3),
 	ASYNC_TX_DEP_ACK	 = (1 << 4),
 };
...
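
After the change, a client simply drops ASYNC_TX_ASSUME_COHERENT from its flag
mask; call sites are otherwise unchanged. A hedged usage sketch against the
async_memcpy() prototype documented above (dest_page and src_page are
hypothetical struct page pointers owned by the caller):

	/* Hypothetical caller: asynchronously copy one page; only the
	 * remaining flags (e.g. ASYNC_TX_ACK) are passed. */
	struct dma_async_tx_descriptor *tx;

	tx = async_memcpy(dest_page, src_page, 0, 0, PAGE_SIZE,
			  ASYNC_TX_ACK, NULL, NULL, NULL);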