Commit a227fb92 authored by Marek Szyprowski

ARM: dma-mapping: remove offset parameter to prepare for generic dma_ops

This patch removes the need for the offset parameter in the dma bounce
functions. This is required to let the dma-mapping framework on the ARM
architecture use the common, generic dma_map_ops based dma-mapping
helpers.

Background and more detailed explanation:

The dma_*_range_* functions have been available since the early days of
the DMA mapping API. They are the correct way of performing a partial
sync on a buffer (usually used by network device drivers). This patch
changes only the internal implementation of the dma bounce functions to
let them tunnel through the dma_map_ops structure. The driver API stays
unchanged, so drivers are still obliged to call the dma_*_range_*
functions to keep the code clean and easy to understand.
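
For illustration, a minimal, hypothetical driver fragment using this
unchanged API might look as follows (the function and the rx_buf,
buf_len and rx_len names are made up for the example):

#include <linux/dma-mapping.h>

/* Hypothetical example: rx_buf, buf_len and rx_len are illustrative. */
static void example_rx_sync(struct device *dev, void *rx_buf,
			    size_t buf_len, size_t rx_len)
{
	dma_addr_t handle;

	/* Map the whole receive buffer once... */
	handle = dma_map_single(dev, rx_buf, buf_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle))
		return;

	/* ...but hand only the bytes the device wrote back to the CPU. */
	dma_sync_single_range_for_cpu(dev, handle, 0, rx_len,
				      DMA_FROM_DEVICE);

	/* process the data, then release the mapping */
	dma_unmap_single(dev, handle, buf_len, DMA_FROM_DEVICE);
}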

The only drawback of this patch is reduced detection of DMA API abuse.
Consider the following code:

dma_addr = dma_map_single(dev, ptr, 64, DMA_TO_DEVICE);
dma_sync_single_range_for_cpu(dev, dma_addr+16, 0, 32, DMA_TO_DEVICE);

Without this patch such code fails, because the dma bounce code is
unable to find the bounce buffer for the given DMA address. With the
patch applied, the above sync call is equivalent to:

dma_sync_single_range_for_cpu(dev, dma_addr, 16, 32, DMA_TO_DEVICE);

which succeeds.
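
To make the new behaviour concrete: the lookup now accepts any address
that falls inside a mapped region and recovers the offset internally.
A small stand-alone model of that check (hypothetical types and values,
not the kernel code itself):

#include <assert.h>
#include <stddef.h>

typedef unsigned long dma_addr_t;	/* stand-in for the kernel type */

struct safe_buffer {
	dma_addr_t safe_dma_addr;
	size_t size;
};

/* The patched containment test used when searching for a bounce buffer. */
static int buffer_contains(const struct safe_buffer *b, dma_addr_t addr)
{
	return b->safe_dma_addr <= addr &&
	       b->safe_dma_addr + b->size > addr;
}

int main(void)
{
	struct safe_buffer b = { .safe_dma_addr = 0x1000, .size = 64 };
	dma_addr_t addr = b.safe_dma_addr + 16;	/* dma_addr + 16 above */

	assert(buffer_contains(&b, addr));	/* lookup now succeeds */
	assert(addr - b.safe_dma_addr == 16);	/* off recovered internally */
	return 0;
}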

I don't consider this a real problem, because DMA API abuse should be
caught by the debug_dma_* function family. This patch lets us simplify
the internal low-level implementation without changing the
driver-visible API.
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Acked-by: Kyungmin Park <kyungmin.park@samsung.com>
Tested-by: Subash Patel <subash.ramaswamy@linaro.org>
parent 553ac788
@@ -173,7 +173,8 @@ find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_
 	read_lock_irqsave(&device_info->lock, flags);
 
 	list_for_each_entry(b, &device_info->safe_buffers, node)
-		if (b->safe_dma_addr == safe_dma_addr) {
+		if (b->safe_dma_addr <= safe_dma_addr &&
+		    b->safe_dma_addr + b->size > safe_dma_addr) {
 			rb = b;
 			break;
 		}
@@ -362,9 +363,10 @@ void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 EXPORT_SYMBOL(__dma_unmap_page);
 
 int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
-		unsigned long off, size_t sz, enum dma_data_direction dir)
+		size_t sz, enum dma_data_direction dir)
 {
 	struct safe_buffer *buf;
+	unsigned long off;
 
 	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
 		__func__, addr, off, sz, dir);
@@ -373,6 +375,8 @@ int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
 	if (!buf)
 		return 1;
 
+	off = addr - buf->safe_dma_addr;
+
 	BUG_ON(buf->direction != dir);
 
 	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
@@ -391,9 +395,10 @@ int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
 EXPORT_SYMBOL(dmabounce_sync_for_cpu);
 
 int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
-		unsigned long off, size_t sz, enum dma_data_direction dir)
+		size_t sz, enum dma_data_direction dir)
 {
 	struct safe_buffer *buf;
+	unsigned long off;
 
 	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
 		__func__, addr, off, sz, dir);
@@ -402,6 +407,8 @@ int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
 	if (!buf)
 		return 1;
 
+	off = addr - buf->safe_dma_addr;
+
 	BUG_ON(buf->direction != dir);
 
 	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
...
@@ -266,19 +266,17 @@ extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
 /*
  * Private functions
  */
-int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
-		size_t, enum dma_data_direction);
-int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
-		size_t, enum dma_data_direction);
+int dmabounce_sync_for_cpu(struct device *, dma_addr_t, size_t, enum dma_data_direction);
+int dmabounce_sync_for_device(struct device *, dma_addr_t, size_t, enum dma_data_direction);
 #else
 static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
-	unsigned long offset, size_t size, enum dma_data_direction dir)
+	size_t size, enum dma_data_direction dir)
 {
 	return 1;
 }
 
 static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
-	unsigned long offset, size_t size, enum dma_data_direction dir)
+	size_t size, enum dma_data_direction dir)
 {
 	return 1;
 }
@@ -401,6 +399,33 @@ static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
 	__dma_unmap_page(dev, handle, size, dir);
 }
 
+static inline void dma_sync_single_for_cpu(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	BUG_ON(!valid_dma_direction(dir));
+
+	debug_dma_sync_single_for_cpu(dev, handle, size, dir);
+
+	if (!dmabounce_sync_for_cpu(dev, handle, size, dir))
+		return;
+
+	__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
+}
+
+static inline void dma_sync_single_for_device(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	BUG_ON(!valid_dma_direction(dir));
+
+	debug_dma_sync_single_for_device(dev, handle, size, dir);
+
+	if (!dmabounce_sync_for_device(dev, handle, size, dir))
+		return;
+
+	__dma_single_cpu_to_dev(dma_to_virt(dev, handle), size, dir);
+}
+
 /**
  * dma_sync_single_range_for_cpu
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -423,40 +448,14 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
 		dma_addr_t handle, unsigned long offset, size_t size,
 		enum dma_data_direction dir)
 {
-	BUG_ON(!valid_dma_direction(dir));
-
-	debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir);
-
-	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
-		return;
-
-	__dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
+	dma_sync_single_for_cpu(dev, handle + offset, size, dir);
 }
 
 static inline void dma_sync_single_range_for_device(struct device *dev,
 		dma_addr_t handle, unsigned long offset, size_t size,
 		enum dma_data_direction dir)
 {
-	BUG_ON(!valid_dma_direction(dir));
-
-	debug_dma_sync_single_for_device(dev, handle + offset, size, dir);
-
-	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
-		return;
-
-	__dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
-}
-
-static inline void dma_sync_single_for_cpu(struct device *dev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
-}
-
-static inline void dma_sync_single_for_device(struct device *dev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
+	dma_sync_single_for_device(dev, handle + offset, size, dir);
 }
 
 /*
...
@@ -660,7 +660,7 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 	int i;
 
 	for_each_sg(sg, s, nents, i) {
-		if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
+		if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s),
 					    sg_dma_len(s), dir))
 			continue;
 
@@ -686,7 +686,7 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 	int i;
 
 	for_each_sg(sg, s, nents, i) {
-		if (!dmabounce_sync_for_device(dev, sg_dma_address(s), 0,
+		if (!dmabounce_sync_for_device(dev, sg_dma_address(s),
 					    sg_dma_len(s), dir))
 			continue;