Commit 65af191a authored by Russell King's avatar Russell King

ARM: dma-mapping: move selection of page ops out of dma_cache_maint_contiguous

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Tested-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
parent 4ea0d737
...@@ -447,48 +447,25 @@ void ___dma_single_dev_to_cpu(const void *kaddr, size_t size, ...@@ -447,48 +447,25 @@ void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
EXPORT_SYMBOL(___dma_single_dev_to_cpu); EXPORT_SYMBOL(___dma_single_dev_to_cpu);
/*
 * Apply a CPU (inner) cache maintenance operation to a physically
 * contiguous region within a single page.
 *
 * @page:	page containing the region
 * @offset:	byte offset of the region within @page
 * @size:	length of the region in bytes
 * @op:		inner-cache maintenance routine taking (start, end) kernel
 *		virtual addresses, e.g. dmac_inv_range / dmac_clean_range /
 *		dmac_flush_range — selected by the caller
 *
 * Lowmem pages have a permanent kernel mapping, so the virtual address is
 * taken directly from page_address().  Highmem pages are only maintained
 * when they currently have a kmap mapping (kmap_high_get() returns
 * non-NULL); pages without an active mapping are skipped.
 * NOTE(review): skipping unmapped highmem pages presumably relies on the
 * cache holding no lines for unmapped pages — confirm against
 * kmap_high_get() semantics.
 */
static void dma_cache_maint_contiguous(struct page *page, unsigned long offset,
	size_t size, void (*op)(const void *, const void *))
{
	void *vaddr;

	if (!PageHighMem(page)) {
		vaddr = page_address(page) + offset;
		op(vaddr, vaddr + size);
	} else {
		vaddr = kmap_high_get(page);
		if (vaddr) {
			vaddr += offset;
			op(vaddr, vaddr + size);
			kunmap_high(page);
		}
	}
}
static void dma_cache_maint_page(struct page *page, unsigned long offset, static void dma_cache_maint_page(struct page *page, unsigned long offset,
size_t size, int dir) size_t size, void (*op)(const void *, const void *))
{ {
/* /*
* A single sg entry may refer to multiple physically contiguous * A single sg entry may refer to multiple physically contiguous
...@@ -506,7 +483,7 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset, ...@@ -506,7 +483,7 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
} }
len = PAGE_SIZE - offset; len = PAGE_SIZE - offset;
} }
dma_cache_maint_contiguous(page, offset, len, dir); dma_cache_maint_contiguous(page, offset, len, op);
offset = 0; offset = 0;
page++; page++;
left -= len; left -= len;
...@@ -516,7 +493,31 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset, ...@@ -516,7 +493,31 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
/*
 * ___dma_page_cpu_to_dev - make a region of a page visible to a device
 *
 * @page:	first page of the region
 * @off:	byte offset of the region within @page
 * @size:	length of the region in bytes
 * @dir:	DMA transfer direction
 *
 * Selects the inner (CPU) and outer cache maintenance routines from the
 * transfer direction, applies the inner operation page-by-page via
 * dma_cache_maint_page(), then applies the outer-cache operation on the
 * physical address range in one go.
 *
 * Fix: the switch dispatched on `direction`, but the parameter is named
 * `dir` — `direction` is undeclared in this scope and would not compile.
 */
void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr;
	void (*inner_op)(const void *, const void *);
	void (*outer_op)(unsigned long, unsigned long);

	switch (dir) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		inner_op = dmac_inv_range;
		outer_op = outer_inv_range;
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		inner_op = dmac_clean_range;
		outer_op = outer_clean_range;
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		inner_op = dmac_flush_range;
		outer_op = outer_flush_range;
		break;
	default:
		BUG();
	}

	dma_cache_maint_page(page, off, size, inner_op);

	paddr = page_to_phys(page) + off;
	outer_op(paddr, paddr + size);
}
EXPORT_SYMBOL(___dma_page_cpu_to_dev);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment