Commit a34a517a authored by Christoph Hellwig, committed by Linus Torvalds

avr32: convert to dma_map_ops

Signed-off-by: Christoph Hellwig <hch@lst.de>
Cc: Haavard Skinnemoen <hskinnemoen@gmail.com>
Cc: Hans-Christian Egtvedt <egtvedt@samfundet.no>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Joerg Roedel <jroedel@suse.de>
Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 052c96db
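
Converting to dma_map_ops means avr32 stops providing its own dma_map_single(), dma_alloc_coherent() and friends; it now fills in a single method table (avr32_dma_ops, defined in the dma-coherent.c hunk below) and lets the generic layer dispatch through it. A rough sketch of that dispatch, assuming the asm-generic wrappers of this era (the helper name here is made up; the real wrapper in include/asm-generic/dma-mapping-common.h also handles attrs plumbing and debug tracing):

	/*
	 * Illustrative only: how the generic DMA API reaches the ops
	 * table once get_dma_ops() returns &avr32_dma_ops.
	 */
	static inline dma_addr_t sketch_dma_map_single(struct device *dev,
			void *ptr, size_t size, enum dma_data_direction dir)
	{
		struct dma_map_ops *ops = get_dma_ops(dev);

		return ops->map_page(dev, virt_to_page(ptr),
				     offset_in_page(ptr), size, dir, NULL);
	}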
arch/avr32/Kconfig
@@ -7,6 +7,7 @@ config AVR32
 	select HAVE_OPROFILE
 	select HAVE_KPROBES
 	select VIRT_TO_BUS
+	select HAVE_DMA_ATTRS
 	select GENERIC_IRQ_PROBE
 	select GENERIC_ATOMIC64
 	select HARDIRQS_SW_RESEND
arch/avr32/include/asm/dma-mapping.h (diff collapsed in this view)
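
The collapsed hunk rewrites arch/avr32/include/asm/dma-mapping.h: the long list of open-coded inline dma_* helpers is dropped in favour of handing the generic code the ops table defined in dma-coherent.c. A minimal sketch of the post-conversion header, modeled on the sibling architecture conversions in this series (the collapsed content itself is not reproduced here):

	#ifndef __ASM_AVR32_DMA_MAPPING_H
	#define __ASM_AVR32_DMA_MAPPING_H

	extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		int direction);

	extern struct dma_map_ops avr32_dma_ops;

	static inline struct dma_map_ops *get_dma_ops(struct device *dev)
	{
		return &avr32_dma_ops;
	}

	/* the generic dma_map_single()/dma_alloc_attrs() wrappers */
	#include <asm-generic/dma-mapping-common.h>

	#endif /* __ASM_AVR32_DMA_MAPPING_H */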
arch/avr32/mm/dma-coherent.c
@@ -9,9 +9,14 @@
 #include <linux/dma-mapping.h>
 #include <linux/gfp.h>
 #include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/device.h>
+#include <linux/scatterlist.h>
 
-#include <asm/addrspace.h>
+#include <asm/processor.h>
 #include <asm/cacheflush.h>
+#include <asm/io.h>
+#include <asm/addrspace.h>
 
 void dma_cache_sync(struct device *dev, void *vaddr, size_t size, int direction)
 {
@@ -93,60 +98,100 @@ static void __dma_free(struct device *dev, size_t size,
 		__free_page(page++);
 }
 
-void *dma_alloc_coherent(struct device *dev, size_t size,
-			 dma_addr_t *handle, gfp_t gfp)
+static void *avr32_dma_alloc(struct device *dev, size_t size,
+		dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
 {
 	struct page *page;
-	void *ret = NULL;
+	dma_addr_t phys;
 
 	page = __dma_alloc(dev, size, handle, gfp);
-	if (page)
-		ret = phys_to_uncached(page_to_phys(page));
+	if (!page)
+		return NULL;
+	phys = page_to_phys(page);
 
-	return ret;
+	if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs)) {
+		/* Now, map the page into P3 with write-combining turned on */
+		*handle = phys;
+		return __ioremap(phys, size, _PAGE_BUFFER);
+	} else {
+		return phys_to_uncached(phys);
+	}
 }
-EXPORT_SYMBOL(dma_alloc_coherent);
 
-void dma_free_coherent(struct device *dev, size_t size,
-		       void *cpu_addr, dma_addr_t handle)
+static void avr32_dma_free(struct device *dev, size_t size,
+		void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs)
 {
-	void *addr = phys_to_cached(uncached_to_phys(cpu_addr));
 	struct page *page;
 
-	pr_debug("dma_free_coherent addr %p (phys %08lx) size %u\n",
-		 cpu_addr, (unsigned long)handle, (unsigned)size);
-	BUG_ON(!virt_addr_valid(addr));
+	if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs)) {
+		iounmap(cpu_addr);
 
-	page = virt_to_page(addr);
+		page = phys_to_page(handle);
+	} else {
+		void *addr = phys_to_cached(uncached_to_phys(cpu_addr));
+
+		pr_debug("avr32_dma_free addr %p (phys %08lx) size %u\n",
+			 cpu_addr, (unsigned long)handle, (unsigned)size);
+
+		BUG_ON(!virt_addr_valid(addr));
+
+		page = virt_to_page(addr);
+	}
+
 	__dma_free(dev, size, page, handle);
 }
-EXPORT_SYMBOL(dma_free_coherent);
 
-void *dma_alloc_writecombine(struct device *dev, size_t size,
-			     dma_addr_t *handle, gfp_t gfp)
+static dma_addr_t avr32_dma_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t size,
+		enum dma_data_direction direction, struct dma_attrs *attrs)
 {
-	struct page *page;
-	dma_addr_t phys;
+	void *cpu_addr = page_address(page) + offset;
 
-	page = __dma_alloc(dev, size, handle, gfp);
-	if (!page)
-		return NULL;
+	dma_cache_sync(dev, cpu_addr, size, direction);
+	return virt_to_bus(cpu_addr);
+}
 
-	phys = page_to_phys(page);
-	*handle = phys;
+static int avr32_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+		int nents, enum dma_data_direction direction,
+		struct dma_attrs *attrs)
+{
+	int i;
+	struct scatterlist *sg;
+
+	for_each_sg(sglist, sg, nents, i) {
+		char *virt;
 
-	/* Now, map the page into P3 with write-combining turned on */
-	return __ioremap(phys, size, _PAGE_BUFFER);
+		sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
+		virt = sg_virt(sg);
+		dma_cache_sync(dev, virt, sg->length, direction);
+	}
+
+	return nents;
 }
-EXPORT_SYMBOL(dma_alloc_writecombine);
 
-void dma_free_writecombine(struct device *dev, size_t size,
-			   void *cpu_addr, dma_addr_t handle)
+static void avr32_dma_sync_single_for_device(struct device *dev,
+		dma_addr_t dma_handle, size_t size,
+		enum dma_data_direction direction)
 {
-	struct page *page;
+	dma_cache_sync(dev, bus_to_virt(dma_handle), size, direction);
+}
 
-	iounmap(cpu_addr);
+static void avr32_dma_sync_sg_for_device(struct device *dev,
+		struct scatterlist *sglist, int nents,
+		enum dma_data_direction direction)
+{
+	int i;
+	struct scatterlist *sg;
 
-	page = phys_to_page(handle);
-	__dma_free(dev, size, page, handle);
+	for_each_sg(sglist, sg, nents, i)
+		dma_cache_sync(dev, sg_virt(sg), sg->length, direction);
 }
-EXPORT_SYMBOL(dma_free_writecombine);
+
+struct dma_map_ops avr32_dma_ops = {
+	.alloc			= avr32_dma_alloc,
+	.free			= avr32_dma_free,
+	.map_page		= avr32_dma_map_page,
+	.map_sg			= avr32_dma_map_sg,
+	.sync_single_for_device	= avr32_dma_sync_single_for_device,
+	.sync_sg_for_device	= avr32_dma_sync_sg_for_device,
+};
+EXPORT_SYMBOL(avr32_dma_ops);
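
From a driver's point of view nothing changes, which is the point of the conversion: the same generic calls now land in avr32_dma_ops. A hypothetical snippet (the function name, sizes, and error handling are illustrative, not part of this commit):

	#include <linux/dma-mapping.h>

	/* Hypothetical driver setup path, for illustration only. */
	static int mydev_setup_dma(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t ring_dma, buf_dma;
		void *ring;

		/* reaches avr32_dma_alloc(); attrs == NULL, so the uncached
		 * (non write-combining) branch is taken */
		ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
		if (!ring)
			return -ENOMEM;

		/* reaches avr32_dma_map_page(): dma_cache_sync() on the
		 * buffer, then virt_to_bus() for the device-visible address */
		buf_dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, buf_dma)) {
			dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
			return -ENOMEM;
		}

		return 0;
	}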