Commit c1f59375 authored by Christoph Hellwig's avatar Christoph Hellwig Committed by Helge Deller

parisc: use generic dma_noncoherent_ops

Switch to the generic noncoherent direct mapping implementation.

Fix sync_single_for_cpu to skip the cache flush unless the transfer
is to the device, to match the more tested unmap_single path, which should
have the same cache coherency implications.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Helge Deller <deller@gmx.de>
parent 7f150105
...@@ -187,6 +187,10 @@ config PA20 ...@@ -187,6 +187,10 @@ config PA20
config PA11 config PA11
def_bool y def_bool y
depends on PA7000 || PA7100LC || PA7200 || PA7300LC depends on PA7000 || PA7100LC || PA7200 || PA7300LC
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select DMA_NONCOHERENT_OPS
select DMA_NONCOHERENT_CACHE_SYNC
config PREFETCH config PREFETCH
def_bool y def_bool y
......
...@@ -21,10 +21,6 @@ ...@@ -21,10 +21,6 @@
** flush/purge and allocate "regular" cacheable pages for everything. ** flush/purge and allocate "regular" cacheable pages for everything.
*/ */
#ifdef CONFIG_PA11
extern const struct dma_map_ops pa11_dma_ops;
#endif
extern const struct dma_map_ops *hppa_dma_ops; extern const struct dma_map_ops *hppa_dma_ops;
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
......
...@@ -21,13 +21,12 @@ ...@@ -21,13 +21,12 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/gfp.h> #include <linux/gfp.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/pci.h>
#include <linux/proc_fs.h> #include <linux/proc_fs.h>
#include <linux/seq_file.h> #include <linux/seq_file.h>
#include <linux/string.h> #include <linux/string.h>
#include <linux/types.h> #include <linux/types.h>
#include <linux/scatterlist.h> #include <linux/dma-direct.h>
#include <linux/export.h> #include <linux/dma-noncoherent.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/dma.h> /* for DMA_CHUNK_SIZE */ #include <asm/dma.h> /* for DMA_CHUNK_SIZE */
...@@ -437,7 +436,7 @@ static void *pcx_dma_alloc(struct device *dev, size_t size, ...@@ -437,7 +436,7 @@ static void *pcx_dma_alloc(struct device *dev, size_t size,
return addr; return addr;
} }
static void *pa11_dma_alloc(struct device *dev, size_t size, void *arch_dma_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{ {
...@@ -447,7 +446,7 @@ static void *pa11_dma_alloc(struct device *dev, size_t size, ...@@ -447,7 +446,7 @@ static void *pa11_dma_alloc(struct device *dev, size_t size,
return pcx_dma_alloc(dev, size, dma_handle, gfp, attrs); return pcx_dma_alloc(dev, size, dma_handle, gfp, attrs);
} }
static void pa11_dma_free(struct device *dev, size_t size, void *vaddr, void arch_dma_free(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, unsigned long attrs) dma_addr_t dma_handle, unsigned long attrs)
{ {
int order = get_order(size); int order = get_order(size);
...@@ -462,142 +461,20 @@ static void pa11_dma_free(struct device *dev, size_t size, void *vaddr, ...@@ -462,142 +461,20 @@ static void pa11_dma_free(struct device *dev, size_t size, void *vaddr,
free_pages((unsigned long)vaddr, get_order(size)); free_pages((unsigned long)vaddr, get_order(size));
} }
static dma_addr_t pa11_dma_map_page(struct device *dev, struct page *page, void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
unsigned long offset, size_t size, size_t size, enum dma_data_direction dir)
enum dma_data_direction direction, unsigned long attrs)
{ {
void *addr = page_address(page) + offset; flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
BUG_ON(direction == DMA_NONE);
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
flush_kernel_dcache_range((unsigned long) addr, size);
return virt_to_phys(addr);
}
static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction,
unsigned long attrs)
{
BUG_ON(direction == DMA_NONE);
if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
return;
if (direction == DMA_TO_DEVICE)
return;
/*
* For PCI_DMA_FROMDEVICE this flush is not necessary for the
* simple map/unmap case. However, it IS necessary if
* pci_dma_sync_single_* has been called and the buffer reused.
*/
flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle), size);
} }
static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist, void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
int nents, enum dma_data_direction direction, size_t size, enum dma_data_direction dir)
unsigned long attrs)
{ {
int i; flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
struct scatterlist *sg;
BUG_ON(direction == DMA_NONE);
for_each_sg(sglist, sg, nents, i) {
unsigned long vaddr = (unsigned long)sg_virt(sg);
sg_dma_address(sg) = (dma_addr_t) virt_to_phys(vaddr);
sg_dma_len(sg) = sg->length;
if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
continue;
flush_kernel_dcache_range(vaddr, sg->length);
}
return nents;
} }
static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
int nents, enum dma_data_direction direction,
unsigned long attrs)
{
int i;
struct scatterlist *sg;
BUG_ON(direction == DMA_NONE);
if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
return;
if (direction == DMA_TO_DEVICE)
return;
/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
for_each_sg(sglist, sg, nents, i)
flush_kernel_dcache_range(sg_virt(sg), sg->length);
}
static void pa11_dma_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle),
size);
}
static void pa11_dma_sync_single_for_device(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle),
size);
}
static void pa11_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
int i;
struct scatterlist *sg;
/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
for_each_sg(sglist, sg, nents, i)
flush_kernel_dcache_range(sg_virt(sg), sg->length);
}
static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
int i;
struct scatterlist *sg;
/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
for_each_sg(sglist, sg, nents, i)
flush_kernel_dcache_range(sg_virt(sg), sg->length);
}
static void pa11_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction) enum dma_data_direction direction)
{ {
flush_kernel_dcache_range((unsigned long)vaddr, size); flush_kernel_dcache_range((unsigned long)vaddr, size);
} }
const struct dma_map_ops pa11_dma_ops = {
.alloc = pa11_dma_alloc,
.free = pa11_dma_free,
.map_page = pa11_dma_map_page,
.unmap_page = pa11_dma_unmap_page,
.map_sg = pa11_dma_map_sg,
.unmap_sg = pa11_dma_unmap_sg,
.sync_single_for_cpu = pa11_dma_sync_single_for_cpu,
.sync_single_for_device = pa11_dma_sync_single_for_device,
.sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
.sync_sg_for_device = pa11_dma_sync_sg_for_device,
.cache_sync = pa11_dma_cache_sync,
};
...@@ -102,7 +102,7 @@ void __init dma_ops_init(void) ...@@ -102,7 +102,7 @@ void __init dma_ops_init(void)
case pcxl: /* falls through */ case pcxl: /* falls through */
case pcxs: case pcxs:
case pcxt: case pcxt:
hppa_dma_ops = &pa11_dma_ops; hppa_dma_ops = &dma_noncoherent_ops;
break; break;
default: default:
break; break;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment