Commit 44a0337b authored by Christoph Hellwig, committed by Michael Ellerman

powerpc/dma: split the two __dma_alloc_coherent implementations

The implementation for the CONFIG_NOT_COHERENT_CACHE case doesn't share
any code with the one for systems with coherent caches.  Split it off
and merge it with the helpers in dma-noncoherent.c that have no other
callers.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 9c15a87c
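For orientation, here is a condensed sketch (not the literal kernel sources) of the shape the two allocators take after this split. The coherent-cache variant keeps its plain page-allocator implementation and is now compiled only when CONFIG_NOT_COHERENT_CACHE is unset (the CONFIG_FSL_SOC zone handling visible in the real code is omitted here); the non-coherent variant is the old __dma_alloc_coherent() remapping allocator, renamed and taught to compute the DMA handle itself. noncoherent_remap_alloc() below is a hypothetical placeholder, not a real kernel function.

/* Cache-coherent systems: helper kept in the generic powerpc DMA code,
 * now guarded by #ifndef CONFIG_NOT_COHERENT_CACHE (condensed sketch). */
#ifndef CONFIG_NOT_COHERENT_CACHE
void *__dma_nommu_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t flag,
				 unsigned long attrs)
{
	struct page *page;
	void *ret;

	/* ordinary page allocation; the CPU mapping is already coherent */
	page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
	if (!page)
		return NULL;

	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = __pa(ret) + get_dma_offset(dev);
	return ret;
}
#endif

/* Non-coherent systems: the renamed helper in dma-noncoherent.c, built
 * only when CONFIG_NOT_COHERENT_CACHE=y (condensed sketch). */
void *__dma_nommu_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp,
				 unsigned long attrs)
{
	struct page *page;
	void *vaddr;

	/*
	 * Body of the old __dma_alloc_coherent(): allocate pages and map
	 * them uncached in the consistent-memory window.  That logic is
	 * unchanged by this patch; noncoherent_remap_alloc() is only a
	 * hypothetical stand-in for it.
	 */
	vaddr = noncoherent_remap_alloc(size, gfp, &page);
	if (!vaddr)
		return NULL;

	/* the handle computation the wrapper used to apply by hand */
	*dma_handle = phys_to_dma(dev, page_to_phys(page));
	return vaddr;
}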
@@ -39,9 +39,6 @@ extern int dma_nommu_mmap_coherent(struct device *dev,
  * to ensure it is consistent.
  */
 struct device;
-extern void *__dma_alloc_coherent(struct device *dev, size_t size,
-				   dma_addr_t *handle, gfp_t gfp);
-extern void __dma_free_coherent(size_t size, void *vaddr);
 extern void __dma_sync(void *vaddr, size_t size, int direction);
 extern void __dma_sync_page(struct page *page, unsigned long offset,
 			    size_t size, int direction);
@@ -52,8 +49,6 @@ extern unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr);
 /*
  * Cache coherent cores.
  */
 
-#define __dma_alloc_coherent(dev, gfp, size, handle)	NULL
-#define __dma_free_coherent(size, addr)			((void)0)
 #define __dma_sync(addr, size, rw)		((void)0)
 #define __dma_sync_page(pg, off, sz, rw)	((void)0)
...
@@ -62,18 +62,12 @@ static int dma_nommu_dma_supported(struct device *dev, u64 mask)
 #endif
 }
 
+#ifndef CONFIG_NOT_COHERENT_CACHE
 void *__dma_nommu_alloc_coherent(struct device *dev, size_t size,
 				  dma_addr_t *dma_handle, gfp_t flag,
 				  unsigned long attrs)
 {
 	void *ret;
-#ifdef CONFIG_NOT_COHERENT_CACHE
-	ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
-	if (ret == NULL)
-		return NULL;
-	*dma_handle += get_dma_offset(dev);
-	return ret;
-#else
 	struct page *page;
 	int node = dev_to_node(dev);
 #ifdef CONFIG_FSL_SOC
@@ -113,19 +107,15 @@ void *__dma_nommu_alloc_coherent(struct device *dev, size_t size,
 	*dma_handle = __pa(ret) + get_dma_offset(dev);
 
 	return ret;
-#endif
 }
 
 void __dma_nommu_free_coherent(struct device *dev, size_t size,
 				void *vaddr, dma_addr_t dma_handle,
 				unsigned long attrs)
 {
-#ifdef CONFIG_NOT_COHERENT_CACHE
-	__dma_free_coherent(size, vaddr);
-#else
 	free_pages((unsigned long)vaddr, get_order(size));
-#endif
 }
+#endif /* !CONFIG_NOT_COHERENT_CACHE */
 
 static void *dma_nommu_alloc_coherent(struct device *dev, size_t size,
 				       dma_addr_t *dma_handle, gfp_t flag,
...
@@ -29,7 +29,7 @@
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/highmem.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
 #include <linux/export.h>
 
 #include <asm/tlbflush.h>
@@ -151,8 +151,8 @@ static struct ppc_vm_region *ppc_vm_region_find(struct ppc_vm_region *head, unsigned long addr)
  * Allocate DMA-coherent memory space and return both the kernel remapped
  * virtual and bus address for that space.
  */
-void *
-__dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
+void *__dma_nommu_alloc_coherent(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
 	struct page *page;
 	struct ppc_vm_region *c;
@@ -223,7 +223,7 @@ __dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
 	/*
 	 * Set the "dma handle"
 	 */
-	*handle = page_to_phys(page);
+	*dma_handle = phys_to_dma(dev, page_to_phys(page));
 
 	do {
 		SetPageReserved(page);
@@ -249,12 +249,12 @@ __dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
  no_page:
 	return NULL;
 }
-EXPORT_SYMBOL(__dma_alloc_coherent);
 
 /*
  * free a page as defined by the above mapping.
  */
-void __dma_free_coherent(size_t size, void *vaddr)
+void __dma_nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
+		dma_addr_t dma_handle, unsigned long attrs)
 {
 	struct ppc_vm_region *c;
 	unsigned long flags, addr;
@@ -309,7 +309,6 @@ void __dma_free_coherent(size_t size, void *vaddr)
 		__func__, vaddr);
 	dump_stack();
 }
-EXPORT_SYMBOL(__dma_free_coherent);
 
 /*
  * make an area consistent.
@@ -401,7 +400,7 @@ EXPORT_SYMBOL(__dma_sync_page);
 
 /*
  * Return the PFN for a given cpu virtual address returned by
- * __dma_alloc_coherent. This is used by dma_mmap_coherent()
+ * __dma_nommu_alloc_coherent. This is used by dma_mmap_coherent()
  */
 unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr)
 {
...
@@ -47,7 +47,7 @@ static int __init warp_probe(void)
 	if (!of_machine_is_compatible("pika,warp"))
 		return 0;
 
-	/* For __dma_alloc_coherent */
+	/* For __dma_nommu_alloc_coherent */
 	ISA_DMA_THRESHOLD = ~0L;
 
 	return 1;
...
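One detail of the refactor worth calling out: the DMA handle for the non-coherent case used to be produced in two steps, with the remapping allocator storing the physical address and the wrapper in the generic DMA code adding the per-device offset. After the rename, the single remaining implementation does it in one call to phys_to_dma(). A minimal before/after sketch using the same names as the hunks above (my reading of the change, not text from the patch):

/* before: split across the remap allocator and its wrapper */
*handle = page_to_phys(page);		/* old __dma_alloc_coherent() */
*dma_handle += get_dma_offset(dev);	/* old __dma_nommu_* wrapper */

/* after: one step in the renamed helper; phys_to_dma() applies the
 * device's DMA offset to the physical address on these platforms */
*dma_handle = phys_to_dma(dev, page_to_phys(page));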