Commit e20dd889 authored by Christoph Hellwig, committed by Linus Torvalds

cris: convert to dma_map_ops

Signed-off-by: Christoph Hellwig <hch@lst.de>
Cc: Mikael Starvik <starvik@axis.com>
Cc: Jesper Nilsson <jesper.nilsson@axis.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Joerg Roedel <jroedel@suse.de>
Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 4605f04b
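
This series converts each architecture from hand-written dma_* functions to a single struct dma_map_ops table that the generic DMA API dispatches through via get_dma_ops(). The stand-alone C model below illustrates only that dispatch pattern; the device, the names, and the two-hook table are invented for the sketch, and the kernel's real struct dma_map_ops carries many more hooks.

#include <stdio.h>
#include <stdlib.h>

typedef unsigned long dma_addr_t;
struct device { const char *name; };

/* Reduced ops table; the kernel's has map_page, map_sg, sync hooks, etc. */
struct dma_map_ops {
	void *(*alloc)(struct device *dev, size_t size, dma_addr_t *handle);
	void (*free)(struct device *dev, size_t size, void *vaddr);
};

/* "Arch" side: one implementation per hook. */
static void *model_alloc(struct device *dev, size_t size, dma_addr_t *handle)
{
	void *ret = calloc(1, size);		/* zeroed, like the real allocator */

	if (ret)
		*handle = (dma_addr_t)ret;	/* virt == bus in this model */
	return ret;
}

static void model_free(struct device *dev, size_t size, void *vaddr)
{
	free(vaddr);
}

static struct dma_map_ops model_dma_ops = {
	.alloc	= model_alloc,
	.free	= model_free,
};

/* "Generic" side: every API call is a one-line dispatch through the table. */
static struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return &model_dma_ops;	/* CRIS likewise uses one table for the arch */
}

int main(void)
{
	struct device dev = { "pci0" };
	dma_addr_t handle;
	void *buf = get_dma_ops(&dev)->alloc(&dev, 64, &handle);

	printf("%s: cpu %p dma %#lx\n", dev.name, buf, handle);
	get_dma_ops(&dev)->free(&dev, 64, buf);
	return 0;
}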
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -54,6 +54,7 @@ config CRIS
 	select GENERIC_ATOMIC64
 	select HAVE_UID16
 	select VIRT_TO_BUS
+	select HAVE_DMA_ATTRS
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select GENERIC_IRQ_SHOW
 	select GENERIC_IOMAP
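
Selecting HAVE_DMA_ATTRS switches the architecture over to the attrs-carrying entry points generated by <asm-generic/dma-mapping-common.h>; that is why each v32_dma_* callback in the next file grows a struct dma_attrs * argument. In kernels of this vintage (before 4.8 turned attrs into a plain unsigned long) a caller could attach attributes roughly as below; the helper name is made up, and note that v32_dma_alloc() simply ignores whatever attrs it is handed:

#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>

/* hypothetical helper: coherent allocation with an attribute attached */
static void *alloc_wc_buffer(struct device *dev, size_t size,
			     dma_addr_t *handle)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_alloc_attrs(dev, size, handle, GFP_KERNEL, &attrs);
}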
--- a/arch/cris/arch-v32/drivers/pci/dma.c
+++ b/arch/cris/arch-v32/drivers/pci/dma.c
@@ -16,21 +16,18 @@
 #include <linux/gfp.h>
 #include <asm/io.h>
 
-void *dma_alloc_coherent(struct device *dev, size_t size,
-			   dma_addr_t *dma_handle, gfp_t gfp)
+static void *v32_dma_alloc(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
 {
 	void *ret;
-	int order = get_order(size);
 
 	/* ignore region specifiers */
 	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
 
-	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
-		return ret;
-
 	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
 		gfp |= GFP_DMA;
 
-	ret = (void *)__get_free_pages(gfp, order);
+	ret = (void *)__get_free_pages(gfp, get_order(size));
 
 	if (ret != NULL) {
 		memset(ret, 0, size);
@@ -39,12 +36,45 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
 	return ret;
 }
 
-void dma_free_coherent(struct device *dev, size_t size,
-			 void *vaddr, dma_addr_t dma_handle)
+static void v32_dma_free(struct device *dev, size_t size, void *vaddr,
+		dma_addr_t dma_handle, struct dma_attrs *attrs)
+{
+	free_pages((unsigned long)vaddr, get_order(size));
+}
+
+static inline dma_addr_t v32_dma_map_page(struct device *dev,
+		struct page *page, unsigned long offset, size_t size,
+		enum dma_data_direction direction,
+		struct dma_attrs *attrs)
 {
-	int order = get_order(size);
+	return page_to_phys(page) + offset;
+}
 
-	if (!dma_release_from_coherent(dev, order, vaddr))
-		free_pages((unsigned long)vaddr, order);
+static inline int v32_dma_map_sg(struct device *dev, struct scatterlist *sg,
+		int nents, enum dma_data_direction direction,
+		struct dma_attrs *attrs)
+{
+	printk("Map sg\n");
+	return nents;
+}
+
+static inline int v32_dma_supported(struct device *dev, u64 mask)
+{
+	/*
+	 * we fall back to GFP_DMA when the mask isn't all 1s,
+	 * so we can't guarantee allocations that must be
+	 * within a tighter range than GFP_DMA..
+	 */
+	if (mask < 0x00ffffff)
+		return 0;
+	return 1;
 }
+
+struct dma_map_ops v32_dma_ops = {
+	.alloc		= v32_dma_alloc,
+	.free		= v32_dma_free,
+	.map_page	= v32_dma_map_page,
+	.map_sg		= v32_dma_map_sg,
+	.dma_supported	= v32_dma_supported,
+};
+EXPORT_SYMBOL(v32_dma_ops);
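
With v32_dma_ops registered, the driver-facing API needs no per-arch definitions: <asm-generic/dma-mapping-common.h> builds dma_map_single(), dma_map_page() and the rest out of the hooks. A simplified paraphrase of how that header implements the map-single path (the real version also does direction sanity checks and debug_dma bookkeeping):

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	/*
	 * On CRIS ops->map_page() is v32_dma_map_page(), so this reduces
	 * to page_to_phys(page) + offset, the same address the old
	 * open-coded dma_map_single() got from virt_to_phys().
	 */
	return ops->map_page(dev, virt_to_page(ptr),
			     (unsigned long)ptr & ~PAGE_MASK,
			     size, dir, attrs);
}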
--- a/arch/cris/include/asm/dma-mapping.h
+++ b/arch/cris/include/asm/dma-mapping.h
 /* DMA mapping. Nothing tricky here, just virt_to_phys */
 
 #ifndef _ASM_CRIS_DMA_MAPPING_H
 #define _ASM_CRIS_DMA_MAPPING_H
 
-#include <linux/mm.h>
-#include <linux/kernel.h>
-#include <linux/scatterlist.h>
-
-#include <asm/cache.h>
-#include <asm/io.h>
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
 #ifdef CONFIG_PCI
-#include <asm-generic/dma-coherent.h>
+extern struct dma_map_ops v32_dma_ops;
 
-void *dma_alloc_coherent(struct device *dev, size_t size,
-			 dma_addr_t *dma_handle, gfp_t flag);
-
-void dma_free_coherent(struct device *dev, size_t size,
-			 void *vaddr, dma_addr_t dma_handle);
-#else
-static inline void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		   gfp_t flag)
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
-	BUG();
-	return NULL;
+	return &v32_dma_ops;
 }
-
-static inline void
-dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
-		  dma_addr_t dma_handle)
+#else
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
 	BUG();
+	return NULL;
 }
 #endif
 
-static inline dma_addr_t
-dma_map_single(struct device *dev, void *ptr, size_t size,
-	       enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-	return virt_to_phys(ptr);
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-		 enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-}
-
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-	   enum dma_data_direction direction)
-{
-	printk("Map sg\n");
-	return nents;
-}
-
-static inline dma_addr_t
-dma_map_page(struct device *dev, struct page *page, unsigned long offset,
-	     size_t size, enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-	return page_to_phys(page) + offset;
-}
-
-static inline void
-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-	       enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-}
-
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-	     enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-}
-
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
-			enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
-			   enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-			      unsigned long offset, size_t size,
-			      enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
-				 unsigned long offset, size_t size,
-				 enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
-		    enum dma_data_direction direction)
-{
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
-		       enum dma_data_direction direction)
-{
-}
-
-static inline int
-dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return 0;
-}
-
-static inline int
-dma_supported(struct device *dev, u64 mask)
-{
-	/*
-	 * we fall back to GFP_DMA when the mask isn't all 1s,
-	 * so we can't guarantee allocations that must be
-	 * within a tighter range than GFP_DMA..
-	 */
-	if(mask < 0x00ffffff)
-		return 0;
-	return 1;
-}
-
-static inline int
-dma_set_mask(struct device *dev, u64 mask)
-{
-	if(!dev->dma_mask || !dma_supported(dev, mask))
-		return -EIO;
-
-	*dev->dma_mask = mask;
-
-	return 0;
-}
+#include <asm-generic/dma-mapping-common.h>
 
 static inline void
 dma_cache_sync(struct device *dev, void *vaddr, size_t size,
@@ -158,15 +24,4 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 {
 }
 
-/* drivers/base/dma-mapping.c */
-extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
-			   void *cpu_addr, dma_addr_t dma_addr, size_t size);
-extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
-				  void *cpu_addr, dma_addr_t dma_addr,
-				  size_t size);
-
-#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
-#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
-
 #endif
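
The net effect on this header: roughly 130 lines of hand-written stubs collapse into get_dma_ops() plus the common include, with only the empty dma_cache_sync() left as arch code. Behavior is preserved, too: the generic dma_set_mask() (ignoring an ops->set_dma_mask hook that CRIS does not provide) is equivalent to the inline removed above, and dma_supported() now lands in v32_dma_supported():

static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;
	return 0;
}

So, for example, dma_set_mask(dev, DMA_BIT_MASK(20)) still fails with -EIO on CRIS, because v32_dma_supported() rejects masks below 0x00ffffff, while dma_set_mask(dev, DMA_BIT_MASK(32)) still succeeds.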