Commit 5a842922 authored by Christoph Hellwig

dma-mapping: remove dma_cache_sync

All users are gone now, remove the API.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de> (MIPS part)
parent d69d8adc
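
For context: dma_cache_sync() was the vaddr-based cache-maintenance hook for memory obtained with the DMA_ATTR_NON_CONSISTENT attribute, and its last callers were converted to the dma_addr_t-based interface before this removal. A minimal sketch of the replacement pattern follows; dma_alloc_noncoherent(), dma_sync_single_for_device() and dma_free_noncoherent() are real APIs of this kernel generation, while the surrounding function and buffer are purely illustrative:

#include <linux/dma-mapping.h>
#include <linux/string.h>

/* Hypothetical driver setup: allocate a non-coherent buffer and make
 * CPU writes visible to the device without the removed dma_cache_sync().
 */
static int example_prepare_buffer(struct device *dev, size_t size)
{
	dma_addr_t dma_handle;
	void *vaddr;

	vaddr = dma_alloc_noncoherent(dev, size, &dma_handle,
				      DMA_TO_DEVICE, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	memset(vaddr, 0, size);		/* CPU fills the buffer */

	/*
	 * Where dma_cache_sync(dev, vaddr, size, DMA_TO_DEVICE) was
	 * called before, sync by DMA handle instead:
	 */
	dma_sync_single_for_device(dev, dma_handle, size, DMA_TO_DEVICE);

	/* ... hand dma_handle to the device, start the transfer ... */

	dma_free_noncoherent(dev, size, vaddr, dma_handle, DMA_TO_DEVICE);
	return 0;
}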
@@ -1135,7 +1135,6 @@ config DMA_NONCOHERENT
 	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
 	select ARCH_HAS_DMA_SET_UNCACHED
 	select DMA_NONCOHERENT_MMAP
-	select DMA_NONCOHERENT_CACHE_SYNC
 	select NEED_DMA_MAP_STATE
 
 config SYS_HAS_EARLY_PRINTK
...
@@ -620,7 +620,6 @@ const struct dma_map_ops jazz_dma_ops = {
 	.sync_single_for_device = jazz_dma_sync_single_for_device,
 	.sync_sg_for_cpu = jazz_dma_sync_sg_for_cpu,
 	.sync_sg_for_device = jazz_dma_sync_sg_for_device,
-	.cache_sync = arch_dma_cache_sync,
 	.mmap = dma_common_mmap,
 	.get_sgtable = dma_common_get_sgtable,
 };
...
@@ -137,12 +137,6 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
 }
 #endif
 
-void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-		enum dma_data_direction direction)
-{
-	dma_sync_virt_for_device(vaddr, size, direction);
-}
-
 #ifdef CONFIG_DMA_PERDEV_COHERENT
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 		const struct iommu_ops *iommu, bool coherent)
...
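The MIPS hook removed above was a thin wrapper around dma_sync_virt_for_device(), the same primitive that backs the architecture's streaming-DMA maintenance, so cache handling moves wholesale to the arch_sync_dma_* hooks. A hypothetical sketch of that equivalence (not the actual MIPS implementation, which also has to cope with highmem):

/* Hypothetical sketch, not the real arch/mips code: the remaining
 * streaming-DMA hook performs the same writeback the removed
 * arch_dma_cache_sync() wrapper did, keyed by physical address.
 */
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	/* assumes a lowmem buffer, so phys_to_virt() is valid */
	dma_sync_virt_for_device(phys_to_virt(paddr), size, dir);
}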
@@ -195,7 +195,6 @@ config PA11
 	depends on PA7000 || PA7100LC || PA7200 || PA7300LC
 	select ARCH_HAS_SYNC_DMA_FOR_CPU
 	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
-	select DMA_NONCOHERENT_CACHE_SYNC
 
 config PREFETCH
 	def_bool y
...
@@ -454,9 +454,3 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
 {
 	flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
 }
-
-void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-		enum dma_data_direction direction)
-{
-	flush_kernel_dcache_range((unsigned long)vaddr, size);
-}
@@ -117,8 +117,6 @@ struct dma_map_ops {
 	void (*sync_sg_for_device)(struct device *dev,
 			struct scatterlist *sg, int nents,
 			enum dma_data_direction dir);
-	void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
-			enum dma_data_direction direction);
 	int (*dma_supported)(struct device *dev, u64 mask);
 	u64 (*get_required_mask)(struct device *dev);
 	size_t (*max_mapping_size)(struct device *dev);
@@ -249,8 +247,6 @@ void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		gfp_t gfp, unsigned long attrs);
 void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
 		dma_addr_t dma_handle);
-void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-		enum dma_data_direction dir);
 int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		unsigned long attrs);
@@ -334,10 +330,6 @@ static inline void dmam_free_coherent(struct device *dev, size_t size,
 		void *vaddr, dma_addr_t dma_handle)
 {
 }
-static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-		enum dma_data_direction dir)
-{
-}
 static inline int dma_get_sgtable_attrs(struct device *dev,
 		struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
 		size_t size, unsigned long attrs)
...
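With the ops callback, the dma_cache_sync() declaration, and its !CONFIG_HAS_DMA stub all gone, any straggling caller now fails at compile time instead of silently becoming a no-op. A sketch of what a custom dma_map_ops can look like after this commit; the foo_* names are hypothetical:

#include <linux/dma-mapping.h>

static void foo_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	/* invalidate CPU caches before the CPU reads the buffer */
}

static void foo_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	/* write back CPU caches before the device reads the buffer */
}

static const struct dma_map_ops foo_dma_ops = {
	.sync_single_for_cpu	= foo_sync_single_for_cpu,
	.sync_single_for_device	= foo_sync_single_for_device,
	/* a .cache_sync member no longer exists after this commit */
};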
@@ -62,16 +62,6 @@ static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot,
 }
 #endif /* CONFIG_MMU */
 
-#ifdef CONFIG_DMA_NONCOHERENT_CACHE_SYNC
-void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-		enum dma_data_direction direction);
-#else
-static inline void arch_dma_cache_sync(struct device *dev, void *vaddr,
-		size_t size, enum dma_data_direction direction)
-{
-}
-#endif /* CONFIG_DMA_NONCOHERENT_CACHE_SYNC */
-
 #ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
 void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
 		enum dma_data_direction dir);
...
@@ -75,9 +75,6 @@ config ARCH_HAS_DMA_PREP_COHERENT
 config ARCH_HAS_FORCE_DMA_UNENCRYPTED
 	bool
 
-config DMA_NONCOHERENT_CACHE_SYNC
-	bool
-
 config DMA_VIRT_OPS
 	bool
 	depends on HAS_DMA
...
@@ -529,20 +529,6 @@ int dma_set_coherent_mask(struct device *dev, u64 mask)
 EXPORT_SYMBOL(dma_set_coherent_mask);
 #endif
 
-void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-		enum dma_data_direction dir)
-{
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-
-	if (dma_alloc_direct(dev, ops))
-		arch_dma_cache_sync(dev, vaddr, size, dir);
-	else if (ops->cache_sync)
-		ops->cache_sync(dev, vaddr, size, dir);
-}
-EXPORT_SYMBOL(dma_cache_sync);
-
 size_t dma_max_mapping_size(struct device *dev)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
...
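The function removed above was the last vaddr-based entry point in the core mapping code. Its direct-vs-ops dispatch survives in the dma_addr_t-based helpers; a lightly simplified sketch of how dma_sync_single_for_device() dispatches in this era (debug hooks omitted, exact body may differ):

void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		/* direct mapping: call the arch sync hooks directly */
		dma_direct_sync_single_for_device(dev, addr, size, dir);
	else if (ops->sync_single_for_device)
		/* custom ops: the same else-if pattern the removed code used */
		ops->sync_single_for_device(dev, addr, size, dir);
}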