Commit 0acbfc66 authored by Ralf Baechle

MIPS: DMA: Implement platform hook to perform post-DMA cache flushes.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent 25307380
@@ -69,4 +69,8 @@ static inline int plat_device_is_coherent(struct device *dev)
 #endif
 }
/*
 * Post-DMA cache-flush hook; this platform needs no extra cache
 * maintenance after a DMA transfer, so the hook is a no-op stub.
 */
static inline void plat_post_dma_flush(struct device *dev)
{
}
#endif /* __ASM_MACH_ATH25_DMA_COHERENCE_H */
@@ -45,4 +45,8 @@ static inline int plat_device_is_coherent(struct device *dev)
 	return 0;
 }
/*
 * Post-DMA cache-flush hook; this platform needs no extra cache
 * maintenance after a DMA transfer, so the hook is a no-op stub.
 */
static inline void plat_post_dma_flush(struct device *dev)
{
}
#endif /* __ASM_MACH_BMIPS_DMA_COHERENCE_H */
@@ -57,6 +57,10 @@ static inline int plat_device_is_coherent(struct device *dev)
 	return 1;
 }
/*
 * Post-DMA cache-flush hook; this platform needs no extra cache
 * maintenance after a DMA transfer, so the hook is a no-op stub.
 */
static inline void plat_post_dma_flush(struct device *dev)
{
}
dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
......
@@ -52,6 +52,10 @@ static inline int plat_device_is_coherent(struct device *dev)
 	return coherentio;
 }
/*
 * Post-DMA cache-flush hook; this platform needs no extra cache
 * maintenance after a DMA transfer, so the hook is a no-op stub.
 */
static inline void plat_post_dma_flush(struct device *dev)
{
}
#ifdef CONFIG_SWIOTLB
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
......
@@ -58,6 +58,10 @@ static inline int plat_dma_supported(struct device *dev, u64 mask)
 	return 1;
 }
/*
 * Post-DMA cache-flush hook; this platform needs no extra cache
 * maintenance after a DMA transfer, so the hook is a no-op stub.
 */
static inline void plat_post_dma_flush(struct device *dev)
{
}
static inline int plat_device_is_coherent(struct device *dev)
{
	return 1;		/* IP27 non-coherent mode is unsupported */
......
@@ -80,6 +80,10 @@ static inline int plat_dma_supported(struct device *dev, u64 mask)
 	return 1;
 }
/*
 * Post-DMA cache-flush hook; this platform needs no extra cache
 * maintenance after a DMA transfer, so the hook is a no-op stub.
 */
static inline void plat_post_dma_flush(struct device *dev)
{
}
static inline int plat_device_is_coherent(struct device *dev)
{
	return 0;		/* IP32 is non-coherent */
......
@@ -48,6 +48,10 @@ static inline int plat_dma_supported(struct device *dev, u64 mask)
 	return 1;
 }
/*
 * Post-DMA cache-flush hook; this platform needs no extra cache
 * maintenance after a DMA transfer, so the hook is a no-op stub.
 */
static inline void plat_post_dma_flush(struct device *dev)
{
}
static inline int plat_device_is_coherent(struct device *dev)
{
	return 0;
......
@@ -78,4 +78,8 @@ static inline int plat_device_is_coherent(struct device *dev)
 #endif /* CONFIG_DMA_NONCOHERENT */
 }
/*
 * Post-DMA cache-flush hook; this platform needs no extra cache
 * maintenance after a DMA transfer, so the hook is a no-op stub.
 */
static inline void plat_post_dma_flush(struct device *dev)
{
}
#endif /* __ASM_MACH_LOONGSON_DMA_COHERENCE_H */
@@ -258,7 +258,7 @@ static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
 	if (cpu_needs_post_dma_flush(dev))
 		__dma_sync(dma_addr_to_page(dev, dma_addr),
 			   dma_addr & ~PAGE_MASK, size, direction);
plat_post_dma_flush(dev);
	plat_unmap_dma_mem(dev, dma_addr, size, direction);
}
@@ -312,6 +312,7 @@ static void mips_dma_sync_single_for_cpu(struct device *dev,
 	if (cpu_needs_post_dma_flush(dev))
 		__dma_sync(dma_addr_to_page(dev, dma_handle),
 			   dma_handle & ~PAGE_MASK, size, direction);
plat_post_dma_flush(dev);
}

static void mips_dma_sync_single_for_device(struct device *dev,
@@ -331,6 +332,7 @@ static void mips_dma_sync_sg_for_cpu(struct device *dev,
 	for (i = 0; i < nelems; i++, sg++)
 		__dma_sync(sg_page(sg), sg->offset, sg->length,
 			   direction);
plat_post_dma_flush(dev);
}

static void mips_dma_sync_sg_for_device(struct device *dev,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment