Commit c30700db authored by Christoph Hellwig

dma-direct: provide generic support for uncached kernel segments

A few architectures support uncached kernel segments.  In that case we get
an uncached mapping for a given physical address by using an offset in the
uncached segment.  Implement support for this scheme in the generic
dma-direct code instead of duplicating it in arch hooks.
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent 67f30ad1
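
For context, a minimal sketch of the pair of hooks an architecture selecting this option is expected to provide, assuming a MIPS-style layout where the uncached segment is a fixed-offset alias of the linear map; UNCAC_BASE is an illustrative name, not something this patch defines:

/*
 * Illustrative sketch only: on an architecture whose uncached segment
 * aliases the linear map at a fixed offset (MIPS KSEG1 is the classic
 * example), both hooks reduce to simple address arithmetic.
 * UNCAC_BASE is a hypothetical name for the segment base.
 */
void *uncached_kernel_address(void *addr)
{
	/* cached linear-map address -> its uncached alias */
	return (void *)(UNCAC_BASE + __pa(addr));
}

void *cached_kernel_address(void *addr)
{
	/* uncached alias -> back to the cached linear map */
	return __va((unsigned long)addr - UNCAC_BASE);
}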
arch/Kconfig
@@ -260,6 +260,14 @@ config ARCH_HAS_SET_MEMORY
 config ARCH_HAS_SET_DIRECT_MAP
 	bool
 
+#
+# Select if arch has an uncached kernel segment and provides the
+# uncached_kernel_address / cached_kernel_address symbols to use it
+#
+config ARCH_HAS_UNCACHED_SEGMENT
+	select ARCH_HAS_DMA_PREP_COHERENT
+	bool
+
 # Select if arch init_task must go in the __init_task_data section
 config ARCH_TASK_STRUCT_ON_STACK
 	bool
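
For illustration, an architecture opts in from its own Kconfig; the entry below is hypothetical and not part of this patch. Note that selecting ARCH_HAS_UNCACHED_SEGMENT also pulls in ARCH_HAS_DMA_PREP_COHERENT via the select above:

# hypothetical arch/<myarch>/Kconfig fragment
config MYARCH
	def_bool y
	select ARCH_HAS_UNCACHED_SEGMENT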
include/linux/dma-noncoherent.h
@@ -80,4 +80,7 @@ static inline void arch_dma_prep_coherent(struct page *page, size_t size)
 }
 #endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */
 
+void *uncached_kernel_address(void *addr);
+void *cached_kernel_address(void *addr);
+
 #endif /* _LINUX_DMA_NONCOHERENT_H */
kernel/dma/direct.c
@@ -158,6 +158,13 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 		*dma_handle = phys_to_dma(dev, page_to_phys(page));
 	}
 	memset(ret, 0, size);
+
+	if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+	    !dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_NON_CONSISTENT)) {
+		arch_dma_prep_coherent(page, size);
+		ret = uncached_kernel_address(ret);
+	}
+
 	return ret;
 }
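
Nothing changes for consumers of the DMA API; a hedged sketch of the driver side follows, with 'dev' assumed to be a non-coherent device and all names invented. On an ARCH_HAS_UNCACHED_SEGMENT architecture the pointer returned below now lands in the uncached segment, already flushed by arch_dma_prep_coherent():

	/* hypothetical driver fragment, 'dev' assumed non-coherent */
	dma_addr_t ring_dma;
	void *ring = dma_alloc_coherent(dev, SZ_4K, &ring_dma, GFP_KERNEL);

	if (!ring)
		return -ENOMEM;
	/*
	 * CPU stores through 'ring' bypass the cache, so the device
	 * sees them at ring_dma without explicit sync calls.
	 */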
@@ -173,13 +180,18 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 	if (force_dma_unencrypted())
 		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
+
+	if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+	    !dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_NON_CONSISTENT))
+		cpu_addr = cached_kernel_address(cpu_addr);
+
 	__dma_direct_free_pages(dev, size, virt_to_page(cpu_addr));
 }
 
 void *dma_direct_alloc(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
-	if (!dev_is_dma_coherent(dev))
+	if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+	    !dev_is_dma_coherent(dev))
 		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
 	return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
 }
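
Two details in the free path above are worth spelling out: virt_to_page() is only defined for linear-map (cached) addresses, so the uncached alias has to be translated back via cached_kernel_address() before the pages are released; and DMA_ATTR_NON_CONSISTENT allocations never went through uncached_kernel_address() in the first place, so they are skipped. For contrast, a sketch of the NON_CONSISTENT usage model of this era, with invented names:

	/*
	 * Hypothetical: the caller keeps the cached mapping and manages
	 * coherency itself, per the DMA-attributes documentation.
	 */
	void *buf = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL,
				    DMA_ATTR_NON_CONSISTENT);

	/* ... CPU fills buf through the cached mapping ... */
	dma_cache_sync(dev, buf, size, DMA_TO_DEVICE);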
@@ -187,7 +199,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 void dma_direct_free(struct device *dev, size_t size,
 		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
 {
-	if (!dev_is_dma_coherent(dev))
+	if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+	    !dev_is_dma_coherent(dev))
 		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
 	else
 		dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
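
Note the effect of the IS_ENABLED() checks in dma_direct_alloc() and dma_direct_free(): once an architecture selects ARCH_HAS_UNCACHED_SEGMENT, the compiler drops the arch_dma_alloc()/arch_dma_free() fallbacks entirely and every non-coherent allocation goes through the generic uncached-segment path.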