Commit f0edfea8 authored by Christoph Hellwig

dma-mapping: move the remap helpers to a separate file

The dma remap code only makes sense for non-cache-coherent architectures
(or possibly the corner case of highmem CMA allocations) and is currently
used only by arm, arm64, csky and xtensa.  Split it out into a
separate file with a separate Kconfig symbol, which gets the right
copyright notice given that this code was written by Laura Abbott
while working for Code Aurora at that point.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Laura Abbott <labbott@redhat.com>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
parent 704f2c20
@@ -30,6 +30,7 @@ config ARM
 	select CPU_PM if (SUSPEND || CPU_IDLE)
 	select DCACHE_WORD_ACCESS if HAVE_EFFICIENT_UNALIGNED_ACCESS
 	select DMA_DIRECT_OPS if !MMU
+	select DMA_REMAP if MMU
 	select EDAC_SUPPORT
 	select EDAC_ATOMIC_SCRUB
 	select GENERIC_ALLOCATOR
@@ -82,6 +82,7 @@ config ARM64
 	select CRC32
 	select DCACHE_WORD_ACCESS
 	select DMA_DIRECT_OPS
+	select DMA_REMAP
 	select EDAC_SUPPORT
 	select FRAME_POINTER
 	select GENERIC_ALLOCATOR
@@ -9,6 +9,7 @@ config CSKY
 	select CLKSRC_OF
 	select DMA_DIRECT_OPS
 	select DMA_NONCOHERENT_OPS
+	select DMA_REMAP
 	select IRQ_DOMAIN
 	select HANDLE_DOMAIN_IRQ
 	select DW_APB_TIMER_OF
@@ -11,6 +11,7 @@ config XTENSA
 	select CLONE_BACKWARDS
 	select COMMON_CLK
 	select DMA_DIRECT_OPS
+	select DMA_REMAP if MMU
 	select GENERIC_ATOMIC64
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_IRQ_SHOW
@@ -51,3 +51,7 @@ config SWIOTLB
 	bool
 	select DMA_DIRECT_OPS
 	select NEED_DMA_MAP_STATE
+
+config DMA_REMAP
+	depends on MMU
+	bool
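DMA_REMAP is a hidden, arch-selected symbol: it has no prompt, so architectures opt in via select, as the arch hunks above do. A minimal sketch of how allocator code can then branch on the symbol at compile time (the dma_pick_vaddr() helper is hypothetical; this commit only adds the symbol, not any such guard):

#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Hypothetical: choose a CPU address for a freshly allocated page range. */
static void *dma_pick_vaddr(struct page *page, size_t size, pgprot_t prot)
{
#ifdef CONFIG_DMA_REMAP
	/* MMU present: build a fresh kernel mapping with the wanted pgprot. */
	return dma_common_contiguous_remap(page, PAGE_ALIGN(size), VM_USERMAP,
			prot, __builtin_return_address(0));
#else
	/* No MMU, no remapping: fall back to the linear-map address. */
	return page_address(page);
#endif
}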
@@ -7,4 +7,4 @@ obj-$(CONFIG_DMA_DIRECT_OPS) += direct.o
 obj-$(CONFIG_DMA_VIRT_OPS)	+= virt.o
 obj-$(CONFIG_DMA_API_DEBUG)	+= debug.o
 obj-$(CONFIG_SWIOTLB)		+= swiotlb.o
-
+obj-$(CONFIG_DMA_REMAP)		+= remap.o
@@ -262,87 +262,3 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 #endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */
 }
 EXPORT_SYMBOL(dma_common_mmap);
-
-#ifdef CONFIG_MMU
-static struct vm_struct *__dma_common_pages_remap(struct page **pages,
-			size_t size, unsigned long vm_flags, pgprot_t prot,
-			const void *caller)
-{
-	struct vm_struct *area;
-
-	area = get_vm_area_caller(size, vm_flags, caller);
-	if (!area)
-		return NULL;
-
-	if (map_vm_area(area, prot, pages)) {
-		vunmap(area->addr);
-		return NULL;
-	}
-
-	return area;
-}
-
-/*
- * remaps an array of PAGE_SIZE pages into another vm_area
- * Cannot be used in non-sleeping contexts
- */
-void *dma_common_pages_remap(struct page **pages, size_t size,
-			unsigned long vm_flags, pgprot_t prot,
-			const void *caller)
-{
-	struct vm_struct *area;
-
-	area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);
-	if (!area)
-		return NULL;
-
-	area->pages = pages;
-
-	return area->addr;
-}
-
-/*
- * remaps an allocated contiguous region into another vm_area.
- * Cannot be used in non-sleeping contexts
- */
-void *dma_common_contiguous_remap(struct page *page, size_t size,
-			unsigned long vm_flags,
-			pgprot_t prot, const void *caller)
-{
-	int i;
-	struct page **pages;
-	struct vm_struct *area;
-
-	pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL);
-	if (!pages)
-		return NULL;
-
-	for (i = 0; i < (size >> PAGE_SHIFT); i++)
-		pages[i] = nth_page(page, i);
-
-	area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);
-
-	kfree(pages);
-
-	if (!area)
-		return NULL;
-	return area->addr;
-}
-
-/*
- * unmaps a range previously mapped by dma_common_*_remap
- */
-void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags)
-{
-	struct vm_struct *area = find_vm_area(cpu_addr);
-
-	if (!area || (area->flags & vm_flags) != vm_flags) {
-		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
-		return;
-	}
-
-	unmap_kernel_range((unsigned long)cpu_addr, PAGE_ALIGN(size));
-	vunmap(cpu_addr);
-}
-#endif
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2014 The Linux Foundation
+ */
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+static struct vm_struct *__dma_common_pages_remap(struct page **pages,
+			size_t size, unsigned long vm_flags, pgprot_t prot,
+			const void *caller)
+{
+	struct vm_struct *area;
+
+	area = get_vm_area_caller(size, vm_flags, caller);
+	if (!area)
+		return NULL;
+
+	if (map_vm_area(area, prot, pages)) {
+		vunmap(area->addr);
+		return NULL;
+	}
+
+	return area;
+}
+
+/*
+ * Remaps an array of PAGE_SIZE pages into another vm_area.
+ * Cannot be used in non-sleeping contexts
+ */
+void *dma_common_pages_remap(struct page **pages, size_t size,
+			unsigned long vm_flags, pgprot_t prot,
+			const void *caller)
+{
+	struct vm_struct *area;
+
+	area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);
+	if (!area)
+		return NULL;
+
+	area->pages = pages;
+
+	return area->addr;
+}
+
+/*
+ * Remaps an allocated contiguous region into another vm_area.
+ * Cannot be used in non-sleeping contexts
+ */
+void *dma_common_contiguous_remap(struct page *page, size_t size,
+			unsigned long vm_flags,
+			pgprot_t prot, const void *caller)
+{
+	int i;
+	struct page **pages;
+	struct vm_struct *area;
+
+	pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL);
+	if (!pages)
+		return NULL;
+
+	for (i = 0; i < (size >> PAGE_SHIFT); i++)
+		pages[i] = nth_page(page, i);
+
+	area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);
+
+	kfree(pages);
+
+	if (!area)
+		return NULL;
+	return area->addr;
+}
+
+/*
+ * Unmaps a range previously mapped by dma_common_*_remap
+ */
+void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags)
+{
+	struct vm_struct *area = find_vm_area(cpu_addr);
+
+	if (!area || (area->flags & vm_flags) != vm_flags) {
+		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
+		return;
+	}
+
+	unmap_kernel_range((unsigned long)cpu_addr, PAGE_ALIGN(size));
+	vunmap(cpu_addr);
+}
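For context, a hedged sketch of how a noncoherent architecture's allocator might consume these helpers, in the style of the generic arch_dma_alloc()/arch_dma_free() hooks. The body is illustrative only: error handling is simplified, the arch-specific cache maintenance is elided, and none of this is part of this commit.

#include <linux/dma-direct.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	struct page *page;
	void *ret;

	size = PAGE_ALIGN(size);
	page = alloc_pages(gfp, get_order(size));
	if (!page)
		return NULL;

	/*
	 * A real implementation would flush any cached aliases of these
	 * pages here before handing out the uncached mapping (elided).
	 */
	ret = dma_common_contiguous_remap(page, size, VM_USERMAP,
			pgprot_noncached(PAGE_KERNEL),
			__builtin_return_address(0));
	if (!ret) {
		__free_pages(page, get_order(size));
		return NULL;
	}

	*dma_handle = phys_to_dma(dev, page_to_phys(page));
	return ret;
}

void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	struct page *page = pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_handle)));

	/* Tear down the remapped area, then return the pages. */
	dma_common_free_remap(vaddr, PAGE_ALIGN(size), VM_USERMAP);
	__free_pages(page, get_order(PAGE_ALIGN(size)));
}

Note that the vm_flags passed to dma_common_free_remap() must match those used at remap time, otherwise the find_vm_area() sanity check in the helper trips its WARN and the mapping is leaked.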