Commit e6475eb0 authored by Christoph Hellwig, committed by Joerg Roedel

iommu/dma: Don't depend on CONFIG_DMA_DIRECT_REMAP

For entirely dma coherent architectures there is no requirement to ever
remap dma coherent allocations.  Move all the remap and pool code under
IS_ENABLED() checks and drop the Kconfig dependency.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent efd9f10b
...@@ -97,7 +97,6 @@ config IOMMU_DMA ...@@ -97,7 +97,6 @@ config IOMMU_DMA
select IOMMU_IOVA select IOMMU_IOVA
select IRQ_MSI_IOMMU select IRQ_MSI_IOMMU
select NEED_SG_DMA_LENGTH select NEED_SG_DMA_LENGTH
depends on DMA_DIRECT_REMAP
config FSL_PAMU config FSL_PAMU
bool "Freescale IOMMU support" bool "Freescale IOMMU support"
......
...@@ -942,10 +942,11 @@ static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr) ...@@ -942,10 +942,11 @@ static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
struct page *page = NULL, **pages = NULL; struct page *page = NULL, **pages = NULL;
/* Non-coherent atomic allocation? Easy */ /* Non-coherent atomic allocation? Easy */
if (dma_free_from_pool(cpu_addr, alloc_size)) if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
dma_free_from_pool(cpu_addr, alloc_size))
return; return;
if (is_vmalloc_addr(cpu_addr)) { if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
/* /*
* If it the address is remapped, then it's either non-coherent * If it the address is remapped, then it's either non-coherent
* or highmem CMA, or an iommu_dma_alloc_remap() construction. * or highmem CMA, or an iommu_dma_alloc_remap() construction.
...@@ -989,7 +990,7 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size, ...@@ -989,7 +990,7 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
if (!page) if (!page)
return NULL; return NULL;
if (!coherent || PageHighMem(page)) { if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs); pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
cpu_addr = dma_common_contiguous_remap(page, alloc_size, cpu_addr = dma_common_contiguous_remap(page, alloc_size,
...@@ -1022,11 +1023,12 @@ static void *iommu_dma_alloc(struct device *dev, size_t size, ...@@ -1022,11 +1023,12 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
gfp |= __GFP_ZERO; gfp |= __GFP_ZERO;
if (gfpflags_allow_blocking(gfp) && if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
!(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs); return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs);
if (!gfpflags_allow_blocking(gfp) && !coherent) if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
!gfpflags_allow_blocking(gfp) && !coherent)
cpu_addr = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp); cpu_addr = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
else else
cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs); cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
...@@ -1058,7 +1060,7 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma, ...@@ -1058,7 +1060,7 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
if (off >= nr_pages || vma_pages(vma) > nr_pages - off) if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
return -ENXIO; return -ENXIO;
if (is_vmalloc_addr(cpu_addr)) { if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
struct page **pages = __iommu_dma_get_pages(cpu_addr); struct page **pages = __iommu_dma_get_pages(cpu_addr);
if (pages) if (pages)
...@@ -1080,7 +1082,7 @@ static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt, ...@@ -1080,7 +1082,7 @@ static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
struct page *page; struct page *page;
int ret; int ret;
if (is_vmalloc_addr(cpu_addr)) { if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
struct page **pages = __iommu_dma_get_pages(cpu_addr); struct page **pages = __iommu_dma_get_pages(cpu_addr);
if (pages) { if (pages) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment