Commit 42a0a1b0 authored by Linus Torvalds

Merge branch 'for-v3.9' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping

Pull DMA-mapping updates from Marek Szyprowski:
 "This time all patches are related only to ARM DMA-mapping subsystem.
  The main extension provided by this pull request is highmem support.
  Besides that it contains a bunch of small bugfixes and cleanups."

* 'for-v3.9' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping:
  ARM: DMA-mapping: fix memory leak in IOMMU dma-mapping implementation
  ARM: dma-mapping: Add maximum alignment order for dma iommu buffers
  ARM: dma-mapping: use himem for DMA buffers for IOMMU-mapped devices
  ARM: dma-mapping: add support for CMA regions placed in highmem zone
  arm: dma mapping: export arm iommu functions
  ARM: dma-mapping: Add arm_iommu_detach_device()
  ARM: dma-mapping: Add macro to_dma_iommu_mapping()
  ARM: dma-mapping: Set arm_dma_set_mask() for iommu->set_dma_mask()
  ARM: iommu: Include linux/kref.h in asm/dma-iommu.h
parents 52caa59e d5898291
@@ -77,6 +77,27 @@ config ARM_DMA_USE_IOMMU
     select ARM_HAS_SG_CHAIN
     select NEED_SG_DMA_LENGTH
 
+if ARM_DMA_USE_IOMMU
+
+config ARM_DMA_IOMMU_ALIGNMENT
+    int "Maximum PAGE_SIZE order of alignment for DMA IOMMU buffers"
+    range 4 9
+    default 8
+    help
+      The DMA mapping framework by default aligns all buffers to the
+      smallest PAGE_SIZE order which is greater than or equal to the
+      requested buffer size. This works well for buffers up to a few
+      hundred kilobytes, but for larger buffers it is just a waste of
+      address space. Drivers which have a relatively small addressing
+      window (like 64 MiB) might run out of virtual space with just a
+      few allocations.
+
+      With this parameter you can specify the maximum PAGE_SIZE order
+      for DMA IOMMU buffers. Larger buffers will be aligned only to
+      this specified order. The order is expressed as a power of two
+      multiplied by the PAGE_SIZE.
+
+endif
+
 config HAVE_PWM
     bool
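To make the help text concrete: the cap is applied in __alloc_iova() later in this diff, and the arithmetic works out as follows. The standalone program below is only an illustration; PAGE_SHIFT, get_order() and iova_align_order() here are local stand-ins, not the kernel implementations. With 4 KiB pages and the default value of 8, a 4 MiB buffer would naturally be aligned to order 10 (a 4 MiB boundary) but is capped to order 8, i.e. a 1 MiB boundary.

#include <stdio.h>

/* Hypothetical illustration of the ARM_DMA_IOMMU_ALIGNMENT cap; the
 * constants and helpers below only mirror the arithmetic used by
 * __alloc_iova() in this diff, they are not kernel code. */
#define PAGE_SHIFT              12          /* 4 KiB pages assumed */
#define PAGE_SIZE               (1UL << PAGE_SHIFT)
#define ARM_DMA_IOMMU_ALIGNMENT 8           /* the Kconfig default */

/* smallest order n such that (PAGE_SIZE << n) >= size */
static unsigned int get_order(unsigned long size)
{
    unsigned int order = 0;

    while ((PAGE_SIZE << order) < size)
        order++;
    return order;
}

/* alignment order actually used for an IOMMU buffer of 'size' bytes */
static unsigned int iova_align_order(unsigned long size)
{
    unsigned int order = get_order(size);

    if (order > ARM_DMA_IOMMU_ALIGNMENT)
        order = ARM_DMA_IOMMU_ALIGNMENT;
    return order;
}

int main(void)
{
    /* 4 MiB buffer: natural order is 10, capped to 8 (1 MiB alignment). */
    unsigned long size = 4UL << 20;

    printf("buffer %lu bytes: natural order %u, capped order %u\n",
           size, get_order(size), iova_align_order(size));
    return 0;
}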
@@ -27,4 +27,10 @@ struct pdev_archdata {
 #endif
 };
 
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+#define to_dma_iommu_mapping(dev) ((dev)->archdata.mapping)
+#else
+#define to_dma_iommu_mapping(dev) NULL
+#endif
+
 #endif
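A minimal usage sketch for the new accessor, assuming a hypothetical driver (foo_check_iommu() is not part of this commit): to_dma_iommu_mapping() returns the mapping stored in dev->archdata.mapping, or NULL when nothing is attached or when CONFIG_ARM_DMA_USE_IOMMU is disabled.

/* Hypothetical driver fragment, not from this commit; it only shows how
 * the new to_dma_iommu_mapping() accessor might be consulted. */
#include <linux/device.h>
#include <asm/dma-iommu.h>

static int foo_check_iommu(struct device *dev)
{
    struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

    if (!mapping) {
        /* Either no IOMMU mapping is attached, or the kernel was
         * built without CONFIG_ARM_DMA_USE_IOMMU. */
        dev_info(dev, "no ARM IOMMU dma-mapping attached\n");
        return 0;
    }

    dev_info(dev, "IOMMU dma-mapping at %p\n", mapping);
    return 1;
}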
@@ -7,6 +7,7 @@
 #include <linux/scatterlist.h>
 #include <linux/dma-debug.h>
 #include <linux/kmemcheck.h>
+#include <linux/kref.h>
 
 struct dma_iommu_mapping {
     /* iommu specific data */
@@ -29,6 +30,7 @@ void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);
 
 int arm_iommu_attach_device(struct device *dev,
                             struct dma_iommu_mapping *mapping);
+void arm_iommu_detach_device(struct device *dev);
 
 #endif /* __KERNEL__ */
 #endif
@@ -186,13 +186,24 @@ static u64 get_coherent_dma_mask(struct device *dev)
 static void __dma_clear_buffer(struct page *page, size_t size)
 {
-    void *ptr;
     /*
      * Ensure that the allocated pages are zeroed, and that any data
      * lurking in the kernel direct-mapped region is invalidated.
      */
-    ptr = page_address(page);
-    if (ptr) {
+    if (PageHighMem(page)) {
+        phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
+        phys_addr_t end = base + size;
+        while (size > 0) {
+            void *ptr = kmap_atomic(page);
+            memset(ptr, 0, PAGE_SIZE);
+            dmac_flush_range(ptr, ptr + PAGE_SIZE);
+            kunmap_atomic(ptr);
+            page++;
+            size -= PAGE_SIZE;
+        }
+        outer_flush_range(base, end);
+    } else {
+        void *ptr = page_address(page);
         memset(ptr, 0, size);
         dmac_flush_range(ptr, ptr + size);
         outer_flush_range(__pa(ptr), __pa(ptr) + size);
     }
@@ -243,7 +254,8 @@ static void __dma_free_buffer(struct page *page, size_t size)
 #endif
 
 static void *__alloc_from_contiguous(struct device *dev, size_t size,
-                                     pgprot_t prot, struct page **ret_page);
+                                     pgprot_t prot, struct page **ret_page,
+                                     const void *caller);
 
 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
                                   pgprot_t prot, struct page **ret_page,
@@ -346,10 +358,11 @@ static int __init atomic_pool_init(void)
         goto no_pages;
 
     if (IS_ENABLED(CONFIG_CMA))
-        ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page);
+        ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
+                                      atomic_pool_init);
     else
         ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
-                                   &page, NULL);
+                                   &page, atomic_pool_init);
     if (ptr) {
         int i;
@@ -542,27 +555,41 @@ static int __free_from_pool(void *start, size_t size)
 }
 
 static void *__alloc_from_contiguous(struct device *dev, size_t size,
-                                     pgprot_t prot, struct page **ret_page)
+                                     pgprot_t prot, struct page **ret_page,
+                                     const void *caller)
 {
     unsigned long order = get_order(size);
     size_t count = size >> PAGE_SHIFT;
     struct page *page;
+    void *ptr;
 
     page = dma_alloc_from_contiguous(dev, count, order);
     if (!page)
         return NULL;
 
     __dma_clear_buffer(page, size);
-    __dma_remap(page, size, prot);
 
+    if (PageHighMem(page)) {
+        ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
+        if (!ptr) {
+            dma_release_from_contiguous(dev, page, count);
+            return NULL;
+        }
+    } else {
+        __dma_remap(page, size, prot);
+        ptr = page_address(page);
+    }
     *ret_page = page;
-    return page_address(page);
+    return ptr;
 }
 
 static void __free_from_contiguous(struct device *dev, struct page *page,
-                                   size_t size)
+                                   void *cpu_addr, size_t size)
 {
-    __dma_remap(page, size, pgprot_kernel);
+    if (PageHighMem(page))
+        __dma_free_remap(cpu_addr, size);
+    else
+        __dma_remap(page, size, pgprot_kernel);
     dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
 }
@@ -583,9 +610,9 @@ static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
 #define __get_dma_pgprot(attrs, prot) __pgprot(0)
 #define __alloc_remap_buffer(dev, size, gfp, prot, ret, c) NULL
 #define __alloc_from_pool(size, ret_page) NULL
-#define __alloc_from_contiguous(dev, size, prot, ret) NULL
+#define __alloc_from_contiguous(dev, size, prot, ret, c) NULL
 #define __free_from_pool(cpu_addr, size) 0
-#define __free_from_contiguous(dev, page, size) do { } while (0)
+#define __free_from_contiguous(dev, page, cpu_addr, size) do { } while (0)
 #define __dma_free_remap(cpu_addr, size) do { } while (0)
 
 #endif /* CONFIG_MMU */
@@ -645,7 +672,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
     else if (!IS_ENABLED(CONFIG_CMA))
         addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
     else
-        addr = __alloc_from_contiguous(dev, size, prot, &page);
+        addr = __alloc_from_contiguous(dev, size, prot, &page, caller);
 
     if (addr)
         *handle = pfn_to_dma(dev, page_to_pfn(page));
@@ -739,7 +766,7 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
          * Non-atomic allocations cannot be freed with IRQs disabled
          */
         WARN_ON(irqs_disabled());
-        __free_from_contiguous(dev, page, size);
+        __free_from_contiguous(dev, page, cpu_addr, size);
     }
 }
@@ -1002,6 +1029,9 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
     unsigned int count, start;
     unsigned long flags;
 
+    if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
+        order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;
+
     count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
              (1 << mapping->order) - 1) >> mapping->order;
@@ -1068,12 +1098,17 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
         return pages;
     }
 
+    /*
+     * IOMMU can map any pages, so himem can also be used here
+     */
+    gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
+
     while (count) {
         int j, order = __fls(count);
 
-        pages[i] = alloc_pages(gfp | __GFP_NOWARN, order);
+        pages[i] = alloc_pages(gfp, order);
         while (!pages[i] && order)
-            pages[i] = alloc_pages(gfp | __GFP_NOWARN, --order);
+            pages[i] = alloc_pages(gfp, --order);
         if (!pages[i])
             goto error;
@@ -1257,11 +1292,11 @@ static void *__iommu_alloc_atomic(struct device *dev, size_t size,
     return NULL;
 }
 
-static void __iommu_free_atomic(struct device *dev, struct page **pages,
+static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
                                 dma_addr_t handle, size_t size)
 {
     __iommu_remove_mapping(dev, handle, size);
-    __free_from_pool(page_address(pages[0]), size);
+    __free_from_pool(cpu_addr, size);
 }
 
 static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
@@ -1344,7 +1379,7 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
     }
 
     if (__in_atomic_pool(cpu_addr, size)) {
-        __iommu_free_atomic(dev, pages, handle, size);
+        __iommu_free_atomic(dev, cpu_addr, handle, size);
         return;
     }
@@ -1732,6 +1767,8 @@ struct dma_map_ops iommu_ops = {
     .unmap_sg = arm_iommu_unmap_sg,
     .sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu,
     .sync_sg_for_device = arm_iommu_sync_sg_for_device,
+
+    .set_dma_mask = arm_dma_set_mask,
 };
 
 struct dma_map_ops iommu_coherent_ops = {
@@ -1745,6 +1782,8 @@ struct dma_map_ops iommu_coherent_ops = {
     .map_sg = arm_coherent_iommu_map_sg,
     .unmap_sg = arm_coherent_iommu_unmap_sg,
+
+    .set_dma_mask = arm_dma_set_mask,
 };
 
 /**
@@ -1799,6 +1838,7 @@ arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
 err:
     return ERR_PTR(err);
 }
+EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);
 
 static void release_iommu_mapping(struct kref *kref)
 {
@@ -1815,6 +1855,7 @@ void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
     if (mapping)
         kref_put(&mapping->kref, release_iommu_mapping);
 }
+EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);
 
 /**
  * arm_iommu_attach_device
@@ -1843,5 +1884,32 @@ int arm_iommu_attach_device(struct device *dev,
     pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
     return 0;
 }
+EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
+
+/**
+ * arm_iommu_detach_device
+ * @dev: valid struct device pointer
+ *
+ * Detaches the provided device from a previously attached map.
+ * This voids the dma operations (dma_map_ops pointer)
+ */
+void arm_iommu_detach_device(struct device *dev)
+{
+    struct dma_iommu_mapping *mapping;
+
+    mapping = to_dma_iommu_mapping(dev);
+    if (!mapping) {
+        dev_warn(dev, "Not attached\n");
+        return;
+    }
+
+    iommu_detach_device(mapping->domain, dev);
+    kref_put(&mapping->kref, release_iommu_mapping);
+    mapping = NULL;
+    set_dma_ops(dev, NULL);
+
+    pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
+}
+EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
 
 #endif
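Since arm_iommu_create_mapping(), arm_iommu_attach_device(), arm_iommu_release_mapping() and the new arm_iommu_detach_device() are now exported, modular drivers can manage the per-device IOMMU mapping themselves. The fragment below is a hypothetical sketch of that lifecycle; the foo_* functions, the 0x80000000 base, the 128 MiB window and the order 0 argument are illustrative choices, not taken from this commit.

/* Hypothetical module fragment; only the exported arm_iommu_* calls are
 * from this pull request, everything else is illustrative. */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/sizes.h>
#include <asm/dma-iommu.h>

static struct dma_iommu_mapping *foo_mapping;

static int foo_iommu_init(struct device *dev)
{
    int ret;

    /* 128 MiB of IO virtual address space at 0x80000000, managed in
     * single-page (order 0) allocation granules. */
    foo_mapping = arm_iommu_create_mapping(dev->bus, 0x80000000, SZ_128M, 0);
    if (IS_ERR(foo_mapping))
        return PTR_ERR(foo_mapping);

    ret = arm_iommu_attach_device(dev, foo_mapping);
    if (ret < 0) {
        arm_iommu_release_mapping(foo_mapping);
        return ret;
    }
    return 0;
}

static void foo_iommu_exit(struct device *dev)
{
    /* Undo the attach, then drop our reference on the mapping. */
    arm_iommu_detach_device(dev);
    arm_iommu_release_mapping(foo_mapping);
}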