Commit 7474043e authored by Linus Torvalds

Merge branch 'for-3.15' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping

Pull DMA-mapping updates from Marek Szyprowski:
 "This contains an extension to the dma-mapping subsystem for more
  efficient handling of the IO address space on the ARM architecture"

* 'for-3.15' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping:
  arm: dma-mapping: remove order parameter from arm_iommu_create_mapping()
  arm: dma-mapping: Add support to extend DMA IOMMU mappings
parents b9f2b21a 68efd7d2
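
The caller-visible change from these two patches is that arm_iommu_create_mapping() no longer takes an order argument: allocation granularity is fixed at one page, and the IO address space is backed by an array of page-sized bitmaps that is extended on demand instead of by one large bitmap sized up front. A minimal before/after sketch for a hypothetical driver (the real call-site conversions are in the Exynos and shmobile hunks below; SZ_1G comes from linux/sizes.h):

	/* before 3.15: the order argument set the allocation granularity */
	mapping = arm_iommu_create_mapping(&platform_bus_type, 0x20000000,
					   SZ_1G, 0);

	/* after: granularity is always PAGE_SIZE, the order argument is gone */
	mapping = arm_iommu_create_mapping(&platform_bus_type, 0x20000000,
					   SZ_1G);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);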
arch/arm/include/asm/dma-iommu.h

@@ -13,9 +13,12 @@ struct dma_iommu_mapping {
 	/* iommu specific data */
 	struct iommu_domain	*domain;
 
-	void			*bitmap;
-	size_t			bits;
-	unsigned int		order;
+	unsigned long		**bitmaps;	/* array of bitmaps */
+	unsigned int		nr_bitmaps;	/* nr of elements in array */
+	unsigned int		extensions;
+	size_t			bitmap_size;	/* size of a single bitmap */
+	size_t			bits;		/* per bitmap */
+	unsigned int		size;		/* per bitmap */
 	dma_addr_t		base;
 
 	spinlock_t		lock;
@@ -23,8 +26,7 @@ struct dma_iommu_mapping {
 };
 
 struct dma_iommu_mapping *
-arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
-			 int order);
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size);
 
 void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);
...
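
With the old single bitmap, an IO address decomposed directly into one bit offset; with the array, a lookup first picks the bitmap covering the address and then the bit inside it. Schematically (this is exactly the computation the reworked __free_iova() performs below):

	bitmap_index = (addr - mapping->base) / mapping->size;
	bit = (addr - (mapping->base + mapping->size * bitmap_index)) >> PAGE_SHIFT;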
arch/arm/mm/dma-mapping.c

@@ -1069,6 +1069,8 @@ fs_initcall(dma_debug_do_init);
 
 /* IOMMU */
 
+static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
+
 static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
 				      size_t size)
 {
@@ -1076,41 +1078,87 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
 	unsigned int align = 0;
 	unsigned int count, start;
 	unsigned long flags;
+	dma_addr_t iova;
+	int i;
 
 	if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
 		order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;
 
-	count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
-		 (1 << mapping->order) - 1) >> mapping->order;
-
-	if (order > mapping->order)
-		align = (1 << (order - mapping->order)) - 1;
+	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	align = (1 << order) - 1;
 
 	spin_lock_irqsave(&mapping->lock, flags);
-	start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
-					   count, align);
-	if (start > mapping->bits) {
-		spin_unlock_irqrestore(&mapping->lock, flags);
-		return DMA_ERROR_CODE;
+	for (i = 0; i < mapping->nr_bitmaps; i++) {
+		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
+				mapping->bits, 0, count, align);
+
+		if (start > mapping->bits)
+			continue;
+
+		bitmap_set(mapping->bitmaps[i], start, count);
+		break;
 	}
 
-	bitmap_set(mapping->bitmap, start, count);
+	/*
+	 * No unused range found. Try to extend the existing mapping
+	 * and perform a second attempt to reserve an IO virtual
+	 * address range of size bytes.
+	 */
+	if (i == mapping->nr_bitmaps) {
+		if (extend_iommu_mapping(mapping)) {
+			spin_unlock_irqrestore(&mapping->lock, flags);
+			return DMA_ERROR_CODE;
+		}
+
+		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
+				mapping->bits, 0, count, align);
+
+		if (start > mapping->bits) {
+			spin_unlock_irqrestore(&mapping->lock, flags);
+			return DMA_ERROR_CODE;
+		}
+
+		bitmap_set(mapping->bitmaps[i], start, count);
+	}
 	spin_unlock_irqrestore(&mapping->lock, flags);
 
-	return mapping->base + (start << (mapping->order + PAGE_SHIFT));
+	iova = mapping->base + (mapping->size * i);
+	iova += start << PAGE_SHIFT;
+
+	return iova;
 }
 
 static inline void __free_iova(struct dma_iommu_mapping *mapping,
 			       dma_addr_t addr, size_t size)
 {
-	unsigned int start = (addr - mapping->base) >>
-			     (mapping->order + PAGE_SHIFT);
-	unsigned int count = ((size >> PAGE_SHIFT) +
-			      (1 << mapping->order) - 1) >> mapping->order;
+	unsigned int start, count;
 	unsigned long flags;
+	dma_addr_t bitmap_base;
+	u32 bitmap_index;
+
+	if (!size)
+		return;
+
+	bitmap_index = (u32) (addr - mapping->base) / (u32) mapping->size;
+	BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions);
+
+	bitmap_base = mapping->base + mapping->size * bitmap_index;
+
+	start = (addr - bitmap_base) >> PAGE_SHIFT;
+
+	if (addr + size > bitmap_base + mapping->size) {
+		/*
+		 * The address range to be freed reaches into the iova
+		 * range of the next bitmap. This should not happen as
+		 * we don't allow this in __alloc_iova (at the
+		 * moment).
+		 */
+		BUG();
+	} else
+		count = size >> PAGE_SHIFT;
 
 	spin_lock_irqsave(&mapping->lock, flags);
-	bitmap_clear(mapping->bitmap, start, count);
+	bitmap_clear(mapping->bitmaps[bitmap_index], start, count);
 	spin_unlock_irqrestore(&mapping->lock, flags);
 }
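
The alloc/free pair above is easiest to follow outside the kernel. Below is a small, self-contained userspace model of the same two-level scheme: scan each bitmap for a free run, add one more bitmap if all are full, then retry once. All names here (iova_pool, pool_alloc, pool_extend, find_zero_run) are invented for illustration and are not the kernel's; the kernel version additionally holds mapping->lock across the whole operation, which is why its extension path allocates with GFP_ATOMIC.

	#include <stdio.h>
	#include <stdlib.h>
	#include <stdint.h>

	#define PAGE_SHIFT	12
	#define BITS_PER_BITMAP	64			/* tiny, for demonstration */

	struct iova_pool {
		uint64_t *bitmaps[8];			/* up to 8 extensions */
		unsigned int nr_bitmaps;
		unsigned int extensions;
		uint64_t base;
		uint64_t size;				/* IOVA span covered by one bitmap */
	};

	/* naive linear scan, standing in for bitmap_find_next_zero_area() */
	static int find_zero_run(const uint64_t *bm, unsigned int bits,
				 unsigned int count)
	{
		for (unsigned int s = 0; s + count <= bits; s++) {
			unsigned int i;

			for (i = 0; i < count; i++)
				if (bm[(s + i) / 64] & (1ULL << ((s + i) % 64)))
					break;
			if (i == count)
				return s;
		}
		return -1;
	}

	static void set_run(uint64_t *bm, unsigned int start, unsigned int count)
	{
		for (unsigned int i = 0; i < count; i++)
			bm[(start + i) / 64] |= 1ULL << ((start + i) % 64);
	}

	/* counterpart of extend_iommu_mapping(): add one more fixed-size bitmap */
	static int pool_extend(struct iova_pool *p)
	{
		if (p->nr_bitmaps >= p->extensions)
			return -1;
		p->bitmaps[p->nr_bitmaps] = calloc(BITS_PER_BITMAP / 64,
						   sizeof(uint64_t));
		if (!p->bitmaps[p->nr_bitmaps])
			return -1;
		p->nr_bitmaps++;
		return 0;
	}

	/* counterpart of __alloc_iova(): scan, extend on failure, retry once */
	static uint64_t pool_alloc(struct iova_pool *p, size_t bytes)
	{
		unsigned int count = (bytes + (1u << PAGE_SHIFT) - 1) >> PAGE_SHIFT;
		unsigned int i;
		int start = -1;

		for (i = 0; i < p->nr_bitmaps; i++) {
			start = find_zero_run(p->bitmaps[i], BITS_PER_BITMAP, count);
			if (start >= 0)
				break;
		}
		if (start < 0) {			/* every bitmap is full */
			if (pool_extend(p))
				return 0;		/* stands in for DMA_ERROR_CODE */
			start = find_zero_run(p->bitmaps[i], BITS_PER_BITMAP, count);
			if (start < 0)
				return 0;
		}
		set_run(p->bitmaps[i], start, count);
		return p->base + p->size * i + ((uint64_t)start << PAGE_SHIFT);
	}

	int main(void)
	{
		struct iova_pool p = {
			.extensions = 8,
			.base = 0x20000000,
			.size = (uint64_t)BITS_PER_BITMAP << PAGE_SHIFT,
		};

		pool_extend(&p);	/* create_mapping() allocates bitmaps[0] up front */

		/* 64 one-page allocations fill bitmap 0; the 65th lands in bitmap 1 */
		for (int n = 0; n < 65; n++)
			printf("iova = %#llx\n",
			       (unsigned long long)pool_alloc(&p, 4096));
		return 0;
	}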
@@ -1875,8 +1923,7 @@ struct dma_map_ops iommu_coherent_ops = {
  * arm_iommu_create_mapping
  * @bus: pointer to the bus holding the client device (for IOMMU calls)
  * @base: start address of the valid IO address space
- * @size: size of the valid IO address space
- * @order: accuracy of the IO addresses allocations
+ * @size: maximum size of the valid IO address space
  *
  * Creates a mapping structure which holds information about used/unused
  * IO address ranges, which is required to perform memory allocation and
@@ -1886,38 +1933,54 @@ struct dma_map_ops iommu_coherent_ops = {
  * arm_iommu_attach_device function.
  */
 struct dma_iommu_mapping *
-arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
-			 int order)
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size)
 {
-	unsigned int count = size >> (PAGE_SHIFT + order);
-	unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
+	unsigned int bits = size >> PAGE_SHIFT;
+	unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
 	struct dma_iommu_mapping *mapping;
+	int extensions = 1;
 	int err = -ENOMEM;
 
-	if (!count)
+	if (!bitmap_size)
 		return ERR_PTR(-EINVAL);
 
+	if (bitmap_size > PAGE_SIZE) {
+		extensions = bitmap_size / PAGE_SIZE;
+		bitmap_size = PAGE_SIZE;
+	}
+
 	mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
 	if (!mapping)
 		goto err;
 
-	mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
-	if (!mapping->bitmap)
+	mapping->bitmap_size = bitmap_size;
+	mapping->bitmaps = kzalloc(extensions * sizeof(unsigned long *),
+				GFP_KERNEL);
+	if (!mapping->bitmaps)
 		goto err2;
 
+	mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL);
+	if (!mapping->bitmaps[0])
+		goto err3;
+
+	mapping->nr_bitmaps = 1;
+	mapping->extensions = extensions;
 	mapping->base = base;
+	mapping->size = bitmap_size << PAGE_SHIFT;
 	mapping->bits = BITS_PER_BYTE * bitmap_size;
-	mapping->order = order;
 
 	spin_lock_init(&mapping->lock);
 
 	mapping->domain = iommu_domain_alloc(bus);
 	if (!mapping->domain)
-		goto err3;
+		goto err4;
 
 	kref_init(&mapping->kref);
 	return mapping;
+err4:
+	kfree(mapping->bitmaps[0]);
 err3:
-	kfree(mapping->bitmap);
+	kfree(mapping->bitmaps);
 err2:
 	kfree(mapping);
 err:
@@ -1927,14 +1990,35 @@ EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);
 
 static void release_iommu_mapping(struct kref *kref)
 {
+	int i;
 	struct dma_iommu_mapping *mapping =
 		container_of(kref, struct dma_iommu_mapping, kref);
 
 	iommu_domain_free(mapping->domain);
-	kfree(mapping->bitmap);
+	for (i = 0; i < mapping->nr_bitmaps; i++)
+		kfree(mapping->bitmaps[i]);
+	kfree(mapping->bitmaps);
 	kfree(mapping);
 }
 
+static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
+{
+	int next_bitmap;
+
+	if (mapping->nr_bitmaps > mapping->extensions)
+		return -EINVAL;
+
+	next_bitmap = mapping->nr_bitmaps;
+	mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size,
+						GFP_ATOMIC);
+	if (!mapping->bitmaps[next_bitmap])
+		return -ENOMEM;
+
+	mapping->nr_bitmaps++;
+
+	return 0;
+}
+
 void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
 {
 	if (mapping)
...
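
A worked example of the sizing logic in arm_iommu_create_mapping(), using the 1 GiB Exynos window from the hunks below and assuming 4 KiB pages: size >> PAGE_SHIFT gives 262144 bits, so a flat bitmap would need 32 KiB. That exceeds PAGE_SIZE, so bitmap_size is capped at one 4 KiB page and extensions becomes 32 KiB / 4 KiB = 8. Only bitmaps[0] is allocated at create time; the remaining seven are added one at a time by extend_iommu_mapping() the first time an allocation finds all existing bitmaps full, using GFP_ATOMIC because the caller already holds mapping->lock.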
drivers/gpu/drm/exynos/exynos_drm_drv.h

@@ -237,7 +237,6 @@ struct drm_exynos_file_private {
  *	otherwise default one.
  * @da_space_size: size of device address space.
  *	if 0 then default value is used for it.
- * @da_space_order: order to device address space.
  */
 struct exynos_drm_private {
 	struct drm_fb_helper *fb_helper;
@@ -255,7 +254,6 @@ struct exynos_drm_private {
 	unsigned long da_start;
 	unsigned long da_space_size;
-	unsigned long da_space_order;
 };
 
 /*
...
drivers/gpu/drm/exynos/exynos_drm_iommu.c

@@ -36,12 +36,10 @@ int drm_create_iommu_mapping(struct drm_device *drm_dev)
 		priv->da_start = EXYNOS_DEV_ADDR_START;
 	if (!priv->da_space_size)
 		priv->da_space_size = EXYNOS_DEV_ADDR_SIZE;
-	if (!priv->da_space_order)
-		priv->da_space_order = EXYNOS_DEV_ADDR_ORDER;
 
 	mapping = arm_iommu_create_mapping(&platform_bus_type, priv->da_start,
-						priv->da_space_size,
-						priv->da_space_order);
+						priv->da_space_size);
 	if (IS_ERR(mapping))
 		return PTR_ERR(mapping);
...
drivers/gpu/drm/exynos/exynos_drm_iommu.h

@@ -14,7 +14,6 @@
 
 #define EXYNOS_DEV_ADDR_START	0x20000000
 #define EXYNOS_DEV_ADDR_SIZE	0x40000000
-#define EXYNOS_DEV_ADDR_ORDER	0x0
 
 #ifdef CONFIG_DRM_EXYNOS_IOMMU
...
drivers/iommu/shmobile-iommu.c

@@ -343,7 +343,7 @@ static int shmobile_iommu_add_device(struct device *dev)
 	mapping = archdata->iommu_mapping;
 	if (!mapping) {
 		mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
-						   L1_LEN << 20, 0);
+						   L1_LEN << 20);
 		if (IS_ERR(mapping))
 			return PTR_ERR(mapping);
 		archdata->iommu_mapping = mapping;
...