Commit 9eef8b8c authored by Christoph Hellwig

arm: implement ->mapping_error

DMA_ERROR_CODE is going to go away, so don't rely on it.
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent a760088b
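
For context (not part of this commit): the generic DMA layer consumes the new callback through dma_mapping_error(). The sketch below approximates how include/linux/dma-mapping.h of this era dispatches to ->mapping_error when an architecture provides one; treat the exact body as an approximation, not a verbatim copy.

/* Approximate sketch of the generic helper that consumes ->mapping_error. */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	debug_dma_mapping_error(dev, dma_addr);
	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);

	/* No callback: the architecture reports no mapping errors here. */
	return 0;
}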
@@ -33,6 +33,7 @@
 #include <linux/scatterlist.h>

 #include <asm/cacheflush.h>
+#include <asm/dma-iommu.h>

 #undef STATS
@@ -256,7 +257,7 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
 	if (buf == NULL) {
 		dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
 			__func__, ptr);
-		return DMA_ERROR_CODE;
+		return ARM_MAPPING_ERROR;
 	}
 	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
@@ -326,7 +327,7 @@ static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
 	ret = needs_bounce(dev, dma_addr, size);
 	if (ret < 0)
-		return DMA_ERROR_CODE;
+		return ARM_MAPPING_ERROR;
 	if (ret == 0) {
 		arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
@@ -335,7 +336,7 @@ static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
 	if (PageHighMem(page)) {
 		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
-		return DMA_ERROR_CODE;
+		return ARM_MAPPING_ERROR;
 	}
 	return map_single(dev, page_address(page) + offset, size, dir, attrs);
@@ -452,6 +453,11 @@ static int dmabounce_set_mask(struct device *dev, u64 dma_mask)
 	return arm_dma_ops.set_dma_mask(dev, dma_mask);
 }

+static int dmabounce_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	return arm_dma_ops.mapping_error(dev, dma_addr);
+}
+
 static const struct dma_map_ops dmabounce_ops = {
 	.alloc = arm_dma_alloc,
 	.free = arm_dma_free,
@@ -466,6 +472,7 @@ static const struct dma_map_ops dmabounce_ops = {
 	.sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
 	.sync_sg_for_device = arm_dma_sync_sg_for_device,
 	.set_dma_mask = dmabounce_set_mask,
+	.mapping_error = dmabounce_mapping_error,
 };
 static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
......
@@ -9,6 +9,8 @@
 #include <linux/kmemcheck.h>
 #include <linux/kref.h>

+#define ARM_MAPPING_ERROR (~(dma_addr_t)0x0)
+
 struct dma_iommu_mapping {
 	/* iommu specific data */
 	struct iommu_domain *domain;
......
@@ -12,7 +12,6 @@
 #include <xen/xen.h>
 #include <asm/xen/hypervisor.h>

-#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
 extern const struct dma_map_ops arm_dma_ops;
 extern const struct dma_map_ops arm_coherent_dma_ops;
......
@@ -180,6 +180,11 @@ static void arm_dma_sync_single_for_device(struct device *dev,
 	__dma_page_cpu_to_dev(page, offset, size, dir);
 }

+static int arm_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+	return dma_addr == ARM_MAPPING_ERROR;
+}
+
 const struct dma_map_ops arm_dma_ops = {
 	.alloc = arm_dma_alloc,
 	.free = arm_dma_free,
@@ -193,6 +198,7 @@ const struct dma_map_ops arm_dma_ops = {
 	.sync_single_for_device = arm_dma_sync_single_for_device,
 	.sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
 	.sync_sg_for_device = arm_dma_sync_sg_for_device,
+	.mapping_error = arm_dma_mapping_error,
 };
 EXPORT_SYMBOL(arm_dma_ops);
@@ -211,6 +217,7 @@ const struct dma_map_ops arm_coherent_dma_ops = {
 	.get_sgtable = arm_dma_get_sgtable,
 	.map_page = arm_coherent_dma_map_page,
 	.map_sg = arm_dma_map_sg,
+	.mapping_error = arm_dma_mapping_error,
 };
 EXPORT_SYMBOL(arm_coherent_dma_ops);
@@ -799,7 +806,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 	gfp &= ~(__GFP_COMP);
 	args.gfp = gfp;
-	*handle = DMA_ERROR_CODE;
+	*handle = ARM_MAPPING_ERROR;
 	allowblock = gfpflags_allow_blocking(gfp);
 	cma = allowblock ? dev_get_cma_area(dev) : false;
@@ -1254,7 +1261,7 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
 	if (i == mapping->nr_bitmaps) {
 		if (extend_iommu_mapping(mapping)) {
 			spin_unlock_irqrestore(&mapping->lock, flags);
-			return DMA_ERROR_CODE;
+			return ARM_MAPPING_ERROR;
 		}
 		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
@@ -1262,7 +1269,7 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
 		if (start > mapping->bits) {
 			spin_unlock_irqrestore(&mapping->lock, flags);
-			return DMA_ERROR_CODE;
+			return ARM_MAPPING_ERROR;
 		}
 		bitmap_set(mapping->bitmaps[i], start, count);
@@ -1445,7 +1452,7 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
 	int i;
 	dma_addr = __alloc_iova(mapping, size);
-	if (dma_addr == DMA_ERROR_CODE)
+	if (dma_addr == ARM_MAPPING_ERROR)
 		return dma_addr;
 	iova = dma_addr;
@@ -1472,7 +1479,7 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
 fail:
 	iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
 	__free_iova(mapping, dma_addr, size);
-	return DMA_ERROR_CODE;
+	return ARM_MAPPING_ERROR;
 }
 static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
@@ -1533,7 +1540,7 @@ static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
 		return NULL;
 	*handle = __iommu_create_mapping(dev, &page, size, attrs);
-	if (*handle == DMA_ERROR_CODE)
+	if (*handle == ARM_MAPPING_ERROR)
 		goto err_mapping;
 	return addr;
@@ -1561,7 +1568,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
 	struct page **pages;
 	void *addr = NULL;
-	*handle = DMA_ERROR_CODE;
+	*handle = ARM_MAPPING_ERROR;
 	size = PAGE_ALIGN(size);
 	if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp))
@@ -1582,7 +1589,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
 		return NULL;
 	*handle = __iommu_create_mapping(dev, pages, size, attrs);
-	if (*handle == DMA_ERROR_CODE)
+	if (*handle == ARM_MAPPING_ERROR)
 		goto err_buffer;
 	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
@@ -1732,10 +1739,10 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 	int prot;
 	size = PAGE_ALIGN(size);
-	*handle = DMA_ERROR_CODE;
+	*handle = ARM_MAPPING_ERROR;
 	iova_base = iova = __alloc_iova(mapping, size);
-	if (iova == DMA_ERROR_CODE)
+	if (iova == ARM_MAPPING_ERROR)
 		return -ENOMEM;
 	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
@@ -1775,7 +1782,7 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 	for (i = 1; i < nents; i++) {
 		s = sg_next(s);
-		s->dma_address = DMA_ERROR_CODE;
+		s->dma_address = ARM_MAPPING_ERROR;
 		s->dma_length = 0;
 		if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
@@ -1950,7 +1957,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
 	int ret, prot, len = PAGE_ALIGN(size + offset);
 	dma_addr = __alloc_iova(mapping, len);
-	if (dma_addr == DMA_ERROR_CODE)
+	if (dma_addr == ARM_MAPPING_ERROR)
 		return dma_addr;
 	prot = __dma_info_to_prot(dir, attrs);
@@ -1962,7 +1969,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
 	return dma_addr + offset;
 fail:
 	__free_iova(mapping, dma_addr, len);
-	return DMA_ERROR_CODE;
+	return ARM_MAPPING_ERROR;
 }
 /**
@@ -2056,7 +2063,7 @@ static dma_addr_t arm_iommu_map_resource(struct device *dev,
 	size_t len = PAGE_ALIGN(size + offset);
 	dma_addr = __alloc_iova(mapping, len);
-	if (dma_addr == DMA_ERROR_CODE)
+	if (dma_addr == ARM_MAPPING_ERROR)
 		return dma_addr;
 	prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;
@@ -2068,7 +2075,7 @@ static dma_addr_t arm_iommu_map_resource(struct device *dev,
 	return dma_addr + offset;
 fail:
 	__free_iova(mapping, dma_addr, len);
-	return DMA_ERROR_CODE;
+	return ARM_MAPPING_ERROR;
 }
 /**
@@ -2140,6 +2147,8 @@ const struct dma_map_ops iommu_ops = {
 	.map_resource = arm_iommu_map_resource,
 	.unmap_resource = arm_iommu_unmap_resource,
+
+	.mapping_error = arm_dma_mapping_error,
 };
 const struct dma_map_ops iommu_coherent_ops = {
@@ -2156,6 +2165,8 @@ const struct dma_map_ops iommu_coherent_ops = {
 	.map_resource = arm_iommu_map_resource,
 	.unmap_resource = arm_iommu_unmap_resource,
+
+	.mapping_error = arm_dma_mapping_error,
 };
 /**
......
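
Driver-visible behaviour is unchanged: failed mappings are still detected with dma_mapping_error() rather than by comparing against DMA_ERROR_CODE (ARM_MAPPING_ERROR stays private to arch/arm). A hypothetical usage fragment; the function, buffer, and length names below are illustrative only and not part of the commit.

/* Hypothetical driver-side sketch; not part of this commit. */
static int example_map_buffer(struct device *dev, void *buf, size_t len,
			      dma_addr_t *out)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* On ARM this now reaches arm_dma_mapping_error() or
	 * dmabounce_mapping_error() via the ->mapping_error callback. */
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	*out = addr;
	return 0;
}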