Commit 6009faa4 authored by Christoph Hellwig

powerpc: implement ->mapping_error

DMA_ERROR_CODE is going to go away, so don't rely on it.  Instead
define a ->mapping_error method for all IOMMU based dma operation
instances.  The direct ops don't ever return an error and don't
need a ->mapping_error method.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Michael Ellerman <mpe@ellerman.id.au>
parent ceaf481c
...@@ -17,10 +17,6 @@ ...@@ -17,10 +17,6 @@
#include <asm/io.h> #include <asm/io.h>
#include <asm/swiotlb.h> #include <asm/swiotlb.h>
#ifdef CONFIG_PPC64
#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
#endif
/* Some dma direct funcs must be visible for use in other dma_ops */ /* Some dma direct funcs must be visible for use in other dma_ops */
extern void *__dma_direct_alloc_coherent(struct device *dev, size_t size, extern void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag, dma_addr_t *dma_handle, gfp_t flag,
......
...@@ -139,6 +139,8 @@ struct scatterlist; ...@@ -139,6 +139,8 @@ struct scatterlist;
#ifdef CONFIG_PPC64 #ifdef CONFIG_PPC64
#define IOMMU_MAPPING_ERROR (~(dma_addr_t)0x0)
static inline void set_iommu_table_base(struct device *dev, static inline void set_iommu_table_base(struct device *dev,
struct iommu_table *base) struct iommu_table *base)
{ {
...@@ -238,6 +240,8 @@ static inline int __init tce_iommu_bus_notifier_init(void) ...@@ -238,6 +240,8 @@ static inline int __init tce_iommu_bus_notifier_init(void)
} }
#endif /* !CONFIG_IOMMU_API */ #endif /* !CONFIG_IOMMU_API */
int dma_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr);
#else #else
static inline void *get_iommu_table_base(struct device *dev) static inline void *get_iommu_table_base(struct device *dev)
......
...@@ -105,6 +105,11 @@ static u64 dma_iommu_get_required_mask(struct device *dev) ...@@ -105,6 +105,11 @@ static u64 dma_iommu_get_required_mask(struct device *dev)
return mask; return mask;
} }
/*
 * Tell the generic DMA layer whether @dma_addr is the sentinel value
 * (IOMMU_MAPPING_ERROR) that the powerpc IOMMU mapping paths return on
 * allocation failure.  @dev is unused; the sentinel is global.
 *
 * Returns nonzero if the address denotes a failed mapping, 0 otherwise.
 */
int dma_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	if (dma_addr == IOMMU_MAPPING_ERROR)
		return 1;

	return 0;
}
struct dma_map_ops dma_iommu_ops = { struct dma_map_ops dma_iommu_ops = {
.alloc = dma_iommu_alloc_coherent, .alloc = dma_iommu_alloc_coherent,
.free = dma_iommu_free_coherent, .free = dma_iommu_free_coherent,
...@@ -115,5 +120,6 @@ struct dma_map_ops dma_iommu_ops = { ...@@ -115,5 +120,6 @@ struct dma_map_ops dma_iommu_ops = {
.map_page = dma_iommu_map_page, .map_page = dma_iommu_map_page,
.unmap_page = dma_iommu_unmap_page, .unmap_page = dma_iommu_unmap_page,
.get_required_mask = dma_iommu_get_required_mask, .get_required_mask = dma_iommu_get_required_mask,
.mapping_error = dma_iommu_mapping_error,
}; };
EXPORT_SYMBOL(dma_iommu_ops); EXPORT_SYMBOL(dma_iommu_ops);
...@@ -198,11 +198,11 @@ static unsigned long iommu_range_alloc(struct device *dev, ...@@ -198,11 +198,11 @@ static unsigned long iommu_range_alloc(struct device *dev,
if (unlikely(npages == 0)) { if (unlikely(npages == 0)) {
if (printk_ratelimit()) if (printk_ratelimit())
WARN_ON(1); WARN_ON(1);
return DMA_ERROR_CODE; return IOMMU_MAPPING_ERROR;
} }
if (should_fail_iommu(dev)) if (should_fail_iommu(dev))
return DMA_ERROR_CODE; return IOMMU_MAPPING_ERROR;
/* /*
* We don't need to disable preemption here because any CPU can * We don't need to disable preemption here because any CPU can
...@@ -278,7 +278,7 @@ static unsigned long iommu_range_alloc(struct device *dev, ...@@ -278,7 +278,7 @@ static unsigned long iommu_range_alloc(struct device *dev,
} else { } else {
/* Give up */ /* Give up */
spin_unlock_irqrestore(&(pool->lock), flags); spin_unlock_irqrestore(&(pool->lock), flags);
return DMA_ERROR_CODE; return IOMMU_MAPPING_ERROR;
} }
} }
...@@ -310,13 +310,13 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl, ...@@ -310,13 +310,13 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
unsigned long attrs) unsigned long attrs)
{ {
unsigned long entry; unsigned long entry;
dma_addr_t ret = DMA_ERROR_CODE; dma_addr_t ret = IOMMU_MAPPING_ERROR;
int build_fail; int build_fail;
entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order); entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);
if (unlikely(entry == DMA_ERROR_CODE)) if (unlikely(entry == IOMMU_MAPPING_ERROR))
return DMA_ERROR_CODE; return IOMMU_MAPPING_ERROR;
entry += tbl->it_offset; /* Offset into real TCE table */ entry += tbl->it_offset; /* Offset into real TCE table */
ret = entry << tbl->it_page_shift; /* Set the return dma address */ ret = entry << tbl->it_page_shift; /* Set the return dma address */
...@@ -328,12 +328,12 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl, ...@@ -328,12 +328,12 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
/* tbl->it_ops->set() only returns non-zero for transient errors. /* tbl->it_ops->set() only returns non-zero for transient errors.
* Clean up the table bitmap in this case and return * Clean up the table bitmap in this case and return
* DMA_ERROR_CODE. For all other errors the functionality is * IOMMU_MAPPING_ERROR. For all other errors the functionality is
* not altered. * not altered.
*/ */
if (unlikely(build_fail)) { if (unlikely(build_fail)) {
__iommu_free(tbl, ret, npages); __iommu_free(tbl, ret, npages);
return DMA_ERROR_CODE; return IOMMU_MAPPING_ERROR;
} }
/* Flush/invalidate TLB caches if necessary */ /* Flush/invalidate TLB caches if necessary */
...@@ -478,7 +478,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl, ...@@ -478,7 +478,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen); DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);
/* Handle failure */ /* Handle failure */
if (unlikely(entry == DMA_ERROR_CODE)) { if (unlikely(entry == IOMMU_MAPPING_ERROR)) {
if (!(attrs & DMA_ATTR_NO_WARN) && if (!(attrs & DMA_ATTR_NO_WARN) &&
printk_ratelimit()) printk_ratelimit())
dev_info(dev, "iommu_alloc failed, tbl %p " dev_info(dev, "iommu_alloc failed, tbl %p "
...@@ -545,7 +545,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl, ...@@ -545,7 +545,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
*/ */
if (outcount < incount) { if (outcount < incount) {
outs = sg_next(outs); outs = sg_next(outs);
outs->dma_address = DMA_ERROR_CODE; outs->dma_address = IOMMU_MAPPING_ERROR;
outs->dma_length = 0; outs->dma_length = 0;
} }
...@@ -563,7 +563,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl, ...@@ -563,7 +563,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
npages = iommu_num_pages(s->dma_address, s->dma_length, npages = iommu_num_pages(s->dma_address, s->dma_length,
IOMMU_PAGE_SIZE(tbl)); IOMMU_PAGE_SIZE(tbl));
__iommu_free(tbl, vaddr, npages); __iommu_free(tbl, vaddr, npages);
s->dma_address = DMA_ERROR_CODE; s->dma_address = IOMMU_MAPPING_ERROR;
s->dma_length = 0; s->dma_length = 0;
} }
if (s == outs) if (s == outs)
...@@ -777,7 +777,7 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl, ...@@ -777,7 +777,7 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
unsigned long mask, enum dma_data_direction direction, unsigned long mask, enum dma_data_direction direction,
unsigned long attrs) unsigned long attrs)
{ {
dma_addr_t dma_handle = DMA_ERROR_CODE; dma_addr_t dma_handle = IOMMU_MAPPING_ERROR;
void *vaddr; void *vaddr;
unsigned long uaddr; unsigned long uaddr;
unsigned int npages, align; unsigned int npages, align;
...@@ -797,7 +797,7 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl, ...@@ -797,7 +797,7 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction, dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
mask >> tbl->it_page_shift, align, mask >> tbl->it_page_shift, align,
attrs); attrs);
if (dma_handle == DMA_ERROR_CODE) { if (dma_handle == IOMMU_MAPPING_ERROR) {
if (!(attrs & DMA_ATTR_NO_WARN) && if (!(attrs & DMA_ATTR_NO_WARN) &&
printk_ratelimit()) { printk_ratelimit()) {
dev_info(dev, "iommu_alloc failed, tbl %p " dev_info(dev, "iommu_alloc failed, tbl %p "
...@@ -869,7 +869,7 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl, ...@@ -869,7 +869,7 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
io_order = get_iommu_order(size, tbl); io_order = get_iommu_order(size, tbl);
mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL, mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
mask >> tbl->it_page_shift, io_order, 0); mask >> tbl->it_page_shift, io_order, 0);
if (mapping == DMA_ERROR_CODE) { if (mapping == IOMMU_MAPPING_ERROR) {
free_pages((unsigned long)ret, order); free_pages((unsigned long)ret, order);
return NULL; return NULL;
} }
......
...@@ -660,6 +660,7 @@ static const struct dma_map_ops dma_iommu_fixed_ops = { ...@@ -660,6 +660,7 @@ static const struct dma_map_ops dma_iommu_fixed_ops = {
.set_dma_mask = dma_set_mask_and_switch, .set_dma_mask = dma_set_mask_and_switch,
.map_page = dma_fixed_map_page, .map_page = dma_fixed_map_page,
.unmap_page = dma_fixed_unmap_page, .unmap_page = dma_fixed_unmap_page,
.mapping_error = dma_iommu_mapping_error,
}; };
static void cell_dma_dev_setup_fixed(struct device *dev); static void cell_dma_dev_setup_fixed(struct device *dev);
......
...@@ -519,7 +519,7 @@ static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page, ...@@ -519,7 +519,7 @@ static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
{ {
struct vio_dev *viodev = to_vio_dev(dev); struct vio_dev *viodev = to_vio_dev(dev);
struct iommu_table *tbl; struct iommu_table *tbl;
dma_addr_t ret = DMA_ERROR_CODE; dma_addr_t ret = IOMMU_MAPPING_ERROR;
tbl = get_iommu_table_base(dev); tbl = get_iommu_table_base(dev);
if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)))) { if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)))) {
...@@ -625,6 +625,7 @@ static const struct dma_map_ops vio_dma_mapping_ops = { ...@@ -625,6 +625,7 @@ static const struct dma_map_ops vio_dma_mapping_ops = {
.unmap_page = vio_dma_iommu_unmap_page, .unmap_page = vio_dma_iommu_unmap_page,
.dma_supported = vio_dma_iommu_dma_supported, .dma_supported = vio_dma_iommu_dma_supported,
.get_required_mask = vio_dma_get_required_mask, .get_required_mask = vio_dma_get_required_mask,
.mapping_error = dma_iommu_mapping_error,
}; };
/** /**
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment