Commit 5935877a authored by Akinobu Mita, committed by Linus Torvalds

powerpc: use for_each_sg()

This replaces the plain loop over the sglist array with the for_each_sg()
macro, which advances through the list via sg_next() calls.  Since powerpc
selects ARCH_HAS_SG_CHAIN, it is necessary to use for_each_sg() in order to
loop over each sg element.  This also helps find problems with drivers that
do not properly initialize their sg tables when CONFIG_DEBUG_SG is enabled.
Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent ae70a7bb
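
As a minimal sketch of the pattern this patch applies (the helper below is hypothetical and not part of the kernel), the conversion replaces pointer arithmetic over the sg array with the iterator macro:

	#include <linux/scatterlist.h>

	/* Hypothetical helper: sum the byte length of every segment in an sg table. */
	static size_t sum_sg_lengths(struct scatterlist *sglist, int nelems)
	{
		struct scatterlist *sg;
		size_t total = 0;
		int i;

		/*
		 * A plain "for (sg = sglist; i < nelems; i++, sg++)" walk assumes the
		 * entries are contiguous in memory and so breaks on chained tables;
		 * for_each_sg() advances with sg_next(), which follows chain links.
		 */
		for_each_sg(sglist, sg, nelems, i)
			total += sg->length;

		return total;
	}
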
@@ -557,11 +557,11 @@ static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
 	struct vio_dev *viodev = to_vio_dev(dev);
 	struct iommu_table *tbl;
 	struct scatterlist *sgl;
-	int ret, count = 0;
+	int ret, count;
 	size_t alloc_size = 0;
 
 	tbl = get_iommu_table_base(dev);
-	for (sgl = sglist; count < nelems; count++, sgl++)
+	for_each_sg(sglist, sgl, nelems, count)
 		alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE(tbl));
 
 	if (vio_cmo_alloc(viodev, alloc_size)) {
@@ -577,7 +577,7 @@ static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
 		return ret;
 	}
 
-	for (sgl = sglist, count = 0; count < ret; count++, sgl++)
+	for_each_sg(sglist, sgl, ret, count)
 		alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
 	if (alloc_size)
 		vio_cmo_dealloc(viodev, alloc_size);
@@ -594,10 +594,10 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
 	struct iommu_table *tbl;
 	struct scatterlist *sgl;
 	size_t alloc_size = 0;
-	int count = 0;
+	int count;
 
 	tbl = get_iommu_table_base(dev);
-	for (sgl = sglist; count < nelems; count++, sgl++)
+	for_each_sg(sglist, sgl, nelems, count)
 		alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
 
 	dma_iommu_ops.unmap_sg(dev, sglist, nelems, direction, attrs);
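
For reference, for_each_sg() is defined in include/linux/scatterlist.h roughly as follows (quoted from memory, so treat it as a sketch rather than the exact upstream text):

	#define for_each_sg(sglist, sg, nr, __i) \
		for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))

Because the iterator steps with sg_next() rather than sg++, it transparently follows chained scatterlists, which is why the conversion above is needed once ARCH_HAS_SG_CHAIN is selected.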