Commit 3e6110fd authored by Dan Williams

Revert "scatterlist: use sg_phys()"

commit db0fa0cb ("scatterlist: use sg_phys()") replaced expressions of
the form:

    phys_addr_t phys = page_to_phys(sg_page(s));

with:

    phys_addr_t phys = sg_phys(s) & PAGE_MASK;

However, this breaks platforms where sizeof(phys_addr_t) >
sizeof(unsigned long): PAGE_MASK has type unsigned long, so the AND
silently clears the upper bits of a 64-bit physical address.  Revert
for 4.3 and 4.4 to make room for a combined helper in 4.5.
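
For illustration only (not part of the commit): a minimal user-space sketch of
the truncation, assuming a 32-bit kernel with 64-bit physical addresses (e.g.
ARM LPAE).  The 32-bit "unsigned long" is modelled with uint32_t so the demo
behaves the same on any host, and the address value and DEMO_* names are made
up for the example.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;			/* 64-bit physical addresses */
#define DEMO_PAGE_SIZE	4096u
#define DEMO_PAGE_MASK	((uint32_t)~(DEMO_PAGE_SIZE - 1))	/* 0xfffff000 */

int main(void)
{
	phys_addr_t phys = 0x123456780ULL;	/* a physical address above 4 GiB */

	/* The pattern db0fa0cb introduced: a 32-bit mask zero-extends to
	 * 64 bits and clears bits 32 and up. */
	phys_addr_t masked = phys & DEMO_PAGE_MASK;

	/* A full-width mask keeps the upper bits; this matches what the
	 * reverted call sites get from page_to_phys(sg_page(s)) when the
	 * segment offset stays within one page. */
	phys_addr_t aligned = phys & ~(phys_addr_t)(DEMO_PAGE_SIZE - 1);

	printf("phys    = %#" PRIx64 "\n", phys);
	printf("masked  = %#" PRIx64 "  (upper bits lost)\n", masked);
	printf("aligned = %#" PRIx64 "\n", aligned);
	return 0;
}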

Cc: <stable@vger.kernel.org>
Cc: Jens Axboe <axboe@fb.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Fixes: db0fa0cb ("scatterlist: use sg_phys()")
Suggested-by: Joerg Roedel <joro@8bytes.org>
Reported-by: Vitaly Lavrov <vel21ripn@gmail.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent d91e8928
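
For reference while reading the hunks below: sg_phys() is, as defined in
include/linux/scatterlist.h at the time, just the page's physical address plus
the segment offset.  That is why the revert can open-code
page_to_phys(sg_page(sg)) + sg->offset where the full address is wanted, and
plain page_to_phys(sg_page(sg)) where a page-aligned address is wanted, without
going through the unsigned-long PAGE_MASK.

/* include/linux/scatterlist.h (not modified by this revert) */
static inline dma_addr_t sg_phys(struct scatterlist *sg)
{
	return page_to_phys(sg_page(sg)) + sg->offset;
}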
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1521,7 +1521,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 		return -ENOMEM;
 
 	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
-		phys_addr_t phys = sg_phys(s) & PAGE_MASK;
+		phys_addr_t phys = page_to_phys(sg_page(s));
 		unsigned int len = PAGE_ALIGN(s->offset + s->length);
 
 		if (!is_coherent &&
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -61,7 +61,8 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
 	/* FIXME this part of code is untested */
 	for_each_sg(sgl, sg, nents, i) {
 		sg->dma_address = sg_phys(sg);
-		__dma_sync(sg_phys(sg), sg->length, direction);
+		__dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
+							sg->length, direction);
 	}
 
 	return nents;
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2159,7 +2159,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 			sg_res = aligned_nrpages(sg->offset, sg->length);
 			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
 			sg->dma_length = sg->length;
-			pteval = (sg_phys(sg) & PAGE_MASK) | prot;
+			pteval = page_to_phys(sg_page(sg)) | prot;
 			phys_pfn = pteval >> VTD_PAGE_SHIFT;
 		}
 
@@ -3704,7 +3704,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
 
 	for_each_sg(sglist, sg, nelems, i) {
 		BUG_ON(!sg_page(sg));
-		sg->dma_address = sg_phys(sg);
+		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
 		sg->dma_length = sg->length;
 	}
 	return nelems;
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1430,7 +1430,7 @@ size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
 	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
 
 	for_each_sg(sg, s, nents, i) {
-		phys_addr_t phys = sg_phys(s);
+		phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
 
 		/*
 		 * We are mapping on IOMMU page boundaries, so offset within
--- a/drivers/staging/android/ion/ion_chunk_heap.c
+++ b/drivers/staging/android/ion/ion_chunk_heap.c
@@ -81,7 +81,7 @@ static int ion_chunk_heap_allocate(struct ion_heap *heap,
 err:
 	sg = table->sgl;
 	for (i -= 1; i >= 0; i--) {
-		gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK,
+		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
 			      sg->length);
 		sg = sg_next(sg);
 	}
@@ -109,7 +109,7 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer)
 							DMA_BIDIRECTIONAL);
 
 	for_each_sg(table->sgl, sg, table->nents, i) {
-		gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK,
+		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
 			      sg->length);
 	}
 	chunk_heap->allocated -= allocated_size;
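
The "combined helper in 4.5" mentioned in the changelog is not part of this
commit; the sketch below is only one way such a helper could look, with the
mask widened to phys_addr_t so nothing is truncated.  The name
sg_phys_aligned() is hypothetical, not an upstream API.

/* Hypothetical sketch only -- not the helper that eventually landed. */
static inline phys_addr_t sg_phys_aligned(struct scatterlist *sg)
{
	/* ~(phys_addr_t)(PAGE_SIZE - 1) is a full-width mask, unlike the
	 * unsigned-long-typed PAGE_MASK that motivated this revert. */
	return sg_phys(sg) & ~(phys_addr_t)(PAGE_SIZE - 1);
}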