Commit 9ad5d6ed authored by Robin Murphy, committed by Joerg Roedel

iommu/dma: Cleanup variable naming in iommu_dma_alloc

Most importantly, clear up the size / iosize confusion. Also rename addr
to cpu_addr to match the surrounding code and make the intention a little
clearer.
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
[hch: split from a larger patch]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 8553f6e6
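
In the resulting code, the caller's original size is what the IOMMU mapping covers (replacing the old iosize alias), while the new alloc_size is the page-aligned length used to allocate, zero and free the backing memory. A minimal standalone sketch of that convention follows; the macro names and values are illustrative stand-ins, not the kernel's definitions:

#include <stdio.h>

/* Illustrative stand-ins for the kernel's PAGE_SIZE / PAGE_ALIGN. */
#define SKETCH_PAGE_SIZE	4096UL
#define SKETCH_PAGE_ALIGN(x)	(((x) + SKETCH_PAGE_SIZE - 1) & ~(SKETCH_PAGE_SIZE - 1))

int main(void)
{
	size_t size = 6000;				/* caller's request: what the IOMMU mapping covers */
	size_t alloc_size = SKETCH_PAGE_ALIGN(size);	/* page-aligned: what is allocated, zeroed and freed */

	printf("map %zu bytes, allocate %zu bytes\n", size, alloc_size);
	return 0;
}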
@@ -977,64 +977,63 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 {
 	bool coherent = dev_is_dma_coherent(dev);
 	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
-	size_t iosize = size;
+	size_t alloc_size = PAGE_ALIGN(size);
 	struct page *page = NULL;
-	void *addr;
+	void *cpu_addr;
 
-	size = PAGE_ALIGN(size);
 	gfp |= __GFP_ZERO;
 
 	if (gfpflags_allow_blocking(gfp) &&
 	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
-		return iommu_dma_alloc_remap(dev, iosize, handle, gfp, attrs);
+		return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs);
 
 	if (!gfpflags_allow_blocking(gfp) && !coherent) {
-		addr = dma_alloc_from_pool(size, &page, gfp);
-		if (!addr)
+		cpu_addr = dma_alloc_from_pool(alloc_size, &page, gfp);
+		if (!cpu_addr)
 			return NULL;
-		*handle = __iommu_dma_map(dev, page_to_phys(page), iosize,
+		*handle = __iommu_dma_map(dev, page_to_phys(page), size,
 					  ioprot);
 		if (*handle == DMA_MAPPING_ERROR) {
-			dma_free_from_pool(addr, size);
+			dma_free_from_pool(cpu_addr, alloc_size);
 			return NULL;
 		}
-		return addr;
+		return cpu_addr;
 	}
 
 	if (gfpflags_allow_blocking(gfp))
-		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
-						 get_order(size),
+		page = dma_alloc_from_contiguous(dev, alloc_size >> PAGE_SHIFT,
+						 get_order(alloc_size),
 						 gfp & __GFP_NOWARN);
 	if (!page)
-		page = alloc_pages(gfp, get_order(size));
+		page = alloc_pages(gfp, get_order(alloc_size));
 	if (!page)
 		return NULL;
 
-	*handle = __iommu_dma_map(dev, page_to_phys(page), iosize, ioprot);
+	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot);
 	if (*handle == DMA_MAPPING_ERROR)
 		goto out_free_pages;
 
 	if (!coherent || PageHighMem(page)) {
 		pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
 
-		addr = dma_common_contiguous_remap(page, size, VM_USERMAP, prot,
-				__builtin_return_address(0));
-		if (!addr)
+		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
+				VM_USERMAP, prot, __builtin_return_address(0));
+		if (!cpu_addr)
 			goto out_unmap;
 
 		if (!coherent)
-			arch_dma_prep_coherent(page, iosize);
+			arch_dma_prep_coherent(page, size);
 	} else {
-		addr = page_address(page);
+		cpu_addr = page_address(page);
 	}
 
-	memset(addr, 0, size);
-	return addr;
+	memset(cpu_addr, 0, alloc_size);
+	return cpu_addr;
 out_unmap:
-	__iommu_dma_unmap(dev, *handle, iosize);
+	__iommu_dma_unmap(dev, *handle, size);
 out_free_pages:
-	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
-		__free_pages(page, get_order(size));
+	if (!dma_release_from_contiguous(dev, page, alloc_size >> PAGE_SHIFT))
+		__free_pages(page, get_order(alloc_size));
 	return NULL;
 }
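
For context beyond the patch itself: drivers do not call iommu_dma_alloc() directly, but reach it through the generic DMA API when their device is managed by the dma-iommu ops. A hedged usage sketch with a hypothetical driver function (buffer length and error handling simplified):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/*
 * Hypothetical driver snippet (illustration only): dma_alloc_coherent()
 * dispatches into iommu_dma_alloc() for devices using the dma-iommu ops,
 * returning the kernel virtual address (cpu_addr) and the device-visible
 * DMA address (handle).
 */
static int example_get_buffer(struct device *dev)
{
	dma_addr_t handle;
	void *cpu_addr;

	cpu_addr = dma_alloc_coherent(dev, 8192, &handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... program "handle" into the device, access "cpu_addr" from the CPU ... */

	dma_free_coherent(dev, 8192, cpu_addr, handle);
	return 0;
}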