Commit 7132813c authored by Suzuki K. Poulose, committed by Catalin Marinas

arm64: Honor __GFP_ZERO in dma allocations

The current implementation doesn't zero out the allocated pages.
Honor the __GFP_ZERO flag and zero the buffer when it is set.

Cc: <stable@vger.kernel.org> # v3.14+
Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Suzuki K. Poulose <suzuki.poulose@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 130c93fd
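
For context, here is a hypothetical caller sketch (not part of this commit) showing the usage the patch is meant to support: a driver passing __GFP_ZERO to dma_alloc_coherent() and relying on the returned buffer being zero-filled. The foo_alloc_ring() helper and its parameters are illustrative only.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Illustrative helper; foo_alloc_ring() is not part of the kernel tree. */
static void *foo_alloc_ring(struct device *dev, size_t size,
			    dma_addr_t *dma_handle)
{
	/*
	 * Request a zeroed coherent buffer. With this patch, the arm64
	 * atomic-pool and CMA allocation paths memset the buffer when
	 * __GFP_ZERO is set, so the caller may assume it reads back as 0.
	 */
	return dma_alloc_coherent(dev, size, dma_handle,
				  GFP_KERNEL | __GFP_ZERO);
}
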
arch/arm64/mm/dma-mapping.c

@@ -51,7 +51,7 @@ static int __init early_coherent_pool(char *p)
 }
 early_param("coherent_pool", early_coherent_pool);
 
-static void *__alloc_from_pool(size_t size, struct page **ret_page)
+static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
 {
 	unsigned long val;
 	void *ptr = NULL;
@@ -67,6 +67,8 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
 
 		*ret_page = phys_to_page(phys);
 		ptr = (void *)val;
+		if (flags & __GFP_ZERO)
+			memset(ptr, 0, size);
 	}
 
 	return ptr;
@@ -101,6 +103,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
 		flags |= GFP_DMA;
 	if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) {
 		struct page *page;
+		void *addr;
 
 		size = PAGE_ALIGN(size);
 		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
@@ -109,7 +112,10 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
 			return NULL;
 
 		*dma_handle = phys_to_dma(dev, page_to_phys(page));
-		return page_address(page);
+		addr = page_address(page);
+		if (flags & __GFP_ZERO)
+			memset(addr, 0, size);
+		return addr;
 	} else {
 		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
 	}
@@ -146,7 +152,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
 
 	if (!coherent && !(flags & __GFP_WAIT)) {
 		struct page *page = NULL;
-		void *addr = __alloc_from_pool(size, &page);
+		void *addr = __alloc_from_pool(size, &page, flags);
 
 		if (addr)
 			*dma_handle = phys_to_dma(dev, page_to_phys(page));