Commit 6b700393 authored by Vineet Gupta

ARC: dma: non-coherent pages need V-P mapping if in HIGHMEM

Previously a non-coherent page (for hardware IOC, or simply because the
driver asked for one) could be handled by the CPU with its paddr alone
(a kvaddr used to be needed only for coherent mappings, to enforce
uncached semantics via an MMU mapping).

Now, however, such a page might still require a V-P mapping if it lives
in physical address space above 32 bits due to PAE40, which the CPU
can't access directly with a paddr.

So decouple the decision to allocate a kvaddr from the type of alloc
request (coherent vs. non-coherent).
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
parent d98a15a5
@@ -28,23 +28,18 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
 	struct page *page;
 	phys_addr_t paddr;
 	void *kvaddr;
+	int need_coh = 1, need_kvaddr = 0;
 
 	page = alloc_pages(gfp, order);
 	if (!page)
 		return NULL;
 
-	/* This is linear addr (0x8000_0000 based) */
-	paddr = page_to_phys(page);
-
-	/* For now bus address is exactly same as paddr */
-	*dma_handle = paddr;
-
 	/*
 	 * IOC relies on all data (even coherent DMA data) being in cache
 	 * Thus allocate normal cached memory
 	 *
 	 * The gains with IOC are two pronged:
-	 *   -For streaming data, elides needs for cache maintenance, saving
+	 *   -For streaming data, elides need for cache maintenance, saving
 	 *      cycles in flush code, and bus bandwidth as all the lines of a
 	 *      buffer need to be flushed out to memory
 	 *   -For coherent data, Read/Write to buffers terminate early in cache
@@ -52,14 +47,32 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
 	 */
 	if ((is_isa_arcv2() && ioc_exists) ||
 	    dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
-		return paddr;
+		need_coh = 0;
+
+	/*
+	 * - A coherent buffer needs MMU mapping to enforce non-cachability
+	 * - A highmem page needs a virtual handle (hence MMU mapping)
+	 *   independent of cachability
+	 */
+	if (PageHighMem(page) || need_coh)
+		need_kvaddr = 1;
+
+	/* This is linear addr (0x8000_0000 based) */
+	paddr = page_to_phys(page);
+
+	/* For now bus address is exactly same as paddr */
+	*dma_handle = paddr;
 
 	/* This is kernel Virtual address (0x7000_0000 based) */
-	kvaddr = ioremap_nocache((unsigned long)paddr, size);
-	if (kvaddr == NULL) {
-		__free_pages(page, order);
-		return NULL;
+	if (need_kvaddr) {
+		kvaddr = ioremap_nocache((unsigned long)paddr, size);
+		if (kvaddr == NULL) {
+			__free_pages(page, order);
+			return NULL;
+		}
+	} else {
+		kvaddr = (void *)paddr;
 	}
 
 	/*
 	 * Evict any existing L1 and/or L2 lines for the backing page
@@ -71,6 +84,7 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
 	 * Currently flush_cache_vmap nukes the L1 cache completely which
 	 * will be optimized as a separate commit
 	 */
-	dma_cache_wback_inv((unsigned long)paddr, size);
+	if (need_coh)
+		dma_cache_wback_inv((unsigned long)paddr, size);
 
 	return kvaddr;
@@ -80,9 +94,12 @@ static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
 		dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
 	struct page *page = virt_to_page(dma_handle);
+	int is_non_coh = 1;
 
-	if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs) &&
-	    !(is_isa_arcv2() && ioc_exists))
+	is_non_coh = dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs) ||
+			(is_isa_arcv2() && ioc_exists);
+
+	if (PageHighMem(page) || !is_non_coh)
 		iounmap((void __force __iomem *)vaddr);
 
 	__free_pages(page, get_order(size));
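To make the decision table easy to see, here is a minimal userspace model of the two flags the patch introduces. This is an illustration only, not kernel code: highmem, ioc and non_consistent are hypothetical stand-ins for the kernel-side checks named in the comments.

#include <stdbool.h>
#include <stdio.h>

/*
 * Models the two flags arc_dma_alloc() computes after this patch.
 * "highmem" stands in for PageHighMem(page), "ioc" for
 * is_isa_arcv2() && ioc_exists, and "non_consistent" for
 * dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs).
 */
static void decide(bool highmem, bool ioc, bool non_consistent)
{
	/* Uncached semantics are needed unless IOC or the driver opts out */
	bool need_coh = !(ioc || non_consistent);

	/*
	 * The patch's key point: a highmem page needs a V-P mapping even
	 * when cached, because its paddr may lie above 32 bits (PAE40)
	 * and cannot be dereferenced directly.
	 */
	bool need_kvaddr = highmem || need_coh;

	printf("highmem=%d ioc=%d non_consistent=%d -> need_coh=%d need_kvaddr=%d\n",
	       highmem, ioc, non_consistent, need_coh, need_kvaddr);
}

int main(void)
{
	for (int h = 0; h < 2; h++)
		for (int i = 0; i < 2; i++)
			for (int n = 0; n < 2; n++)
				decide(h, i, n);
	return 0;
}

Note how the free path mirrors this: iounmap() is called exactly when a mapping was created, i.e. for highmem pages or coherent (uncached) buffers.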