powerpc: Fix up dma_alloc_coherent() on platforms without cache coherency.

The implementation we just revived has issues, such as using a
Kconfig-defined virtual address area in kernel space that nothing
actually carves out (and thus will overlap whatever else is mapped
there), or depending on being self-contained within a single PTE
page, which adds unnecessary constraints on the kernel virtual
address space.

This fixes it by using more classic PTE accessors and automatically
locating the area for consistent memory, carving an appropriate hole
in the kernel virtual address space and leaving only the size of that
area as a Kconfig option. It also brings in some dma-mask-related
fixes from the ARM implementation, which was almost identical
initially but has since grown its own fixes.
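
For illustration, a minimal driver-side sketch of the path these mask
checks protect. The device and probe function below are hypothetical;
dev->coherent_dma_mask and dma_alloc_coherent() are the standard
interfaces that funnel into __dma_alloc_coherent() on
CONFIG_NOT_COHERENT_CACHE platforms:

	/* Hypothetical driver sketch, not part of this patch: exercises the
	 * new coherent_dma_mask sanity checks on non-coherent CPUs.
	 */
	#include <linux/device.h>
	#include <linux/dma-mapping.h>

	static int example_probe(struct device *dev)
	{
		dma_addr_t dma_handle;
		void *cpu_addr;

		/* An unset (zero) or too-small coherent mask now produces a
		 * dev_warn() and a NULL return instead of a bogus allocation. */
		dev->coherent_dma_mask = DMA_BIT_MASK(32);

		cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &dma_handle, GFP_KERNEL);
		if (!cpu_addr)
			return -ENOMEM;

		/* ... use the buffer; release it later with
		 * dma_free_coherent(dev, PAGE_SIZE, cpu_addr, dma_handle) ... */
		return 0;
	}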
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent f637a49e
@@ -868,19 +868,6 @@ config TASK_SIZE
 	default "0x80000000" if PPC_PREP || PPC_8xx
 	default "0xc0000000"
 
-config CONSISTENT_START_BOOL
-	bool "Set custom consistent memory pool address"
-	depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE
-	help
-	  This option allows you to set the base virtual address
-	  of the consistent memory pool. This pool of virtual
-	  memory is used to make consistent memory allocations.
-
-config CONSISTENT_START
-	hex "Base virtual address of consistent memory pool" if CONSISTENT_START_BOOL
-	default "0xfd000000" if (NOT_COHERENT_CACHE && 8xx)
-	default "0xff100000" if NOT_COHERENT_CACHE
-
 config CONSISTENT_SIZE_BOOL
 	bool "Set custom consistent memory pool size"
 	depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE
...
@@ -26,7 +26,9 @@
  * allocate the space "normally" and use the cache management functions
  * to ensure it is consistent.
  */
-extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp);
+struct device;
+extern void *__dma_alloc_coherent(struct device *dev, size_t size,
+				  dma_addr_t *handle, gfp_t gfp);
 extern void __dma_free_coherent(size_t size, void *vaddr);
 extern void __dma_sync(void *vaddr, size_t size, int direction);
 extern void __dma_sync_page(struct page *page, unsigned long offset,
@@ -37,7 +39,7 @@ extern void __dma_sync_page(struct page *page, unsigned long offset,
  * Cache coherent cores.
  */
 
-#define __dma_alloc_coherent(gfp, size, handle)	NULL
+#define __dma_alloc_coherent(dev, gfp, size, handle)	NULL
 #define __dma_free_coherent(size, addr)		((void)0)
 #define __dma_sync(addr, size, rw)		((void)0)
 #define __dma_sync_page(pg, off, sz, rw)	((void)0)
...
@@ -71,7 +71,11 @@ extern int icache_44x_need_flush;
  * until mem_init() at which point this becomes the top of the vmalloc
  * and ioremap space
  */
+#ifdef CONFIG_NOT_COHERENT_CACHE
+#define IOREMAP_TOP	((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
+#else
 #define IOREMAP_TOP	KVIRT_TOP
+#endif
 
 /*
  * Just any arbitrary offset to the start of the vmalloc VM area: the
...
@@ -32,7 +32,7 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size,
 {
 	void *ret;
 #ifdef CONFIG_NOT_COHERENT_CACHE
-	ret = __dma_alloc_coherent(size, dma_handle, flag);
+	ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
 	if (ret == NULL)
 		return NULL;
 	*dma_handle += get_dma_direct_offset(dev);
...
@@ -32,20 +32,21 @@
 #include <asm/tlbflush.h>
 
+#include "mmu_decl.h"
+
 /*
  * This address range defaults to a value that is safe for all
  * platforms which currently set CONFIG_NOT_COHERENT_CACHE. It
  * can be further configured for specific applications under
  * the "Advanced Setup" menu. -Matt
  */
-#define CONSISTENT_BASE		(CONFIG_CONSISTENT_START)
-#define CONSISTENT_END		(CONFIG_CONSISTENT_START + CONFIG_CONSISTENT_SIZE)
+#define CONSISTENT_BASE		(IOREMAP_TOP)
+#define CONSISTENT_END		(CONSISTENT_BASE + CONFIG_CONSISTENT_SIZE)
 #define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
 
 /*
  * This is the page table (2MB) covering uncached, DMA consistent allocations
  */
-static pte_t *consistent_pte;
 static DEFINE_SPINLOCK(consistent_lock);
 
 /*
@@ -148,22 +149,38 @@ static struct ppc_vm_region *ppc_vm_region_find(struct ppc_vm_region *head, unsi
  * virtual and bus address for that space.
  */
 void *
-__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
+__dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
 {
 	struct page *page;
 	struct ppc_vm_region *c;
 	unsigned long order;
-	u64 mask = 0x00ffffff, limit;		/* ISA default */
+	u64 mask = ISA_DMA_THRESHOLD, limit;
 
-	if (!consistent_pte) {
-		printk(KERN_ERR "%s: not initialised\n", __func__);
-		dump_stack();
-		return NULL;
+	if (dev) {
+		mask = dev->coherent_dma_mask;
+
+		/*
+		 * Sanity check the DMA mask - it must be non-zero, and
+		 * must be able to be satisfied by a DMA allocation.
+		 */
+		if (mask == 0) {
+			dev_warn(dev, "coherent DMA mask is unset\n");
+			goto no_page;
+		}
+
+		if ((~mask) & ISA_DMA_THRESHOLD) {
+			dev_warn(dev, "coherent DMA mask %#llx is smaller "
+				 "than system GFP_DMA mask %#llx\n",
+				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
+			goto no_page;
+		}
 	}
 
 	size = PAGE_ALIGN(size);
 	limit = (mask + 1) & ~mask;
-	if ((limit && size >= limit) || size >= (CONSISTENT_END - CONSISTENT_BASE)) {
+	if ((limit && size >= limit) ||
+	    size >= (CONSISTENT_END - CONSISTENT_BASE)) {
 		printk(KERN_WARNING "coherent allocation too big (requested %#x mask %#Lx)\n",
 		       size, mask);
 		return NULL;
@@ -171,6 +188,7 @@ __dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
 
 	order = get_order(size);
 
+	/* Might be useful if we ever have a real legacy DMA zone... */
 	if (mask != 0xffffffff)
 		gfp |= GFP_DMA;
 
@@ -195,7 +213,6 @@ __dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
 			gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
 	if (c) {
 		unsigned long vaddr = c->vm_start;
-		pte_t *pte = consistent_pte + CONSISTENT_OFFSET(vaddr);
 		struct page *end = page + (1 << order);
 
 		split_page(page, order);
@@ -206,13 +223,10 @@ __dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
 		*handle = page_to_phys(page);
 
 		do {
-			BUG_ON(!pte_none(*pte));
-
 			SetPageReserved(page);
-			set_pte_at(&init_mm, vaddr,
-				   pte, mk_pte(page, pgprot_noncached(PAGE_KERNEL)));
+			map_page(vaddr, page_to_phys(page),
+				 pgprot_noncached(PAGE_KERNEL));
 			page++;
-			pte++;
 			vaddr += PAGE_SIZE;
 		} while (size -= PAGE_SIZE);
 
@@ -241,7 +255,6 @@ void __dma_free_coherent(size_t size, void *vaddr)
 {
 	struct ppc_vm_region *c;
 	unsigned long flags, addr;
-	pte_t *ptep;
 
 	size = PAGE_ALIGN(size);
 
@@ -258,29 +271,26 @@ void __dma_free_coherent(size_t size, void *vaddr)
 		size = c->vm_end - c->vm_start;
 	}
 
-	ptep = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
 	addr = c->vm_start;
 	do {
-		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
+		pte_t *ptep;
 		unsigned long pfn;
 
-		ptep++;
-		addr += PAGE_SIZE;
-
-		if (!pte_none(pte) && pte_present(pte)) {
-			pfn = pte_pfn(pte);
-
+		ptep = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(addr),
+							       addr),
+						    addr),
+					 addr);
+		if (!pte_none(*ptep) && pte_present(*ptep)) {
+			pfn = pte_pfn(*ptep);
+			pte_clear(&init_mm, addr, ptep);
 			if (pfn_valid(pfn)) {
 				struct page *page = pfn_to_page(pfn);
-				ClearPageReserved(page);
 
+				ClearPageReserved(page);
 				__free_page(page);
-				continue;
 			}
 		}
-
-		printk(KERN_CRIT "%s: bad page in kernel page table\n",
-		       __func__);
+		addr += PAGE_SIZE;
 	} while (size -= PAGE_SIZE);
 
 	flush_tlb_kernel_range(c->vm_start, c->vm_end);
@@ -300,42 +310,6 @@ void __dma_free_coherent(size_t size, void *vaddr)
 }
 EXPORT_SYMBOL(__dma_free_coherent);
 
-/*
- * Initialise the consistent memory allocation.
- */
-static int __init dma_alloc_init(void)
-{
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
-	int ret = 0;
-
-	do {
-		pgd = pgd_offset(&init_mm, CONSISTENT_BASE);
-		pud = pud_alloc(&init_mm, pgd, CONSISTENT_BASE);
-		pmd = pmd_alloc(&init_mm, pud, CONSISTENT_BASE);
-		if (!pmd) {
-			printk(KERN_ERR "%s: no pmd tables\n", __func__);
-			ret = -ENOMEM;
-			break;
-		}
-
-		pte = pte_alloc_kernel(pmd, CONSISTENT_BASE);
-		if (!pte) {
-			printk(KERN_ERR "%s: no pte tables\n", __func__);
-			ret = -ENOMEM;
-			break;
-		}
-
-		consistent_pte = pte;
-	} while (0);
-
-	return ret;
-}
-
-core_initcall(dma_alloc_init);
-
 /*
  * make an area consistent.
  */
...
@@ -387,6 +387,10 @@ void __init mem_init(void)
 	pr_info(" * 0x%08lx..0x%08lx : highmem PTEs\n",
 		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
 #endif /* CONFIG_HIGHMEM */
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	pr_info(" * 0x%08lx..0x%08lx : consistent mem\n",
+		IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
+#endif /* CONFIG_NOT_COHERENT_CACHE */
 	pr_info(" * 0x%08lx..0x%08lx : early ioremap\n",
 		ioremap_bot, IOREMAP_TOP);
 	pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n",
...