Commit 8c34f4a5 authored by Paul Mackerras

PPC32: Better allocation of DMA-consistent memory on incoherent machines.

parent baecf72c
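Background for the change (an editorial sketch, not part of the patch): on a cache-coherent system a consistent buffer can simply be cached lowmem, and its bus address follows from virt_to_bus() on the pointer handed back to the driver. On cores without coherent caches (4xx/8xx class, CONFIG_NOT_COHERENT_CACHE), the CPU has to use an uncached alias of the pages created in the vmalloc area, so virt_to_bus() on that pointer is meaningless and the device-visible address has to come from the backing pages instead. The helper names below are purely illustrative, not code from the tree:

#include <linux/types.h>
#include <linux/mm.h>
#include <asm/io.h>

/* Illustrative only: how the CPU-side and device-side addresses relate. */
static inline void *coherent_cpu_ptr(struct page *page)
{
	/* cache-coherent case: the cached linear mapping is fine, and
	 * virt_to_bus() of this pointer gives the device address */
	return page_address(page);
}

static inline dma_addr_t noncoherent_bus_addr(struct page *page)
{
	/* non-coherent case: the CPU-side pointer is an uncached vmalloc
	 * alias, so the device address must come from the page itself,
	 * which is what consistent_alloc() now returns via *dma_handle */
	return page_to_bus(page);
}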
@@ -22,6 +22,7 @@ void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
 	if (hwdev == NULL || hwdev->dma_mask != 0xffffffff)
 		gfp |= GFP_DMA;
 #ifdef CONFIG_NOT_COHERENT_CACHE
 	ret = consistent_alloc(gfp, size, dma_handle);
 #else
@@ -30,7 +31,9 @@ void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
 	if (ret != NULL) {
 		memset(ret, 0, size);
+#ifndef CONFIG_NOT_COHERENT_CACHE
 		*dma_handle = virt_to_bus(ret);
+#endif
 	}
 
 	return ret;
 }
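With the hunk above, on CONFIG_NOT_COHERENT_CACHE kernels consistent_alloc() fills in *dma_handle itself: the pointer it returns is an uncached vmalloc-space mapping, for which the generic virt_to_bus() assignment would not produce the device-visible address, so that assignment is now compiled out. From a driver's point of view nothing changes. A minimal caller sketch follows; the ring layout and names are purely illustrative and not from the patch:

#include <linux/pci.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <asm/page.h>

static void *ring_cpu;		/* CPU address (uncached on 4xx/8xx) */
static dma_addr_t ring_dma;	/* bus address to program into the device */

static int my_ring_init(struct pci_dev *pdev)
{
	/* one page of descriptors; the allocator zeroes the buffer and
	 * fills in ring_dma, via consistent_alloc() on non-coherent CPUs */
	ring_cpu = pci_alloc_consistent(pdev, PAGE_SIZE, &ring_dma);
	if (ring_cpu == NULL)
		return -ENOMEM;
	/* write descriptors through ring_cpu, hand ring_dma to the device */
	return 0;
}

static void my_ring_exit(struct pci_dev *pdev)
{
	pci_free_consistent(pdev, PAGE_SIZE, ring_cpu, ring_dma);
}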
@@ -2,7 +2,7 @@
  * PowerPC version derived from arch/arm/mm/consistent.c
  * Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
  *
- * arch/arm/mm/consistent.c
+ * arch/ppc/mm/cachemap.c
  *
  * Copyright (C) 2000 Russell King
  *
@@ -59,57 +59,69 @@ int map_page(unsigned long va, unsigned long pa, int flags);
  */
 void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle)
 {
-	int order, err, i;
-	unsigned long page, va, pa, flags;
-	struct vm_struct *area;
-	void *ret;
+	int order, err;
+	struct page *page, *free, *end;
+	unsigned long pa, flags, offset;
+	struct vm_struct *area = NULL;
+	unsigned long va = 0;
 
-	if (in_interrupt())
-		BUG();
+	BUG_ON(in_interrupt());
 
-	/* Only allocate page size areas.
-	*/
+	/* Only allocate page size areas */
 	size = PAGE_ALIGN(size);
 	order = get_order(size);
 
-	page = __get_free_pages(gfp, order);
-	if (!page) {
-		BUG();
+	free = page = alloc_pages(gfp, order);
+	if (! page)
 		return NULL;
-	}
+
+	pa = page_to_phys(page);
+	*dma_handle = page_to_bus(page);
+	end = page + (1 << order);
 
 	/*
 	 * we need to ensure that there are no cachelines in use,
 	 * or worse dirty in this area.
 	 */
-	invalidate_dcache_range(page, page + size);
+	invalidate_dcache_range((unsigned long)page_address(page),
+				(unsigned long)page_address(page) + size);
 
-	/* Allocate some common virtual space to map the new pages.
+	/*
+	 * alloc_pages() expects the block to be handled as a unit, so
+	 * it only sets the page count on the first page. We set the
+	 * counts on each page so they can be freed individually
 	 */
+	for (; page < end; page++)
+		set_page_count(page, 1);
+
+	/* Allocate some common virtual space to map the new pages*/
 	area = get_vm_area(size, VM_ALLOC);
-	if (area == 0) {
-		free_pages(page, order);
-		return NULL;
-	}
-	va = VMALLOC_VMADDR(area->addr);
-	ret = (void *)va;
+	if (! area)
+		goto out;
 
-	/* This gives us the real physical address of the first page.
-	*/
-	*dma_handle = pa = virt_to_bus((void *)page);
+	va = VMALLOC_VMADDR(area->addr);
 
 	flags = _PAGE_KERNEL | _PAGE_NO_CACHE;
 
-	err = 0;
-	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
-		err = map_page(va+i, pa+i, flags);
-
-	if (err) {
-		vfree((void *)va);
-		return NULL;
-	}
+	for (offset = 0; offset < size; offset += PAGE_SIZE) {
+		err = map_page(va+offset, pa+offset, flags);
+		if (err) {
+			vfree((void *)va);
+			va = 0;
+			goto out;
+		}
+		free++;
+	}
 
-	return ret;
+ out:
+	/* Free pages which weren't mapped */
+	for (; free < end; free++) {
+		__free_page(free);
+	}
+
+	return (void *)va;
 }
 
 /*
@@ -117,8 +129,7 @@ void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle)
  */
 void consistent_free(void *vaddr)
 {
-	if (in_interrupt())
-		BUG();
+	BUG_ON(in_interrupt());
 
 	vfree(vaddr);
 }
@@ -155,6 +166,6 @@ void consistent_sync_page(struct page *page, unsigned long offset,
 {
 	unsigned long start;
 
-	start = page_address(page) + offset;
+	start = (unsigned long)page_address(page) + offset;
 	consistent_sync((void *)start, size, direction);
 }
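A note on the cleanup logic in the consistent_alloc() hunk above: alloc_pages() treats the 2^order block as a unit and only gives the head page a reference count, so the new code sets a count on every page up front; the 'free' cursor is then advanced past each page that gets a PTE, which makes [free, end) exactly the set of pages that were never mapped and can be handed back one at a time with __free_page(). The sketch below is a condensed paraphrase of that path with the bookkeeping spelled out, for illustration only; it assumes the same headers and local declarations as arch/ppc/mm/cachemap.c, and the hunk above is authoritative:

/* Condensed paraphrase of the allocation path; not a drop-in replacement. */
void *uncached_alloc_sketch(int gfp, size_t size, dma_addr_t *dma_handle)
{
	struct page *page, *free, *end;
	struct vm_struct *area;
	unsigned long va = 0, pa, offset;
	int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	free = page = alloc_pages(gfp, order);	/* physically contiguous block */
	if (!page)
		return NULL;

	pa = page_to_phys(page);
	*dma_handle = page_to_bus(page);	/* device-visible address */
	end = page + (1 << order);

	/* make sure no (possibly dirty) cache lines cover the block */
	invalidate_dcache_range((unsigned long)page_address(page),
				(unsigned long)page_address(page) + size);

	/* give every page its own count so the tail can be freed page by page */
	for (; page < end; page++)
		set_page_count(page, 1);

	area = get_vm_area(size, VM_ALLOC);
	if (!area)
		goto out;			/* free == head: release everything */
	va = VMALLOC_VMADDR(area->addr);

	/* map each page uncached; 'free' always points at the first page
	 * that does not yet have a PTE */
	for (offset = 0; offset < size; offset += PAGE_SIZE) {
		if (map_page(va + offset, pa + offset,
			     _PAGE_KERNEL | _PAGE_NO_CACHE)) {
			vfree((void *)va);	/* undo the partial mapping */
			va = 0;
			goto out;
		}
		free++;
	}
 out:
	for (; free < end; free++)
		__free_page(free);		/* pages that never got mapped */
	return (void *)va;
}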