Commit 34dc63a5 authored by Christoph Hellwig, committed by Geert Uytterhoeven

m68k: Use the generic dma coherent remap allocator

This switches m68k to using common code for the DMA allocations,
including potential use of the CMA allocator if configured.
Also add a comment where the existing behavior seems to be lacking.

Switching to the generic code enables DMA allocations from atomic
context, which is required by the DMA API documentation, and also
adds various other minor features that drivers have started relying
upon.  It also ensures there is a single, tested code base for all
architectures that require uncached pte bits for coherent DMA
allocations.
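
As a purely illustrative aside (not part of this commit), the kind of
allocation this enables looks roughly like the hypothetical driver
fragment below; the "foo" structure and function names are invented
for the example.  The old m68k-private arch_dma_alloc() relied on
vmap(), which may sleep, so such a call was not safe before:

/*
 * Hypothetical driver fragment, for illustration only: with the generic
 * remap allocator (and its pre-populated atomic pool) in place, a small
 * coherent buffer can be requested with GFP_ATOMIC, i.e. from a context
 * that must not sleep.
 */
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/errno.h>

struct foo_desc_ring {
	void		*vaddr;
	dma_addr_t	dma;
};

/* May be called under a spinlock or from an interrupt handler. */
static int foo_alloc_ring_atomic(struct device *dev,
				 struct foo_desc_ring *ring)
{
	ring->vaddr = dma_alloc_coherent(dev, PAGE_SIZE, &ring->dma,
					 GFP_ATOMIC);
	if (!ring->vaddr)
		return -ENOMEM;
	return 0;
}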
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
parent f67d6672
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -3,10 +3,12 @@ config M68K
 	bool
 	default y
 	select ARCH_32BIT_OFF_T
+	select ARCH_HAS_DMA_MMAP_PGPROT if MMU && !COLDFIRE
 	select ARCH_HAS_SYNC_DMA_FOR_DEVICE if HAS_DMA
 	select ARCH_MIGHT_HAVE_PC_PARPORT if ISA
 	select ARCH_NO_COHERENT_DMA_MMAP if !MMU
 	select ARCH_NO_PREEMPT if !COLDFIRE
+	select DMA_DIRECT_REMAP if HAS_DMA && MMU && !COLDFIRE
 	select HAVE_IDE
 	select HAVE_AOUT if MMU
 	select HAVE_DEBUG_BUGVERBOSE
--- a/arch/m68k/kernel/dma.c
+++ b/arch/m68k/kernel/dma.c
@@ -18,57 +18,17 @@
 #include <asm/pgalloc.h>
 
 #if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
-void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
-		gfp_t flag, unsigned long attrs)
+pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
+		unsigned long attrs)
 {
-	struct page *page, **map;
-	pgprot_t pgprot;
-	void *addr;
-	int i, order;
-
-	pr_debug("dma_alloc_coherent: %d,%x\n", size, flag);
-
-	size = PAGE_ALIGN(size);
-	order = get_order(size);
-
-	page = alloc_pages(flag | __GFP_ZERO, order);
-	if (!page)
-		return NULL;
-
-	*handle = page_to_phys(page);
-	map = kmalloc(sizeof(struct page *) << order, flag & ~__GFP_DMA);
-	if (!map) {
-		__free_pages(page, order);
-		return NULL;
+	if (CPU_IS_040_OR_060) {
+		pgprot_val(prot) &= ~_PAGE_CACHE040;
+		pgprot_val(prot) |= _PAGE_GLOBAL040 | _PAGE_NOCACHE_S;
+	} else {
+		pgprot_val(prot) |= _PAGE_NOCACHE030;
 	}
-	split_page(page, order);
-
-	order = 1 << order;
-	size >>= PAGE_SHIFT;
-	map[0] = page;
-	for (i = 1; i < size; i++)
-		map[i] = page + i;
-	for (; i < order; i++)
-		__free_page(page + i);
-	pgprot = __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
-	if (CPU_IS_040_OR_060)
-		pgprot_val(pgprot) |= _PAGE_GLOBAL040 | _PAGE_NOCACHE_S;
-	else
-		pgprot_val(pgprot) |= _PAGE_NOCACHE030;
-	addr = vmap(map, size, VM_MAP, pgprot);
-	kfree(map);
-	return addr;
+	return prot;
 }
 
-void arch_dma_free(struct device *dev, size_t size, void *addr,
-		dma_addr_t handle, unsigned long attrs)
-{
-	pr_debug("dma_free_coherent: %p, %x\n", addr, handle);
-	vfree(addr);
-}
-
 #else
 
 #include <asm/cacheflush.h>
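
To show where the new hook plugs in, here is a simplified,
non-authoritative sketch of what a dma_mmap_coherent()-style path is
expected to do with it once ARCH_HAS_DMA_MMAP_PGPROT is selected.  The
helper name foo_dma_mmap_sketch and the exact control flow are
assumptions made for illustration, not a copy of the kernel/dma code:

/*
 * Simplified sketch, not the real common-code implementation: the mmap
 * path runs the vma's page protection through arch_dma_mmap_pgprot()
 * before inserting the coherent buffer's pages, so m68k can force the
 * 030/040/060 uncached pte bits shown in the hunk above.
 */
#include <linux/dma-noncoherent.h>	/* arch_dma_mmap_pgprot() */
#include <linux/dma-direct.h>		/* dma_to_phys() */
#include <linux/mm.h>
#include <linux/pfn.h>

static int foo_dma_mmap_sketch(struct device *dev,
			       struct vm_area_struct *vma,
			       dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr));

	/* Ask the architecture for uncached protection bits. */
	vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot,
						 attrs);

	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

The point is only that the pgprot handling from the removed
arch_dma_alloc() now lives in one small hook the generic code can
consult, instead of being open-coded in an m68k-private allocator.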