Commit 768e5bc1 authored by David Howells, committed by Linus Torvalds

[PATCH] frv: PCI DMA fixes

The attached patch makes cache flushing work correctly on DMA-consistent
memory for the frv arch.  On the FRV, unmapped memory can't be flushed
directly; it has to be kmapped first, since the flush instructions take
virtual addresses, not physical ones.

It also splits the MMU and !MMU versions into separate files.
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 988618ea
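The heart of the fix shows up in the new dma_map_sg() in pci-dma.c below: each page is temporarily mapped with kmap_atomic() so that the dcache can be written back through a virtual address, after which the mapping is dropped and the DAMPR2/IAMPR2 registers are restored (presumably because the cacheable __KM_CACHE kmap slot borrows that AMPR pair). A minimal sketch of the pattern, using the same helpers the patch adds (the wrapper name and includes are illustrative only):

#include <linux/highmem.h>
#include <asm/io.h>

/* Write the dcache back over one page that has no permanent kernel mapping.
 * Sketch only; mirrors the loop body added to dma_map_sg() below. */
static void flush_unmapped_page(struct page *page)
{
	unsigned long dampr2 = __get_DAMPR(2);	/* save the AMPR pair the kmap slot will use */
	void *vaddr = kmap_atomic(page, __KM_CACHE);

	frv_dcache_writeback((unsigned long) vaddr,
			     (unsigned long) vaddr + PAGE_SIZE);

	kunmap_atomic(vaddr, __KM_CACHE);
	if (dampr2) {
		__set_DAMPR(2, dampr2);
		__set_IAMPR(2, dampr2);
	}
}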
@@ -2,4 +2,12 @@
# Makefile for the MB93090-MB00 motherboard stuff
#
obj-$(CONFIG_PCI) := pci-frv.o pci-dma.o pci-irq.o pci-vdk.o
ifeq "$(CONFIG_PCI)" "y"
obj-y := pci-frv.o pci-irq.o pci-vdk.o
ifeq "$(CONFIG_MMU)" "y"
obj-y += pci-dma.o
else
obj-y += pci-dma-nommu.o
endif
endif
/* pci-dma-nommu.c: Dynamic DMA mapping support for the FRV
*
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
* Written by David Woodhouse (dwmw2@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <asm/io.h>
#if 1
#define DMA_SRAM_START dma_coherent_mem_start
#define DMA_SRAM_END dma_coherent_mem_end
#else // Use video RAM on Matrox
#define DMA_SRAM_START 0xe8900000
#define DMA_SRAM_END 0xe8a00000
#endif
struct dma_alloc_record {
struct list_head list;
unsigned long ofs;
unsigned long len;
};
static spinlock_t dma_alloc_lock = SPIN_LOCK_UNLOCKED;
static LIST_HEAD(dma_alloc_list);
void *dma_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, int gfp)
{
struct dma_alloc_record *new;
struct list_head *this = &dma_alloc_list;
unsigned long flags;
unsigned long start = DMA_SRAM_START;
unsigned long end;
if (!DMA_SRAM_START) {
printk("%s called without any DMA area reserved!\n", __func__);
return NULL;
}
new = kmalloc(sizeof (*new), GFP_ATOMIC);
if (!new)
return NULL;
/* Round up to a reasonable alignment */
new->len = (size + 31) & ~31;
spin_lock_irqsave(&dma_alloc_lock, flags);
list_for_each (this, &dma_alloc_list) {
struct dma_alloc_record *this_r = list_entry(this, struct dma_alloc_record, list);
end = this_r->ofs;
if (end - start >= size)
goto gotone;
start = this_r->ofs + this_r->len;
}
/* Reached end of list. */
end = DMA_SRAM_END;
this = &dma_alloc_list;
if (end - start >= size) {
gotone:
new->ofs = start;
list_add_tail(&new->list, this);
spin_unlock_irqrestore(&dma_alloc_lock, flags);
*dma_handle = start;
return (void *)start;
}
kfree(new);
spin_unlock_irqrestore(&dma_alloc_lock, flags);
return NULL;
}
void dma_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
{
struct dma_alloc_record *rec;
unsigned long flags;
spin_lock_irqsave(&dma_alloc_lock, flags);
list_for_each_entry(rec, &dma_alloc_list, list) {
if (rec->ofs == dma_handle) {
list_del(&rec->list);
kfree(rec);
spin_unlock_irqrestore(&dma_alloc_lock, flags);
return;
}
}
spin_unlock_irqrestore(&dma_alloc_lock, flags);
BUG();
}
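For reference, a driver reaches this allocator through the usual consistent-DMA calls; a hypothetical example (device, buffer size and flags are made up, and note that the gfp argument is a plain int in this version of the API):

/* Hypothetical driver helper: allocate, use and release a consistent buffer. */
static int example_use_coherent(struct pci_dev *pdev)
{
	dma_addr_t dma_handle;
	void *buf;

	buf = dma_alloc_coherent(&pdev->dev, 512, &dma_handle, GFP_ATOMIC);
	if (!buf)
		return -ENOMEM;

	/* ... point the device at dma_handle and run the transfer ... */

	dma_free_coherent(&pdev->dev, 512, buf, dma_handle);
	return 0;
}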
/*
* Map a single buffer of the indicated size for DMA in streaming mode.
* The 32-bit bus address to use is returned.
*
* Once the device is given the dma address, the device owns this memory
* until either pci_unmap_single or pci_dma_sync_single is performed.
*/
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
enum dma_data_direction direction)
{
if (direction == DMA_NONE)
BUG();
frv_cache_wback_inv((unsigned long) ptr, (unsigned long) ptr + size);
return virt_to_bus(ptr);
}
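As the comment above notes, the device owns the buffer between the map and the unmap; a hypothetical caller (buffer, length and direction chosen purely for illustration) pairs the two calls like this:

/* Hypothetical caller: stream one kernel buffer out to the device. */
static void example_stream_out(struct device *dev, void *buf, size_t len)
{
	dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* ... hand 'bus' to the device and wait for the transfer to complete ... */

	dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
}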
/*
* Map a set of buffers described by scatterlist in streaming
* mode for DMA. This is the scatter-gather version of the
* above pci_map_single interface. Here the scatter gather list
* elements are each tagged with the appropriate dma address
* and length. They are obtained via sg_dma_{address,length}(SG).
*
* NOTE: An implementation may be able to use a smaller number of
* DMA address/length pairs than there are SG table elements.
* (for example via virtual mapping capabilities)
* The routine returns the number of addr/length pairs actually
* used, at most nents.
*
* Device ownership issues as mentioned above for pci_map_single are
* the same here.
*/
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction direction)
{
int i;
for (i=0; i<nents; i++)
frv_cache_wback_inv(sg_dma_address(&sg[i]),
sg_dma_address(&sg[i]) + sg_dma_len(&sg[i]));
if (direction == DMA_NONE)
BUG();
return nents;
}
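Callers see the same dma_map_sg() interface on MMU and !MMU kernels. A usage sketch with the scatterlist layout this kernel uses (the helper and its page arguments are hypothetical):

/* Hypothetical caller: stream two whole pages to the device. */
static int example_map_two_pages(struct device *dev, struct page *p0, struct page *p1)
{
	struct scatterlist sg[2];
	int nmapped;

	sg[0].page = p0;  sg[0].offset = 0;  sg[0].length = PAGE_SIZE;
	sg[1].page = p1;  sg[1].offset = 0;  sg[1].length = PAGE_SIZE;

	nmapped = dma_map_sg(dev, sg, 2, DMA_TO_DEVICE);

	/* program the device from sg_dma_address(&sg[i]) / sg_dma_len(&sg[i])
	 * for each of the nmapped entries, then unmap once the transfer is done */
	dma_unmap_sg(dev, sg, 2, DMA_TO_DEVICE);

	return nmapped;
}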
/*
* Dynamic DMA mapping support.
/* pci-dma.c: Dynamic DMA mapping support for the FRV CPUs that have MMUs
*
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* On i386 there is no hardware dynamic DMA address translation,
* so consistent alloc/free are merely page allocation/freeing.
* The rest of the dynamic DMA mapping interface is implemented
* in asm/pci.h.
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/types.h>
@@ -12,10 +14,9 @@
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/highmem.h>
#include <asm/io.h>
#ifdef CONFIG_MMU
void *dma_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, int gfp)
{
void *ret;
@@ -32,93 +33,73 @@ void dma_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_
consistent_free(vaddr);
}
/*
* Map a single buffer of the indicated size for DMA in streaming mode.
* The 32-bit bus address to use is returned.
*
* Once the device is given the dma address, the device owns this memory
* until either pci_unmap_single or pci_dma_sync_single is performed.
*/
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
enum dma_data_direction direction)
{
if (direction == DMA_NONE)
BUG();
#else /* CONFIG_MMU */
#if 1
#define DMA_SRAM_START dma_coherent_mem_start
#define DMA_SRAM_END dma_coherent_mem_end
#else // Use video RAM on Matrox
#define DMA_SRAM_START 0xe8900000
#define DMA_SRAM_END 0xe8a00000
#endif
struct dma_alloc_record {
struct list_head list;
unsigned long ofs;
unsigned long len;
};
frv_cache_wback_inv((unsigned long) ptr, (unsigned long) ptr + size);
static spinlock_t dma_alloc_lock = SPIN_LOCK_UNLOCKED;
static LIST_HEAD(dma_alloc_list);
return virt_to_bus(ptr);
}
void *dma_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, int gfp)
/*
* Map a set of buffers described by scatterlist in streaming
* mode for DMA. This is the scatter-gather version of the
* above pci_map_single interface. Here the scatter gather list
* elements are each tagged with the appropriate dma address
* and length. They are obtained via sg_dma_{address,length}(SG).
*
* NOTE: An implementation may be able to use a smaller number of
* DMA address/length pairs than there are SG table elements.
* (for example via virtual mapping capabilities)
* The routine returns the number of addr/length pairs actually
* used, at most nents.
*
* Device ownership issues as mentioned above for pci_map_single are
* the same here.
*/
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction direction)
{
struct dma_alloc_record *new;
struct list_head *this = &dma_alloc_list;
unsigned long flags;
unsigned long start = DMA_SRAM_START;
unsigned long end;
if (!DMA_SRAM_START) {
printk("%s called without any DMA area reserved!\n", __func__);
return NULL;
}
unsigned long dampr2;
void *vaddr;
int i;
new = kmalloc(sizeof (*new), GFP_ATOMIC);
if (!new)
return NULL;
if (direction == DMA_NONE)
BUG();
/* Round up to a reasonable alignment */
new->len = (size + 31) & ~31;
dampr2 = __get_DAMPR(2);
spin_lock_irqsave(&dma_alloc_lock, flags);
for (i = 0; i < nents; i++) {
vaddr = kmap_atomic(sg[i].page, __KM_CACHE);
list_for_each (this, &dma_alloc_list) {
struct dma_alloc_record *this_r = list_entry(this, struct dma_alloc_record, list);
end = this_r->ofs;
frv_dcache_writeback((unsigned long) vaddr,
(unsigned long) vaddr + PAGE_SIZE);
if (end - start >= size)
goto gotone;
start = this_r->ofs + this_r->len;
}
/* Reached end of list. */
end = DMA_SRAM_END;
this = &dma_alloc_list;
if (end - start >= size) {
gotone:
new->ofs = start;
list_add_tail(&new->list, this);
spin_unlock_irqrestore(&dma_alloc_lock, flags);
*dma_handle = start;
return (void *)start;
kunmap_atomic(vaddr, __KM_CACHE);
if (dampr2) {
__set_DAMPR(2, dampr2);
__set_IAMPR(2, dampr2);
}
kfree(new);
spin_unlock_irqrestore(&dma_alloc_lock, flags);
return NULL;
return nents;
}
void dma_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long offset,
size_t size, enum dma_data_direction direction)
{
struct dma_alloc_record *rec;
unsigned long flags;
spin_lock_irqsave(&dma_alloc_lock, flags);
list_for_each_entry(rec, &dma_alloc_list, list) {
if (rec->ofs == dma_handle) {
list_del(&rec->list);
kfree(rec);
spin_unlock_irqrestore(&dma_alloc_lock, flags);
return;
}
}
spin_unlock_irqrestore(&dma_alloc_lock, flags);
BUG();
BUG_ON(direction == DMA_NONE);
flush_dcache_page(page);
return (dma_addr_t) page_to_phys(page) + offset;
}
#endif /* CONFIG_MMU */
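dma_map_page() lets a driver hand a struct page (possibly a highmem page with no permanent kernel mapping) straight to the DMA layer; a hypothetical caller looks like this:

/* Hypothetical caller: let the device fill the first 'len' bytes of a page. */
static void example_fill_page(struct device *dev, struct page *page, size_t len)
{
	dma_addr_t bus = dma_map_page(dev, page, 0, len, DMA_FROM_DEVICE);

	/* ... let the device write into the buffer at 'bus' ... */

	dma_unmap_page(dev, bus, len, DMA_FROM_DEVICE);
}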
@@ -33,17 +33,8 @@ void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t
* Once the device is given the dma address, the device owns this memory
* until either pci_unmap_single or pci_dma_sync_single is performed.
*/
static inline
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
enum dma_data_direction direction)
{
if (direction == DMA_NONE)
BUG();
frv_cache_wback_inv((unsigned long) ptr, (unsigned long) ptr + size);
return virt_to_bus(ptr);
}
extern dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
enum dma_data_direction direction);
/*
* Unmap a single streaming mode DMA translation. The dma_addr and size
@@ -76,21 +67,8 @@ void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
* Device ownership issues as mentioned above for pci_map_single are
* the same here.
*/
static inline
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction direction)
{
int i;
for (i=0; i<nents; i++)
frv_cache_wback_inv(sg_dma_address(&sg[i]),
sg_dma_address(&sg[i]) + sg_dma_len(&sg[i]));
if (direction == DMA_NONE)
BUG();
return nents;
}
extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction direction);
/*
* Unmap a set of streaming mode DMA translations.
@@ -104,13 +82,9 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
BUG_ON(direction == DMA_NONE);
}
static inline
extern
dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long offset,
size_t size, enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
return (dma_addr_t)(page_to_pfn(page)) * PAGE_SIZE + offset;
}
size_t size, enum dma_data_direction direction);
static inline
void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
......