Commit c2e26dd2 authored by Deepak Saxena, committed by Russell King

[ARM PATCH] 1815/1: Generic DMA buffer bouncing support for ARM targets

Patch from Deepak Saxena

Latest (and hopefully last :) patch for generic DMA buffer bouncing.

- Fixed SA1111 dma_needs_bounce
- Added check for out of bounds buffers.
- Made dmabounce.c directly implement dma_* API

Note that I didn't do the following:

#ifndef CONFIG_DMABOUNCE
inline implementations of all functions
#else
extern declarations
#endif

Instead I wrapped the individual functions in #ifndef/#else blocks to keep
the comments in the same area as the function declarations.  IMHO this makes
it easier for someone to go look at the comments if they need to know
what a specific API does.
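
For example, dma_map_single ends up wrapped like this in the diff below,
with its kernel-doc comment staying directly above the pair:

    #ifndef CONFIG_DMABOUNCE
    static inline dma_addr_t
    dma_map_single(struct device *dev, void *cpu_addr, size_t size,
                   enum dma_data_direction dir)
    {
            consistent_sync(cpu_addr, size, dir);
            return __virt_to_bus((unsigned long)cpu_addr);
    }
    #else
    extern dma_addr_t dma_map_single(struct device *, void *, size_t, enum dma_data_direction);
    #endif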
parent a70018bc
@@ -210,6 +210,11 @@ config FORCE_MAX_ZONEORDER
depends on SA1111
default "9"
config DMABOUNCE
bool
depends on SA1111
default y
source arch/arm/mm/Kconfig
# bool 'Use XScale PMU as timer source' CONFIG_XSCALE_PMU_TIMER
@@ -5,6 +5,7 @@
obj-y += platform.o
obj-$(CONFIG_ARM_AMBA) += amba.o
obj-$(CONFIG_ICST525) += icst525.o
obj-$(CONFIG_SA1111) += sa1111.o sa1111-pcibuf.o
obj-$(CONFIG_SA1111) += sa1111.o
obj-$(CONFIG_PCI_HOST_PLX90X0) += plx90x0.o
obj-$(CONFIG_PCI_HOST_VIA82C505) += via82c505.o
obj-$(CONFIG_DMABOUNCE) += dmabounce.o
@@ -25,6 +25,7 @@
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <asm/hardware.h>
#include <asm/mach-types.h>
@@ -547,15 +548,6 @@ sa1111_init_one_child(struct sa1111 *sachip, struct resource *parent,
snprintf(dev->dev.bus_id, sizeof(dev->dev.bus_id),
"%4.4lx", info->offset);
/*
* If the parent device has a DMA mask associated with it,
* propagate it down to the children.
*/
if (sachip->dev->dma_mask) {
dev->dma_mask = *sachip->dev->dma_mask;
dev->dev.dma_mask = &dev->dma_mask;
}
dev->devid = info->devid;
dev->dev.parent = sachip->dev;
dev->dev.bus = &sa1111_bus_type;
@@ -573,15 +565,37 @@ sa1111_init_one_child(struct sa1111 *sachip, struct resource *parent,
if (ret) {
printk("SA1111: failed to allocate resource for %s\n",
dev->res.name);
kfree(dev);
goto out;
}
ret = device_register(&dev->dev);
if (ret) {
release_resource(&dev->res);
out:
kfree(dev);
goto out;
}
/*
* If the parent device has a DMA mask associated with it,
* propagate it down to the children.
*/
if (sachip->dev->dma_mask) {
dev->dma_mask = *sachip->dev->dma_mask;
dev->dev.dma_mask = &dev->dma_mask;
if (dev->dma_mask != 0xffffffffUL) {
ret = dmabounce_register_dev(&dev->dev, 1024, 4096);
if (ret) {
printk("SA1111: Failed to register %s with dmabounce", dev->dev.bus_id);
kfree(dev);
device_unregister(dev);
}
}
}
out:
return ret;
}
@@ -742,61 +756,31 @@ static void __sa1111_remove(struct sa1111 *sachip)
*
* This routine only identifies whether or not a given DMA address
* is susceptible to the bug.
*
* This should only get called for sa1111_device types due to the
* way we configure our device dma_masks.
*/
int sa1111_check_dma_bug(dma_addr_t addr)
int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
{
struct sa1111 *sachip = g_sa1111;
unsigned int physaddr = SA1111_DMA_ADDR((unsigned int)addr);
unsigned int smcr;
u32 dma_mask = *dev->dma_mask;
/* Section 4.6 of the "Intel StrongARM SA-1111 Development Module
/*
* Section 4.6 of the "Intel StrongARM SA-1111 Development Module
* User's Guide" mentions that jumpers R51 and R52 control the
* target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or
* SDRAM bank 1 on Neponset). The default configuration selects
* Assabet, so any address in bank 1 is necessarily invalid.
*/
if ((machine_is_assabet() || machine_is_pfs168()) && addr >= 0xc8000000)
return -1;
if ((machine_is_assabet() || machine_is_pfs168()) &&
(addr >= 0xc8000000 || (addr + size) >= 0xc8000000))
return 1;
/* The bug only applies to buffers located more than one megabyte
* above the start of the target bank:
/*
* Check to see if either the start or end are illegal.
*/
if (physaddr<(1<<20))
return 0;
smcr = sa1111_readl(sachip->base + SA1111_SMCR);
switch (FExtr(smcr, SMCR_DRAC)) {
case 01: /* 10 row + bank address bits, A<20> must not be set */
if (physaddr & (1<<20))
return -1;
break;
case 02: /* 11 row + bank address bits, A<23> must not be set */
if (physaddr & (1<<23))
return -1;
break;
case 03: /* 12 row + bank address bits, A<24> must not be set */
if (physaddr & (1<<24))
return -1;
break;
case 04: /* 13 row + bank address bits, A<25> must not be set */
if (physaddr & (1<<25))
return -1;
break;
case 05: /* 14 row + bank address bits, A<20> must not be set */
if (physaddr & (1<<20))
return -1;
break;
case 06: /* 15 row + bank address bits, A<20> must not be set */
if (physaddr & (1<<20))
return -1;
break;
default:
printk(KERN_ERR "%s(): invalid SMCR DRAC value 0%lo\n",
__FUNCTION__, FExtr(smcr, SMCR_DRAC));
return -1;
}
return 0;
return ((addr & ~(*dev->dma_mask))) ||
((addr + size - 1) & ~(*dev->dma_mask));
}
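As a worked illustration of the new mask check (hypothetical mask value, for
illustration only): if a child device's dma_mask has bit 20 clear, e.g.
0xffefffff, then a buffer at bus address 0x00100000 gives
addr & ~dma_mask == 0x00100000, which is non-zero, so dma_needs_bounce()
returns 1 and the buffer is bounced.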
struct sa1111_save_data {
@@ -1293,7 +1277,6 @@ module_exit(sa1111_exit);
MODULE_DESCRIPTION("Intel Corporation SA1111 core driver");
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(sa1111_check_dma_bug);
EXPORT_SYMBOL(sa1111_select_audio_mode);
EXPORT_SYMBOL(sa1111_set_audio_rate);
EXPORT_SYMBOL(sa1111_get_audio_rate);
@@ -16,29 +16,6 @@
*/
extern void consistent_sync(void *kaddr, size_t size, int rw);
/*
* For SA-1111 these functions are "magic" and utilize bounce
* buffers as needed to work around SA-1111 DMA bugs.
*/
dma_addr_t sa1111_map_single(struct device *dev, void *, size_t, enum dma_data_direction);
void sa1111_unmap_single(struct device *dev, dma_addr_t, size_t, enum dma_data_direction);
int sa1111_map_sg(struct device *dev, struct scatterlist *, int, enum dma_data_direction);
void sa1111_unmap_sg(struct device *dev, struct scatterlist *, int, enum dma_data_direction);
void sa1111_dma_sync_single_for_cpu(struct device *dev, dma_addr_t, size_t, enum dma_data_direction);
void sa1111_dma_sync_single_for_device(struct device *dev, dma_addr_t, size_t, enum dma_data_direction);
void sa1111_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *, int, enum dma_data_direction);
void sa1111_dma_sync_sg_for_device(struct device *dev, struct scatterlist *, int, enum dma_data_direction);
#ifdef CONFIG_SA1111
extern struct bus_type sa1111_bus_type;
#define dmadev_is_sa1111(dev) ((dev)->bus == &sa1111_bus_type)
#else
#define dmadev_is_sa1111(dev) (0)
#endif
/*
* Return whether the given device DMA address mask can be supported
* properly. For example, if your device can only drive the low 24-bits
@@ -70,6 +47,14 @@ static inline int dma_is_consistent(dma_addr_t handle)
return 0;
}
/*
* DMA errors are defined by all-bits-set in the DMA address.
*/
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
return dma_addr == ~0;
}
/**
* dma_alloc_coherent - allocate consistent memory for DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -118,6 +103,7 @@ dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, int
#define dma_free_writecombine(dev,size,cpu_addr,handle) \
dma_free_coherent(dev,size,cpu_addr,handle)
/**
* dma_map_single - map a single buffer for streaming DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -132,16 +118,17 @@ dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, int
* can regain ownership by calling dma_unmap_single() or
* dma_sync_single_for_cpu().
*/
#ifndef CONFIG_DMABOUNCE
static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
enum dma_data_direction dir)
{
if (dmadev_is_sa1111(dev))
return sa1111_map_single(dev, cpu_addr, size, dir);
consistent_sync(cpu_addr, size, dir);
return __virt_to_bus((unsigned long)cpu_addr);
}
#else
extern dma_addr_t dma_map_single(struct device *,void *, size_t, enum dma_data_direction);
#endif
/**
* dma_map_page - map a portion of a page for streaming DMA
@@ -180,15 +167,16 @@ dma_map_page(struct device *dev, struct page *page,
* After this call, reads by the CPU to the buffer are guaranteed to see
* whatever the device wrote there.
*/
#ifndef CONFIG_DMABOUNCE
static inline void
dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size,
enum dma_data_direction dir)
{
if (dmadev_is_sa1111(dev))
sa1111_unmap_single(dev, handle, size, dir);
/* nothing to do */
}
#else
extern void dma_unmap_single(struct device *, dma_addr_t, size_t, enum dma_data_direction);
#endif
/**
* dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
@@ -233,15 +221,13 @@ dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
* Device ownership issues as mentioned above for dma_map_single are
* the same here.
*/
#ifndef CONFIG_DMABOUNCE
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir)
{
int i;
if (dmadev_is_sa1111(dev))
return sa1111_map_sg(dev, sg, nents, dir);
for (i = 0; i < nents; i++, sg++) {
char *virt;
@@ -252,6 +238,9 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
return nents;
}
#else
extern int dma_map_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
#endif
/**
* dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
@@ -264,17 +253,18 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
* Again, CPU read rules concerning calls here are the same as for
* dma_unmap_single() above.
*/
#ifndef CONFIG_DMABOUNCE
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir)
{
if (dmadev_is_sa1111(dev)) {
sa1111_unmap_sg(dev, sg, nents, dir);
return;
}
/* nothing to do */
}
#else
extern void dma_unmap_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
#endif
/**
* dma_sync_single_for_cpu
@@ -293,15 +283,11 @@ dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
* must first perform a dma_sync_for_device, and then the
* device again owns the buffer.
*/
#ifndef CONFIG_DMABOUNCE
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
enum dma_data_direction dir)
{
if (dmadev_is_sa1111(dev)) {
sa1111_dma_sync_single_for_cpu(dev, handle, size, dir);
return;
}
consistent_sync((void *)__bus_to_virt(handle), size, dir);
}
@@ -309,13 +295,13 @@ static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
enum dma_data_direction dir)
{
if (dmadev_is_sa1111(dev)) {
sa1111_dma_sync_single_for_device(dev, handle, size, dir);
return;
}
consistent_sync((void *)__bus_to_virt(handle), size, dir);
}
#else
extern void dma_sync_single_for_cpu(struct device*, dma_addr_t, size_t, enum dma_data_direction);
extern void dma_sync_single_for_device(struct device*, dma_addr_t, size_t, enum dma_data_direction);
#endif
/**
* dma_sync_sg_for_cpu
@@ -330,17 +316,13 @@ dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
* The same as dma_sync_single_for_* but for a scatter-gather list,
* same rules and usage.
*/
#ifndef CONFIG_DMABOUNCE
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir)
{
int i;
if (dmadev_is_sa1111(dev)) {
sa1111_dma_sync_sg_for_cpu(dev, sg, nents, dir);
return;
}
for (i = 0; i < nents; i++, sg++) {
char *virt = page_address(sg->page) + sg->offset;
consistent_sync(virt, sg->length, dir);
@@ -353,24 +335,73 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
{
int i;
if (dmadev_is_sa1111(dev)) {
sa1111_dma_sync_sg_for_device(dev, sg, nents, dir);
return;
}
for (i = 0; i < nents; i++, sg++) {
char *virt = page_address(sg->page) + sg->offset;
consistent_sync(virt, sg->length, dir);
}
}
#else
extern void dma_sync_sg_for_cpu(struct device*, struct scatterlist*, int, enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device*, struct scatterlist*, int, enum dma_data_direction);
#endif
#ifdef CONFIG_DMABOUNCE
/*
* DMA errors are defined by all-bits-set in the DMA address.
* For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
* and utilize bounce buffers as needed to work around limited DMA windows.
*
* On the SA-1111, a bug limits DMA to only certain regions of RAM.
* On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
* On some ADI engineering systems, the PCI inbound window is 32MB (12MB total RAM)
*
* The following are helper functions used by the dmabounce subsystem
*
*/
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
return dma_addr == ~0;
}
/**
* dmabounce_register_dev
*
* @dev: valid struct device pointer
* @small_buf_size: size of buffers to use with small buffer pool
* @large_buf_size: size of buffers to use with large buffer pool (can be 0)
*
* This function should be called by low-level platform code to register
* a device as requiring DMA buffer bouncing. The function will allocate
* appropriate DMA pools for the device.
*
*/
extern int dmabounce_register_dev(struct device *, unsigned long, unsigned long);
/**
* dmabounce_unregister_dev
*
* @dev: valid struct device pointer
*
* This function should be called by low-level platform code when device
* that was previously registered with dmabounce_register_dev is removed
* from the system.
*
*/
extern void dmabounce_unregister_dev(struct device *);
/**
* dma_needs_bounce
*
* @dev: valid struct device pointer
* @dma_handle: dma_handle of unbounced buffer
* @size: size of region being mapped
*
* Platforms that utilize the dmabounce mechanism must implement
* this function.
*
* The dmabounce routines call this function whenever a dma-mapping
* is requested to determine whether a given buffer needs to be bounced
* or not. The function must return 0 if the buffer is OK for
* DMA access and 1 if the buffer needs to be bounced.
*
*/
extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
#endif /* CONFIG_DMABOUNCE */
#endif /* __KERNEL__ */
#endif
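
As a closing illustration, here is a hedged sketch of the platform-side
contract (hypothetical names and window size, loosely modeled on the IXP425
case noted in the comment above):

    /*
     * Hypothetical platform implementation: bounce any buffer that falls
     * outside a 64MB PCI inbound window.  PLATFORM_PCI_DMA_LIMIT is an
     * illustrative name, not an existing kernel symbol.
     */
    #define PLATFORM_PCI_DMA_LIMIT  (64 * 1024 * 1024)

    int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
    {
            /* non-zero tells dmabounce to substitute a safe buffer */
            return (addr + size) > PLATFORM_PCI_DMA_LIMIT;
    }

    /*
     * Platform setup code then opts the device into bouncing; the pool
     * sizes mirror the SA-1111 registration shown earlier in this patch.
     */
    static int platform_setup_dma(struct device *dev)
    {
            int ret = dmabounce_register_dev(dev, 1024, 4096);
            if (ret)
                    printk("failed to register %s with dmabounce\n", dev->bus_id);
            return ret;
    }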