Commit 0703ed2a authored by Russell King

ARM: dmabounce: get rid of dma_needs_bounce global function

Pass the device-type-specific needs_bounce function in at dmabounce
register time, avoiding the need for a platform-specific global
function to do this.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent c289b2e0
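
With this change, a dmabounce user supplies its bounce test when it registers
the device, and the dmabounce core stores the callback in its per-device
state. As a minimal sketch of the new pattern (the foo_* names are
hypothetical and not part of this commit; the 2048/4096 pool sizes follow the
it8152 and ixp4xx callers below):

	static int foo_needs_bounce(struct device *dev, dma_addr_t dma_addr,
				    size_t size)
	{
		/* Bounce any buffer that extends beyond the device's 64MB window. */
		return (dma_addr + size) >= SZ_64M;
	}

	static int foo_platform_notify(struct device *dev)
	{
		/* 2048-byte small-buffer pool, 4096-byte large-buffer pool. */
		return dmabounce_register_dev(dev, 2048, 4096, foo_needs_bounce);
	}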
arch/arm/common/dmabounce.c

@@ -79,6 +79,8 @@ struct dmabounce_device_info {
 	struct dmabounce_pool	large;
 
 	rwlock_t lock;
+
+	int (*needs_bounce)(struct device *, dma_addr_t, size_t);
 };
 
 #ifdef STATS
@@ -236,7 +238,7 @@ static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
 			return 1;
 	}
 
-	return dma_needs_bounce(dev, dma_addr, size) ? 1 : 0;
+	return !!dev->archdata.dmabounce->needs_bounce(dev, dma_addr, size);
 }
 
 static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
@@ -430,7 +432,8 @@ static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
 }
 
 int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
-		unsigned long large_buffer_size)
+		unsigned long large_buffer_size,
+		int (*needs_bounce_fn)(struct device *, dma_addr_t, size_t))
 {
 	struct dmabounce_device_info *device_info;
 	int ret;
@@ -466,6 +469,7 @@ int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
 	device_info->dev = dev;
 	INIT_LIST_HEAD(&device_info->safe_buffers);
 	rwlock_init(&device_info->lock);
+	device_info->needs_bounce = needs_bounce_fn;
 
 #ifdef STATS
 	device_info->total_allocs = 0;
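
Two details of the new core path above are easy to miss: the !! collapses any
nonzero return from the callback to exactly 1, so callbacks may return any
truthy value rather than strictly 0 or 1, and the callback is invoked whenever
the earlier mask checks do not already decide the question, so every
registered device must pass a non-NULL needs_bounce_fn. A guard one might add
at the top of dmabounce_register_dev (an illustration, not part of this
commit):

	/* The core dereferences the callback unconditionally; reject NULL early. */
	if (WARN_ON(!needs_bounce_fn))
		return -EINVAL;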
arch/arm/common/it8152.c

@@ -243,6 +243,13 @@ static struct resource it8152_mem = {
  * ITE8152 chip can address up to 64MByte, so all the devices
  * connected to ITE8152 (PCI and USB) should have limited DMA window
  */
+static int it8152_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
+{
+	dev_dbg(dev, "%s: dma_addr %08x, size %08x\n",
+		__func__, dma_addr, size);
+	return dev->bus == &pci_bus_type &&
+		(dma_addr + size - PHYS_OFFSET) >= SZ_64M;
+}
 
 /*
  * Setup DMA mask to 64MB on devices connected to ITE8152. Ignore all
@@ -254,7 +261,7 @@ static int it8152_pci_platform_notify(struct device *dev)
 		if (dev->dma_mask)
 			*dev->dma_mask = (SZ_64M - 1) | PHYS_OFFSET;
 		dev->coherent_dma_mask = (SZ_64M - 1) | PHYS_OFFSET;
-		dmabounce_register_dev(dev, 2048, 4096);
+		dmabounce_register_dev(dev, 2048, 4096, it8152_needs_bounce);
 	}
 	return 0;
 }
@@ -267,14 +274,6 @@ static int it8152_pci_platform_notify_remove(struct device *dev)
 	return 0;
 }
 
-int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
-{
-	dev_dbg(dev, "%s: dma_addr %08x, size %08x\n",
-		__func__, dma_addr, size);
-	return (dev->bus == &pci_bus_type) &&
-		((dma_addr + size - PHYS_OFFSET) >= SZ_64M);
-}
-
 int dma_set_coherent_mask(struct device *dev, u64 mask)
 {
 	if (mask >= PHYS_OFFSET + SZ_64M - 1)
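
it8152_needs_bounce subtracts PHYS_OFFSET because dma_addr is a bus address
into SDRAM while the ITE8152 can only reach the first 64MB of it. A worked
example, assuming the PXA-style PHYS_OFFSET of 0xa0000000 (an assumed value;
PHYS_OFFSET is platform configuration, not part of this commit):

	/* 4KB buffer at bus address 0xa4000000:
	 *   0xa4000000 + 0x1000 - 0xa0000000 = 0x04001000 >= SZ_64M (0x04000000)  -> bounce
	 * 4KB buffer at bus address 0xa1000000:
	 *   0xa1000000 + 0x1000 - 0xa0000000 = 0x01001000 <  SZ_64M               -> no bounce
	 */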
arch/arm/common/sa1111.c

@@ -579,7 +579,36 @@ sa1111_configure_smc(struct sa1111 *sachip, int sdram, unsigned int drac,
 
 	sachip->dev->coherent_dma_mask &= sa1111_dma_mask[drac >> 2];
 }
+#endif
 
+#ifdef CONFIG_DMABOUNCE
+/*
+ * According to the "Intel StrongARM SA-1111 Microprocessor Companion
+ * Chip Specification Update" (June 2000), erratum #7, there is a
+ * significant bug in the SA1111 SDRAM shared memory controller.  If
+ * an access to a region of memory above 1MB relative to the bank base,
+ * it is important that address bit 10 _NOT_ be asserted.  Depending
+ * on the configuration of the RAM, bit 10 may correspond to one
+ * of several different (processor-relative) address bits.
+ *
+ * This routine only identifies whether or not a given DMA address
+ * is susceptible to the bug.
+ *
+ * This should only get called for sa1111_device types due to the
+ * way we configure our device dma_masks.
+ */
+static int sa1111_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
+{
+	/*
+	 * Section 4.6 of the "Intel StrongARM SA-1111 Development Module
+	 * User's Guide" mentions that jumpers R51 and R52 control the
+	 * target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or
+	 * SDRAM bank 1 on Neponset).  The default configuration selects
+	 * Assabet, so any address in bank 1 is necessarily invalid.
+	 */
+	return (machine_is_assabet() || machine_is_pfs168()) &&
+		(addr >= 0xc8000000 || (addr + size) >= 0xc8000000);
+}
 #endif
 
 static void sa1111_dev_release(struct device *_dev)
@@ -644,7 +673,8 @@ sa1111_init_one_child(struct sa1111 *sachip, struct resource *parent,
 	dev->dev.dma_mask = &dev->dma_mask;
 
 	if (dev->dma_mask != 0xffffffffUL) {
-		ret = dmabounce_register_dev(&dev->dev, 1024, 4096);
+		ret = dmabounce_register_dev(&dev->dev, 1024, 4096,
+				sa1111_needs_bounce);
 		if (ret) {
 			dev_err(&dev->dev, "SA1111: Failed to register"
 				" with dmabounce\n");
@@ -818,34 +848,6 @@ static void __sa1111_remove(struct sa1111 *sachip)
 	kfree(sachip);
 }
 
-/*
- * According to the "Intel StrongARM SA-1111 Microprocessor Companion
- * Chip Specification Update" (June 2000), erratum #7, there is a
- * significant bug in the SA1111 SDRAM shared memory controller.  If
- * an access to a region of memory above 1MB relative to the bank base,
- * it is important that address bit 10 _NOT_ be asserted.  Depending
- * on the configuration of the RAM, bit 10 may correspond to one
- * of several different (processor-relative) address bits.
- *
- * This routine only identifies whether or not a given DMA address
- * is susceptible to the bug.
- *
- * This should only get called for sa1111_device types due to the
- * way we configure our device dma_masks.
- */
-int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
-{
-	/*
-	 * Section 4.6 of the "Intel StrongARM SA-1111 Development Module
-	 * User's Guide" mentions that jumpers R51 and R52 control the
-	 * target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or
-	 * SDRAM bank 1 on Neponset).  The default configuration selects
-	 * Assabet, so any address in bank 1 is necessarily invalid.
-	 */
-	return ((machine_is_assabet() || machine_is_pfs168()) &&
-		(addr >= 0xc8000000 || (addr + size) >= 0xc8000000));
-}
-
 struct sa1111_save_data {
 	unsigned int	skcr;
 	unsigned int	skpcr;
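
The predicate in sa1111_needs_bounce can be exercised on its own. The
following host-side snippet (an illustration, not kernel code) applies the
same test to a few sample transfers, assuming the Assabet layout in which
SDRAM bank 1 begins at 0xc8000000; note that the >= makes the test
conservative, flagging even a transfer that ends exactly on the bank boundary:

	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>

	/* Same check as sa1111_needs_bounce(), minus the machine_is_*() gate. */
	static int bank1_overlap(uint32_t addr, size_t size)
	{
		return addr >= 0xc8000000u || (addr + size) >= 0xc8000000u;
	}

	int main(void)
	{
		assert(!bank1_overlap(0xc0000000u, 4096)); /* wholly in bank 0 */
		assert(bank1_overlap(0xc7fff000u, 8192));  /* crosses into bank 1 */
		assert(bank1_overlap(0xc8000000u, 16));    /* starts in bank 1 */
		return 0;
	}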
arch/arm/include/asm/dma-mapping.h

@@ -256,14 +256,14 @@ int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
  * @dev: valid struct device pointer
  * @small_buf_size: size of buffers to use with small buffer pool
  * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
+ * @needs_bounce_fn: called to determine whether buffer needs bouncing
  *
  * This function should be called by low-level platform code to register
  * a device as requireing DMA buffer bouncing. The function will allocate
  * appropriate DMA pools for the device.
- *
  */
 extern int dmabounce_register_dev(struct device *, unsigned long,
-		unsigned long);
+		unsigned long, int (*)(struct device *, dma_addr_t, size_t));
 
 /**
  * dmabounce_unregister_dev
@@ -277,24 +277,6 @@ extern int dmabounce_register_dev(struct device *, unsigned long,
  */
 extern void dmabounce_unregister_dev(struct device *);
 
-/**
- * dma_needs_bounce
- *
- * @dev: valid struct device pointer
- * @dma_handle: dma_handle of unbounced buffer
- * @size: size of region being mapped
- *
- * Platforms that utilize the dmabounce mechanism must implement
- * this function.
- *
- * The dmabounce routines call this function whenever a dma-mapping
- * is requested to determine whether a given buffer needs to be bounced
- * or not. The function must return 0 if the buffer is OK for
- * DMA access and 1 if the buffer needs to be bounced.
- *
- */
-extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
-
 /*
  * The DMA API, implemented by dmabounce.c. See below for descriptions.
  */
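
Registration pairs with dmabounce_unregister_dev(), which this header already
declares; a hypothetical remove hook mirroring the
it8152_pci_platform_notify_remove and ixp4xx_pci_platform_notify_remove paths
touched in this commit would look like:

	static int foo_platform_notify_remove(struct device *dev)
	{
		if (dev->bus == &pci_bus_type)
			dmabounce_unregister_dev(dev);
		return 0;
	}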
arch/arm/mach-ixp4xx/common-pci.c

@@ -316,6 +316,11 @@ static int abort_handler(unsigned long addr, unsigned int fsr, struct pt_regs *r
 }
 
+static int ixp4xx_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
+{
+	return dev->bus == &pci_bus_type && (dma_addr + size) >= SZ_64M;
+}
+
 /*
  * Setup DMA mask to 64MB on PCI devices. Ignore all other devices.
  */
 static int ixp4xx_pci_platform_notify(struct device *dev)
@@ -324,7 +329,7 @@ static int ixp4xx_pci_platform_notify(struct device *dev)
 	if(dev->bus == &pci_bus_type) {
 		*dev->dma_mask = SZ_64M - 1;
 		dev->coherent_dma_mask = SZ_64M - 1;
-		dmabounce_register_dev(dev, 2048, 4096);
+		dmabounce_register_dev(dev, 2048, 4096, ixp4xx_needs_bounce);
 	}
 	return 0;
 }
@@ -337,11 +342,6 @@ static int ixp4xx_pci_platform_notify_remove(struct device *dev)
 	return 0;
 }
 
-int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
-{
-	return (dev->bus == &pci_bus_type ) && ((dma_addr + size) >= SZ_64M);
-}
-
 void __init ixp4xx_pci_preinit(void)
 {
 	unsigned long cpuid = read_cpuid_id();