Commit 969d67a8 authored by Christopher Hoover, committed by Russell King

[ARM PATCH] 1255/1: [PATCH] SA-1111 PCI support for USB

Fixes several oopsen in the SA-1111 "fake" PCI support

Complete re-write of the SA-1111 DMA bug "bounce buffer" workaround.

Merge latest drivers/pci/pool.c into mach-sa1100/pcipool.c (pool
allocation debugging follows CONFIG_DEBUG_SLAB a la drivers/pci/pool.c)
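
For reference, a minimal usage sketch of the pci_pool interface that pcipool.c
provides (hypothetical driver code, not part of this patch; "pdev" stands for
whatever struct pci_dev the caller targets -- SA1111_FAKE_PCIDEV in this patch --
and error handling is trimmed):

	struct pci_pool *pool;
	dma_addr_t dma;
	void *buf;

	/* pool of 1024-byte blocks, byte alignment, no boundary restriction */
	pool = pci_pool_create("example_pool", pdev, 1024, 0, 0, SLAB_KERNEL);
	if (pool == 0)
		return -ENOMEM;

	buf = pci_pool_alloc(pool, GFP_ATOMIC, &dma);	/* dma gets the bus address */
	if (buf)
		pci_pool_free(pool, buf, dma);

	pci_pool_destroy(pool);		/* every block must be freed first */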

arch/arm/mach-sa1100/pcipool.h can be deleted
(unrelated: arch/arm/mach-sa1100/sa1111-ohci.c can be deleted)

Applies to 2.5.30-rmk1.  Should be backported to 2.4, as the existing
SA-1111 bounce buffer code there is broken.

(This patch is *required* for the OHCI HCD driver that is part of 2.5.21+.)
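
After this patch, the OHCI driver reaches the bounce-buffer code through the
fake SA-1111 PCI device: the arch's pci_map_single() dispatches to
sa1111_map_single() whenever dev_is_sa1111() is true.  Condensed from the
include/asm-arm pci.h hunks below (a sketch, not the full change):

	#define SA1111_FAKE_PCIDEV ((struct pci_dev *) 1111)

	static inline dma_addr_t
	pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
	{
		if (dev_is_sa1111(hwdev))	/* i.e. hwdev == SA1111_FAKE_PCIDEV */
			return sa1111_map_single(hwdev, ptr, size, direction);

		consistent_sync(ptr, size, direction);
		return virt_to_bus(ptr);
	}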
parent 50a06edf
@@ -27,7 +27,7 @@ endif
 # Next, the SA1111 stuff.
 obj-$(CONFIG_SA1111) += sa1111.o
-obj-$(CONFIG_USB_OHCI_SA1111) += sa1111-pcibuf.o pcipool.o
+obj-$(CONFIG_USB_OHCI_HCD) += sa1111-pcibuf.o pcipool.o
 # Specific board support
 obj-$(CONFIG_SA1100_ADSBITSY) += adsbitsy.o
...
@@ -20,8 +20,6 @@
 #include <asm/page.h>
 
-#include "pcipool.h"
-
 /*
  * Pool allocator ... wraps the pci_alloc_consistent page allocator, so
  * small blocks are easily used by drivers for bus mastering controllers.
@@ -33,7 +31,6 @@ struct pci_pool {	/* the pool */
 	spinlock_t		lock;
 	size_t			blocks_per_page;
 	size_t			size;
-	int			flags;
 	struct pci_dev		*dev;
 	size_t			allocation;
 	char			name [32];
@@ -52,6 +49,19 @@ struct pci_page {	/* cacheable header for 'allocation' bytes */
 // #define CONFIG_PCIPOOL_DEBUG
 
+static inline const char *slot_name(const struct pci_pool *pool)
+{
+	const struct pci_dev *pdev = pool->dev;
+
+	if (pdev == 0)
+		return "[0]";
+	else if (dev_is_sa1111(pdev))
+		return "[SA-1111]";
+	else
+		return pdev->slot_name;
+}
+
 /**
  * pci_pool_create - Creates a pool of pci consistent memory blocks, for dma.
@@ -60,7 +70,7 @@ struct pci_page {	/* cacheable header for 'allocation' bytes */
  * @size: size of the blocks in this pool.
  * @align: alignment requirement for blocks; must be a power of two
  * @allocation: returned blocks won't cross this boundary (or zero)
- * @flags: SLAB_* flags (not all are supported).
+ * @mem_flags: SLAB_* flags.
  *
  * Returns a pci allocation pool with the requested characteristics, or
  * null if one can't be created.  Given one of these pools, pci_pool_alloc()
@@ -76,7 +86,7 @@ struct pci_page {	/* cacheable header for 'allocation' bytes */
  */
 struct pci_pool *
 pci_pool_create (const char *name, struct pci_dev *pdev,
-	size_t size, size_t align, size_t allocation, int flags)
+	size_t size, size_t align, size_t allocation, int mem_flags)
 {
 	struct pci_pool		*retval;
@@ -100,13 +110,9 @@ pci_pool_create (const char *name, struct pci_dev *pdev,
 	} else if (allocation < size)
 		return 0;
 
-	if (!(retval = kmalloc (sizeof *retval, flags)))
+	if (!(retval = kmalloc (sizeof *retval, mem_flags)))
 		return retval;
 
-#ifdef	CONFIG_PCIPOOL_DEBUG
-	flags |= SLAB_POISON;
-#endif
-
 	strncpy (retval->name, name, sizeof retval->name);
 	retval->name [sizeof retval->name - 1] = 0;
@@ -114,14 +120,13 @@ pci_pool_create (const char *name, struct pci_dev *pdev,
 	INIT_LIST_HEAD (&retval->page_list);
 	spin_lock_init (&retval->lock);
 	retval->size = size;
-	retval->flags = flags;
 	retval->allocation = allocation;
 	retval->blocks_per_page = allocation / size;
 	init_waitqueue_head (&retval->waitq);
 
 #ifdef	CONFIG_PCIPOOL_DEBUG
 	printk (KERN_DEBUG "pcipool create %s/%s size %d, %d/page (%d alloc)\n",
-		pdev ? pdev->slot_name : NULL, retval->name, size,
+		slot_name(retval), retval->name, size,
 		retval->blocks_per_page, allocation);
 #endif
@@ -143,11 +148,13 @@ pool_alloc_page (struct pci_pool *pool, int mem_flags)
 	if (!page)
 		return 0;
 	page->vaddr = pci_alloc_consistent (pool->dev,
-					    pool->allocation, &page->dma);
+					    pool->allocation,
+					    &page->dma);
 	if (page->vaddr) {
 		memset (page->bitmap, 0xff, mapsize);	// bit set == free
-		if (pool->flags & SLAB_POISON)
-			memset (page->vaddr, POOL_POISON_BYTE, pool->allocation);
+#ifdef	CONFIG_DEBUG_SLAB
+		memset (page->vaddr, POOL_POISON_BYTE, pool->allocation);
+#endif
 		list_add (&page->page_list, &pool->page_list);
 	} else {
 		kfree (page);
@@ -173,8 +180,9 @@ pool_free_page (struct pci_pool *pool, struct pci_page *page)
 {
 	dma_addr_t	dma = page->dma;
 
-	if (pool->flags & SLAB_POISON)
-		memset (page->vaddr, POOL_POISON_BYTE, pool->allocation);
+#ifdef	CONFIG_DEBUG_SLAB
+	memset (page->vaddr, POOL_POISON_BYTE, pool->allocation);
+#endif
 	pci_free_consistent (pool->dev, pool->allocation, page->vaddr, dma);
 	list_del (&page->page_list);
 	kfree (page);
@@ -195,8 +203,7 @@ pci_pool_destroy (struct pci_pool *pool)
 
 #ifdef	CONFIG_PCIPOOL_DEBUG
 	printk (KERN_DEBUG "pcipool destroy %s/%s\n",
-		pool->dev ? pool->dev->slot_name : NULL,
-		pool->name);
+		slot_name(pool), pool->name);
 #endif
 
 	spin_lock_irqsave (&pool->lock, flags);
@@ -206,8 +213,7 @@ pci_pool_destroy (struct pci_pool *pool)
 				struct pci_page, page_list);
 		if (is_page_busy (pool->blocks_per_page, page->bitmap)) {
 			printk (KERN_ERR "pci_pool_destroy %s/%s, %p busy\n",
-				pool->dev ? pool->dev->slot_name : NULL,
-				pool->name, page->vaddr);
+				slot_name(pool), pool->name, page->vaddr);
 			/* leak the still-in-use consistent memory */
 			list_del (&page->page_list);
 			kfree (page);
@@ -327,35 +333,32 @@ pci_pool_free (struct pci_pool *pool, void *vaddr, dma_addr_t dma)
 	int			map, block;
 
 	if ((page = pool_find_page (pool, dma)) == 0) {
-		printk (KERN_ERR "pci_pool_free %s/%s, %p/%x (bad dma)\n",
-			pool->dev ? pool->dev->slot_name : NULL,
-			pool->name, vaddr, dma);
-		return;
-	}
-
-#ifdef	CONFIG_PCIPOOL_DEBUG
-	if (((dma - page->dma) + (void *)page->vaddr) != vaddr) {
-		printk (KERN_ERR "pci_pool_free %s/%s, %p (bad vaddr)/%x\n",
+		printk (KERN_ERR "pci_pool_free %s/%s, %p/%lx (bad dma)\n",
 			pool->dev ? pool->dev->slot_name : NULL,
-			pool->name, vaddr, dma);
+			pool->name, vaddr, (unsigned long) dma);
 		return;
 	}
-#endif
 
 	block = dma - page->dma;
 	block /= pool->size;
 	map = block / BITS_PER_LONG;
 	block %= BITS_PER_LONG;
 
-#ifdef	CONFIG_PCIPOOL_DEBUG
+#ifdef	CONFIG_DEBUG_SLAB
+	if (((dma - page->dma) + (void *)page->vaddr) != vaddr) {
+		printk (KERN_ERR "pci_pool_free %s/%s, %p (bad vaddr)/%lx\n",
+			pool->dev ? pool->dev->slot_name : NULL,
+			pool->name, vaddr, (unsigned long) dma);
+		return;
+	}
 	if (page->bitmap [map] & (1UL << block)) {
 		printk (KERN_ERR "pci_pool_free %s/%s, dma %x already free\n",
 			pool->dev ? pool->dev->slot_name : NULL,
 			pool->name, dma);
 		return;
 	}
-#endif
-	if (pool->flags & SLAB_POISON)
-		memset (vaddr, POOL_POISON_BYTE, pool->size);
+	memset (vaddr, POOL_POISON_BYTE, pool->size);
+#endif
 
 	spin_lock_irqsave (&pool->lock, flags);
 	set_bit (block, &page->bitmap [map]);
@@ -369,9 +372,19 @@ pci_pool_free (struct pci_pool *pool, void *vaddr, dma_addr_t dma)
 	spin_unlock_irqrestore (&pool->lock, flags);
 }
 
 EXPORT_SYMBOL (pci_pool_create);
 EXPORT_SYMBOL (pci_pool_destroy);
 EXPORT_SYMBOL (pci_pool_alloc);
 EXPORT_SYMBOL (pci_pool_free);
+
+/* **************************************** */
+
+static int __init pcipool_init(void)
+{
+	MOD_INC_USE_COUNT;	/* never unload */
+	return 0;
+}
+module_init(pcipool_init);
+
+MODULE_LICENSE("GPL");
struct pci_pool *pci_pool_create (const char *name, struct pci_dev *dev,
size_t size, size_t align, size_t allocation, int flags);
void pci_pool_destroy (struct pci_pool *pool);
void *pci_pool_alloc (struct pci_pool *pool, int flags, dma_addr_t *handle);
void pci_pool_free (struct pci_pool *pool, void *vaddr, dma_addr_t addr);
/* /*
* linux/arch/arm/mach-sa1100/pci-sa1111.c * linux/arch/arm/mach-sa1100/pci-sa1111.c
* *
* Special pci_map/unmap_single routines for SA-1111. These functions * Special pci_{map/unmap/dma_sync}_* routines for SA-1111.
* compensate for a bug in the SA-1111 hardware which don't allow DMA
* to/from addresses above 1MB.
* *
* Brad Parker (brad@heeltoe.com) * These functions utilize bouncer buffers to compensate for a bug in
* the SA-1111 hardware which don't allow DMA to/from addresses
* certain addresses above 1MB.
* *
* This program is free software; you can redistribute it and/or modify * Re-written by Christopher Hoover <ch@murgatroid.com>
* it under the terms of the GNU General Public License version 2 as * Original version by Brad Parker (brad@heeltoe.com)
* published by the Free Software Foundation.
* *
* 06/13/2001 - created. * Copyright (C) 2002 Hewlett Packard Company.
*/ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
* */
#include <linux/module.h> #include <linux/module.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/list.h>
#include <asm/hardware/sa1111.h>
#include "pcipool.h" //#define DEBUG
#ifdef DEBUG
#define DPRINTK(...) do { printk(KERN_DEBUG __VA_ARGS__); } while (0)
#else
#define DPRINTK(...) do { } while (0)
#endif
/*
* simple buffer allocator for copying of unsafe to safe buffers
* uses __alloc/__free for actual buffers
* keeps track of safe buffers we've allocated so we can recover the
* unsafe buffers.
*/
#define MAX_SAFE 32 struct safe_buffer {
#define SIZE_SMALL 1024 struct list_head node;
#define SIZE_LARGE (16*1024)
/* original request */
void *ptr;
size_t size;
int direction;
/* safe buffer info */
struct pci_pool *pool;
void *safe;
dma_addr_t safe_dma_addr;
};
LIST_HEAD(safe_buffers);
static long mapped_alloc_size;
static char *safe_buffers[MAX_SAFE][2];
#define SIZE_SMALL 1024
#define SIZE_LARGE (16*1024)
static struct pci_pool *small_buffer_cache, *large_buffer_cache; static struct pci_pool *small_buffer_pool, *large_buffer_pool;
static int static int __init
init_safe_buffers(struct pci_dev *dev) create_safe_buffer_pools(void)
{ {
small_buffer_cache = pci_pool_create("pci_small_buffer", small_buffer_pool = pci_pool_create("sa1111_small_dma_buffer",
dev, SA1111_FAKE_PCIDEV,
SIZE_SMALL, SIZE_SMALL,
0 /* byte alignment */, 0 /* byte alignment */,
0 /* no page-crossing issues */, 0 /* no page-crossing issues */,
GFP_KERNEL | GFP_DMA); SLAB_KERNEL);
if (0 == small_buffer_pool) {
if (small_buffer_cache == 0) printk(KERN_ERR
"sa1111_pcibuf: could not allocate small pci pool\n");
return -1; return -1;
}
large_buffer_cache = pci_pool_create("pci_large_buffer", large_buffer_pool = pci_pool_create("sa1111_large_dma_buffer",
dev, SA1111_FAKE_PCIDEV,
SIZE_LARGE, SIZE_LARGE,
0 /* byte alignment */, 0 /* byte alignment */,
0 /* no page-crossing issues */, 0 /* no page-crossing issues */,
GFP_KERNEL | GFP_DMA); SLAB_KERNEL);
if (large_buffer_cache == 0) if (0 == large_buffer_pool) {
printk(KERN_ERR
"sa1111_pcibuf: could not allocate large pci pool\n");
pci_pool_destroy(small_buffer_pool);
small_buffer_pool = 0;
return -1; return -1;
}
return 0; return 0;
} }
static void __exit
destroy_safe_buffer_pools(void)
{
if (small_buffer_pool)
pci_pool_destroy(small_buffer_pool);
if (large_buffer_pool)
pci_pool_destroy(large_buffer_pool);
small_buffer_pool = large_buffer_pool = 0;
}
/* allocate a 'safe' buffer and keep track of it */ /* allocate a 'safe' buffer and keep track of it */
static char * static struct safe_buffer *
alloc_safe_buffer(char *unsafe, int size, dma_addr_t *pbus) alloc_safe_buffer(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
{ {
char *safe; struct safe_buffer *buf;
dma_addr_t busptr;
struct pci_pool *pool; struct pci_pool *pool;
int i; void *safe;
dma_addr_t safe_dma_addr;
if (0) printk("alloc_safe_buffer(size=%d)\n", size); DPRINTK("%s(ptr=%p, size=%d, direction=%d)\n",
__func__, ptr, size, direction);
if (size <= SIZE_SMALL) buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
pool = small_buffer_cache; if (buf == 0) {
else printk(KERN_WARNING "%s: kmalloc failed\n", __func__);
if (size < SIZE_LARGE)
pool = large_buffer_cache;
else
return 0;
safe = pci_pool_alloc(pool, SLAB_ATOMIC, &busptr);
if (safe == 0)
return 0; return 0;
for (i = 0; i < MAX_SAFE; i++)
if (safe_buffers[i][0] == 0) {
break;
} }
if (i == MAX_SAFE) { if (size <= SIZE_SMALL) {
panic(__FILE__ ": exceeded MAX_SAFE buffers"); pool = small_buffer_pool;
safe = pci_pool_alloc(pool, GFP_ATOMIC, &safe_dma_addr);
} else if (size <= SIZE_LARGE) {
pool = large_buffer_pool;
safe = pci_pool_alloc(pool, GFP_ATOMIC, &safe_dma_addr);
} else {
printk(KERN_DEBUG
"sa111_pcibuf: resorting to pci_alloc_consistent\n");
pool = 0;
safe = pci_alloc_consistent(SA1111_FAKE_PCIDEV, size,
&safe_dma_addr);
} }
/* place the size index and the old buffer ptr in the first 8 bytes if (safe == 0) {
* and return a ptr + 12 to caller printk(KERN_WARNING
*/ "%s: could not alloc dma memory (size=%d)\n",
((int *)safe)[0] = i; __func__, size);
((char **)safe)[1] = (char *)pool; kfree(buf);
((char **)safe)[2] = unsafe; return 0;
}
busptr += sizeof(int) + sizeof(char *) + sizeof(char *); BUG_ON(sa1111_check_dma_bug(safe_dma_addr)); // paranoia
safe_buffers[i][0] = (void *)busptr; buf->ptr = ptr;
safe_buffers[i][1] = (void *)safe; buf->size = size;
buf->direction = direction;
buf->pool = pool;
buf->safe = safe;
buf->safe_dma_addr = safe_dma_addr;
safe += sizeof(int) + sizeof(char *) + sizeof(char *); MOD_INC_USE_COUNT;
list_add(&buf->node, &safe_buffers);
*pbus = busptr; return buf;
return safe;
} }
/* determine if a buffer is from our "safe" pool */ /* determine if a buffer is from our "safe" pool */
static char * static struct safe_buffer *
find_safe_buffer(char *busptr, char **unsafe) find_safe_buffer(dma_addr_t safe_dma_addr)
{ {
int i; struct list_head *entry;
char *buf;
list_for_each(entry, &safe_buffers) {
for (i = 0; i < MAX_SAFE; i++) { struct safe_buffer *b =
if (safe_buffers[i][0] == busptr) { list_entry(entry, struct safe_buffer, node);
if (0) printk("find_safe_buffer(%p) found @ %d\n", busptr, i);
buf = safe_buffers[i][1]; if (b->safe_dma_addr == safe_dma_addr) {
*unsafe = ((char **)buf)[2]; return b;
return buf + sizeof(int) + sizeof(char *) + sizeof(char *);
} }
} }
return (char *)0; return 0;
} }
static void static void
free_safe_buffer(char *buf) free_safe_buffer(struct safe_buffer *buf)
{ {
int index; DPRINTK("%s(buf=%p)\n", __func__, buf);
struct pci_pool *pool;
char *dma;
if (0) printk("free_safe_buffer(buf=%p)\n", buf); list_del(&buf->node);
/* retrieve the buffer size index */ if (buf->pool)
buf -= sizeof(int) + sizeof(char*) + sizeof(char*); pci_pool_free(buf->pool, buf->safe, buf->safe_dma_addr);
index = ((int *)buf)[0]; else
pool = (struct pci_pool *)((char **)buf)[1]; pci_free_consistent(SA1111_FAKE_PCIDEV, buf->size, buf->safe,
buf->safe_dma_addr);
kfree(buf);
if (0) printk("free_safe_buffer(%p) index %d\n", MOD_DEC_USE_COUNT;
buf, index); }
if (index < 0 || index >= MAX_SAFE) { static inline int
printk(__FILE__ ": free_safe_buffer() corrupt buffer\n"); dma_range_is_safe(dma_addr_t addr, size_t size)
return; {
} unsigned int physaddr = SA1111_DMA_ADDR((unsigned int) addr);
dma = safe_buffers[index][0]; /* Any address within one megabyte of the start of the target
safe_buffers[index][0] = 0; * bank will be OK. This is an overly conservative test:
* other addresses can be OK depending on the dram
* configuration. (See sa1111.c:sa1111_check_dma_bug() * for
* details.)
*
* We take care to ensure the entire dma region is within
* the safe range.
*/
pci_pool_free(pool, buf, (u32)dma); return ((physaddr + size - 1) < (1<<20));
} }
/*
NOTE:
replace pci_map/unmap_single with local routines which will
do buffer copies if buffer is above 1mb...
*/
/* /*
* see if a buffer address is in an 'unsafe' range. if it is * see if a buffer address is in an 'unsafe' range. if it is
* allocate a 'safe' buffer and copy the unsafe buffer into it. * allocate a 'safe' buffer and copy the unsafe buffer into it.
* substitute the safe buffer for the unsafe one. * substitute the safe buffer for the unsafe one.
* (basically move the buffer from an unsafe area to a safe one) * (basically move the buffer from an unsafe area to a safe one)
*
* we assume calls to map_single are symmetric with calls to unmap_single...
*/ */
dma_addr_t dma_addr_t
sa1111_map_single(struct pci_dev *hwdev, void *virtptr, sa1111_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
size_t size, int direction)
{ {
dma_addr_t busptr; unsigned long flags;
dma_addr_t dma_addr;
mapped_alloc_size += size; DPRINTK("%s(hwdev=%p,ptr=%p,size=%d,dir=%x)\n",
__func__, hwdev, ptr, size, direction);
if (0) printk("pci_map_single(hwdev=%p,ptr=%p,size=%d,dir=%x) " BUG_ON(hwdev != SA1111_FAKE_PCIDEV);
"alloced=%ld\n", BUG_ON(direction == PCI_DMA_NONE);
hwdev, virtptr, size, direction, mapped_alloc_size);
busptr = virt_to_bus(virtptr); local_irq_save(flags);
/* we assume here that a buffer will never be >=64k */ dma_addr = virt_to_bus(ptr);
if ( (((unsigned long)busptr) & 0x100000) ||
((((unsigned long)busptr)+size) & 0x100000) )
{
char *safe;
safe = alloc_safe_buffer(virtptr, size, &busptr); if (!dma_range_is_safe(dma_addr, size)) {
if (safe == 0) { struct safe_buffer *buf;
printk("unable to map unsafe buffer %p!\n", virtptr);
buf = alloc_safe_buffer(hwdev, ptr, size, direction);
if (buf == 0) {
printk(KERN_ERR
"%s: unable to map unsafe buffer %p!\n",
__func__, ptr);
local_irq_restore(flags);
return 0; return 0;
} }
if (0) printk("unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n", DPRINTK("%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
virtptr, (void *)virt_to_bus(virtptr), __func__,
safe, (void *)busptr); buf->ptr, (void *) virt_to_bus(buf->ptr),
buf->safe, (void *) buf->safe_dma_addr);
memcpy(safe, virtptr, size); if ((direction == PCI_DMA_TODEVICE) ||
consistent_sync(safe, size, direction); (direction == PCI_DMA_BIDIRECTIONAL)) {
DPRINTK("%s: copy out from unsafe %p, to safe %p, size %d\n",
__func__, ptr, buf->safe, size);
memcpy(buf->safe, ptr, size);
}
consistent_sync(buf->safe, size, direction);
return busptr; dma_addr = buf->safe_dma_addr;
} else {
consistent_sync(ptr, size, direction);
} }
consistent_sync(virtptr, size, direction); local_irq_restore(flags);
return busptr; return dma_addr;
} }
/* /*
* see if a mapped address was really a "safe" buffer and if so, * see if a mapped address was really a "safe" buffer and if so, copy
* copy the data from the safe buffer back to the unsafe buffer * the data from the safe buffer back to the unsafe buffer and free up
* and free up the safe buffer. * the safe buffer. (basically return things back to the way they
* (basically return things back to the way they should be) * should be)
*/ */
void void
sa1111_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, sa1111_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
size_t size, int direction) size_t size, int direction)
{ {
char *safe, *unsafe; unsigned long flags;
void *buf; struct safe_buffer *buf;
DPRINTK("%s(hwdev=%p,ptr=%p,size=%d,dir=%x)\n",
__func__, hwdev, (void *) dma_addr, size, direction);
BUG_ON(hwdev != SA1111_FAKE_PCIDEV);
BUG_ON(direction == PCI_DMA_NONE);
local_irq_save(flags);
/* hack; usb-ohci.c never sends hwdev==NULL, all others do */ buf = find_safe_buffer(dma_addr);
if (hwdev == NULL) { if (buf) {
return; BUG_ON(buf->size != size);
BUG_ON(buf->direction != direction);
DPRINTK("%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
__func__,
buf->ptr, (void *) virt_to_bus(buf->ptr),
buf->safe, (void *) buf->safe_dma_addr);
if ((direction == PCI_DMA_FROMDEVICE) ||
(direction == PCI_DMA_BIDIRECTIONAL)) {
DPRINTK("%s: copy back from safe %p, to unsafe %p size %d\n",
__func__, buf->safe, buf->ptr, size);
memcpy(buf->ptr, buf->safe, size);
}
free_safe_buffer(buf);
} }
mapped_alloc_size -= size; local_irq_restore(flags);
}
if (0) printk("pci_unmap_single(hwdev=%p,ptr=%p,size=%d,dir=%x) " int
"alloced=%ld\n", sa1111_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
hwdev, (void *)dma_addr, size, direction, int nents, int direction)
mapped_alloc_size); {
BUG(); /* Not implemented. */
}
if ((safe = find_safe_buffer((void *)dma_addr, &unsafe))) { void
if (0) printk("copyback unsafe %p, safe %p, size %d\n", sa1111_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
unsafe, safe, size); int direction)
{
BUG(); /* Not implemented. */
}
consistent_sync(safe, size, PCI_DMA_FROMDEVICE); void
memcpy(unsafe, safe, size); sa1111_dma_sync_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
free_safe_buffer(safe); size_t size, int direction)
{
unsigned long flags;
struct safe_buffer *buf;
DPRINTK("%s(hwdev=%p,ptr=%p,size=%d,dir=%x)\n",
__func__, hwdev, (void *) dma_addr, size, direction);
BUG_ON(hwdev != SA1111_FAKE_PCIDEV);
local_irq_save(flags);
buf = find_safe_buffer(dma_addr);
if (buf) {
BUG_ON(buf->size != size);
BUG_ON(buf->direction != direction);
DPRINTK("%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
__func__,
buf->ptr, (void *) virt_to_bus(buf->ptr),
buf->safe, (void *) buf->safe_dma_addr);
switch (direction) {
case PCI_DMA_FROMDEVICE:
DPRINTK("%s: copy back from safe %p, to unsafe %p size %d\n",
__func__, buf->safe, buf->ptr, size);
memcpy(buf->ptr, buf->safe, size);
break;
case PCI_DMA_TODEVICE:
DPRINTK("%s: copy out from unsafe %p, to safe %p, size %d\n",
__func__,buf->ptr, buf->safe, size);
memcpy(buf->safe, buf->ptr, size);
break;
case PCI_DMA_BIDIRECTIONAL:
BUG(); /* is this allowed? what does it mean? */
default:
BUG();
}
consistent_sync(buf->safe, size, direction);
} else { } else {
/* assume this is normal memory */ consistent_sync(bus_to_virt(dma_addr), size, direction);
buf = bus_to_virt(dma_addr);
consistent_sync(buf, size, PCI_DMA_FROMDEVICE);
} }
local_irq_restore(flags);
}
void
sa1111_dma_sync_sg(struct pci_dev *hwdev, struct scatterlist *sg,
int nelems, int direction)
{
BUG(); /* Not implemented. */
} }
EXPORT_SYMBOL(sa1111_map_single); EXPORT_SYMBOL(sa1111_map_single);
EXPORT_SYMBOL(sa1111_unmap_single); EXPORT_SYMBOL(sa1111_unmap_single);
EXPORT_SYMBOL(sa1111_map_sg);
EXPORT_SYMBOL(sa1111_unmap_sg);
EXPORT_SYMBOL(sa1111_dma_sync_single);
EXPORT_SYMBOL(sa1111_dma_sync_sg);
/* **************************************** */
static int __init sa1111_init_safe_buffers(void) static int __init sa1111_pcibuf_init(void)
{ {
printk("Initializing SA1111 buffer pool for DMA workaround\n"); int ret;
init_safe_buffers(NULL);
return 0; printk(KERN_DEBUG
"sa1111_pcibuf: initializing SA-1111 DMA workaround\n");
ret = create_safe_buffer_pools();
return ret;
} }
module_init(sa1111_pcibuf_init);
static void free_safe_buffers(void) static void __exit sa1111_pcibuf_exit(void)
{ {
pci_pool_destroy(small_buffer_cache); BUG_ON(!list_empty(&safe_buffers));
pci_pool_destroy(large_buffer_cache);
destroy_safe_buffer_pools();
} }
module_exit(sa1111_pcibuf_exit);
module_init(sa1111_init_safe_buffers); MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>");
module_exit(free_safe_buffers); MODULE_DESCRIPTION("Special pci_{map/unmap/dma_sync}_* routines for SA-1111.");
MODULE_LICENSE("GPL");
@@ -98,7 +98,8 @@ void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *handle)
 {
 	int gfp = GFP_KERNEL;
 
-	if (hwdev == NULL || hwdev->dma_mask != 0xffffffff)
+	if (hwdev == NULL || dev_is_sa1111(hwdev) ||
+	    hwdev->dma_mask != 0xffffffff)
 		gfp |= GFP_DMA;
 
 	return consistent_alloc(gfp, size, handle);
...
@@ -611,10 +611,13 @@ struct sa1111_device {
 extern struct sa1111_device *sa1111;
 
+int sa1111_check_dma_bug(dma_addr_t addr);
+
 /*
  * These frob the SKPCR register.
  */
 void sa1111_enable_device(unsigned int mask);
 void sa1111_disable_device(unsigned int mask);
 
 #endif  /* _ASM_ARCH_SA1111 */
@@ -12,6 +12,34 @@
 struct pci_dev;
 
+/*
+ * For SA-1111 these functions are "magic" and utilize bounce
+ * buffers as need to workaround SA-1111 DMA bugs. They are called in
+ * place of their pci_* counterparts when dev_is_sa1111() returns true.
+ */
+dma_addr_t sa1111_map_single(struct pci_dev *, void *, size_t, int);
+void sa1111_unmap_single(struct pci_dev *, dma_addr_t, size_t, int);
+int sa1111_map_sg(struct pci_dev *, struct scatterlist *, int, int);
+void sa1111_unmap_sg(struct pci_dev *, struct scatterlist *, int, int);
+void sa1111_dma_sync_single(struct pci_dev *, dma_addr_t, size_t, int);
+void sa1111_dma_sync_sg(struct pci_dev *, struct scatterlist *, int, int);
+
+#ifdef CONFIG_SA1111
+
+#define SA1111_FAKE_PCIDEV ((struct pci_dev *) 1111)
+
+static inline int dev_is_sa1111(const struct pci_dev *dev)
+{
+	return (dev == SA1111_FAKE_PCIDEV);
+}
+
+#else
+
+static inline int dev_is_sa1111(const struct pci_dev *dev) { return 0; }
+
+#endif
+
 static inline void pcibios_set_master(struct pci_dev *dev)
 {
 	/* No special bus mastering setup handling */
@@ -61,17 +89,9 @@ pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr,
 static inline dma_addr_t
 pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
 {
-#ifdef CONFIG_SA1111
-	extern dma_addr_t sa1111_map_single(struct pci_dev *, void *, size_t, int);
-
-	/*
-	 * for SA1111 these functions are "magic" and relocate buffers.  We
-	 * only need to do these if hwdev is non-null; otherwise we expect
-	 * the buffer to already be suitable for DMA.
-	 */
-	if (hwdev != NULL)
+	if (dev_is_sa1111(hwdev))
 		return sa1111_map_single(hwdev, ptr, size, direction);
-#endif
+
 	consistent_sync(ptr, size, direction);
 	return virt_to_bus(ptr);
 }
@@ -86,12 +106,9 @@ pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
 static inline void
 pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction)
 {
-#ifdef CONFIG_SA1111
-	extern void sa1111_unmap_single(struct pci_dev *, dma_addr_t, size_t, int);
-
-	if (hwdev != NULL)
+	if (dev_is_sa1111(hwdev))
 		sa1111_unmap_single(hwdev, dma_addr, size, direction);
-#endif
+
 	/* nothing to do */
 }
@@ -99,7 +116,7 @@ pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction)
  * Whether pci_unmap_{single,page} is a nop depends upon the
  * configuration.
  */
-#ifdef CONFIG_PCI
+#if defined(CONFIG_PCI) || defined(CONFIG_SA1111)
 #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME;
 #define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME;
 #define pci_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
@@ -135,6 +152,9 @@ pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
 {
 	int i;
 
+	if (dev_is_sa1111(hwdev))
+		return sa1111_map_sg(hwdev, sg, nents, direction);
+
 	for (i = 0; i < nents; i++, sg++) {
 		char *virt;
@@ -153,6 +173,11 @@ pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
 static inline void
 pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
 {
+	if (dev_is_sa1111(hwdev)) {
+		sa1111_unmap_sg(hwdev, sg, nents, direction);
+		return;
+	}
+
 	/* nothing to do */
 }
@@ -168,6 +193,11 @@ pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
 static inline void
 pci_dma_sync_single(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction)
 {
+	if (dev_is_sa1111(hwdev)) {
+		sa1111_dma_sync_single(hwdev, dma_handle, size, direction);
+		return;
+	}
+
 	consistent_sync(bus_to_virt(dma_handle), size, direction);
 }
@@ -182,6 +212,11 @@ pci_dma_sync_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction)
 {
 	int i;
 
+	if (dev_is_sa1111(hwdev)) {
+		sa1111_dma_sync_sg(hwdev, sg, nelems, direction);
+		return;
+	}
+
 	for (i = 0; i < nelems; i++, sg++) {
 		char *virt = page_address(sg->page) + sg->offset;
 		consistent_sync(virt, sg->length, direction);
@@ -204,6 +239,19 @@ static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
 /* Return the index of the PCI controller for device PDEV. */
 #define pci_controller_num(PDEV)	(0)
 
+#if defined(CONFIG_SA1111) && !defined(CONFIG_PCI)
+/* SA-1111 needs these prototypes even when !defined(CONFIG_PCI) */
+
+/* kmem_cache style wrapper around pci_alloc_consistent() */
+struct pci_pool *pci_pool_create (const char *name, struct pci_dev *dev,
+		size_t size, size_t align, size_t allocation, int flags);
+void pci_pool_destroy (struct pci_pool *pool);
+void *pci_pool_alloc (struct pci_pool *pool, int flags, dma_addr_t *handle);
+void pci_pool_free (struct pci_pool *pool, void *vaddr, dma_addr_t addr);
+#endif
+
 #endif /* __KERNEL__ */
 
 #endif