Commit 6f221c06 authored by Russell King

[ARM] Update sa1111-pcibuf for dmapool changes.

- use dev_dbg for device-centric debugging messages
- use pr_debug for general debugging messages
- use dmapools instead of pcipools
- use NULL rather than 0 for NULL pointers
- use enum dma_data_direction rather than int
- use DMA_* direction definitions rather than PCI_DMA_*
- only check for a sane DMA direction in the mapping functions, but
  check that the DMA direction matches when unmapping/syncing (see
  the sketch below).
parent 9a13f8e3
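For illustration, a minimal sketch of a driver transmit path using the reworked interface (hypothetical code, not part of this commit; example_tx, dev, buf and len are placeholders):

	#include <linux/device.h>
	#include <linux/dma-mapping.h>

	static int example_tx(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t handle;

		/* the mapping functions check only that the direction
		 * is sane: BUG_ON(dir == DMA_NONE) */
		handle = sa1111_map_single(dev, buf, len, DMA_TO_DEVICE);
		if (!handle)
			return -ENOMEM;

		/* ... start the DMA transfer and wait for completion ... */

		/* unmapping must pass the same size and direction that
		 * were used at map time; the bounce-buffer code BUG()s
		 * on a mismatch */
		sa1111_unmap_single(dev, handle, len, DMA_TO_DEVICE);
		return 0;
	}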
@@ -5,6 +5,6 @@
obj-y += platform.o
obj-$(CONFIG_ARM_AMBA) += amba.o
obj-$(CONFIG_ICST525) += icst525.o
obj-$(CONFIG_SA1111) += sa1111.o sa1111-pcibuf.o sa1111-pcipool.o
obj-$(CONFIG_SA1111) += sa1111.o sa1111-pcibuf.o
obj-$(CONFIG_PCI_HOST_PLX90X0) += plx90x0.o
obj-$(CONFIG_PCI_HOST_VIA82C505) += via82c505.o
/*
* linux/arch/arm/mach-sa1100/pci-sa1111.c
* linux/arch/arm/mach-sa1100/sa1111-pcibuf.c
*
* Special pci_{map/unmap/dma_sync}_* routines for SA-1111.
* Special dma_{map/unmap/dma_sync}_* routines for SA-1111.
*
* These functions use bounce buffers to compensate for a bug in
* the SA-1111 hardware, which doesn't allow DMA to/from addresses
@@ -17,20 +17,17 @@
* version 2 as published by the Free Software Foundation.
*/
//#define DEBUG
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <asm/hardware/sa1111.h>
//#define DEBUG
#ifdef DEBUG
#define DPRINTK(...) do { printk(KERN_DEBUG __VA_ARGS__); } while (0)
#else
#define DPRINTK(...) do { } while (0)
#endif
//#define STATS
#ifdef STATS
#define DO_STATS(X) do { X ; } while (0)
@@ -46,12 +43,13 @@ struct safe_buffer {
/* original request */
void *ptr;
size_t size;
int direction;
enum dma_data_direction direction;
/* safe buffer info */
struct pci_pool *pool;
struct dma_pool *pool;
void *safe;
dma_addr_t safe_dma_addr;
struct device *dev;
};
static LIST_HEAD(safe_buffers);
@@ -60,7 +58,7 @@ static LIST_HEAD(safe_buffers);
#define SIZE_SMALL 1024
#define SIZE_LARGE (4*1024)
static struct pci_pool *small_buffer_pool, *large_buffer_pool;
static struct dma_pool *small_buffer_pool, *large_buffer_pool;
#ifdef STATS
static unsigned long sbp_allocs __initdata = 0;
@@ -70,95 +68,90 @@ static unsigned long total_allocs __initdata= 0;
static void print_alloc_stats(void)
{
printk(KERN_INFO
"sa1111_pcibuf: sbp: %lu, lbp: %lu, other: %lu, total: %lu\n",
"sa1111_dmabuf: sbp: %lu, lbp: %lu, other: %lu, total: %lu\n",
sbp_allocs, lbp_allocs,
total_allocs - sbp_allocs - lbp_allocs, total_allocs);
}
#endif
static int __init
create_safe_buffer_pools(void)
static int __init create_safe_buffer_pools(void)
{
small_buffer_pool = pci_pool_create("sa1111_small_dma_buffer",
SA1111_FAKE_PCIDEV,
SIZE_SMALL,
small_buffer_pool = dma_pool_create("sa1111_small_dma_buffer",
NULL, SIZE_SMALL,
0 /* byte alignment */,
0 /* no page-crossing issues */);
if (0 == small_buffer_pool) {
if (small_buffer_pool == NULL) {
printk(KERN_ERR
"sa1111_pcibuf: could not allocate small pci pool\n");
return -1;
"sa1111_dmabuf: could not allocate small pci pool\n");
return -ENOMEM;
}
large_buffer_pool = pci_pool_create("sa1111_large_dma_buffer",
SA1111_FAKE_PCIDEV,
SIZE_LARGE,
large_buffer_pool = dma_pool_create("sa1111_large_dma_buffer",
NULL, SIZE_LARGE,
0 /* byte alignment */,
0 /* no page-crossing issues */);
if (0 == large_buffer_pool) {
if (large_buffer_pool == NULL) {
printk(KERN_ERR
"sa1111_pcibuf: could not allocate large pci pool\n");
pci_pool_destroy(small_buffer_pool);
small_buffer_pool = 0;
return -1;
"sa1111_dmabuf: could not allocate large pci pool\n");
dma_pool_destroy(small_buffer_pool);
small_buffer_pool = NULL;
return -ENOMEM;
}
printk(KERN_INFO
"sa1111_pcibuf: buffer sizes: small=%u, large=%u\n",
printk(KERN_INFO "SA1111: DMA buffer sizes: small=%u, large=%u\n",
SIZE_SMALL, SIZE_LARGE);
return 0;
}
static void __exit
destroy_safe_buffer_pools(void)
static void __exit destroy_safe_buffer_pools(void)
{
if (small_buffer_pool)
pci_pool_destroy(small_buffer_pool);
dma_pool_destroy(small_buffer_pool);
if (large_buffer_pool)
pci_pool_destroy(large_buffer_pool);
dma_pool_destroy(large_buffer_pool);
small_buffer_pool = large_buffer_pool = 0;
small_buffer_pool = large_buffer_pool = NULL;
}
/* allocate a 'safe' buffer and keep track of it */
static struct safe_buffer *
alloc_safe_buffer(void *ptr, size_t size, int direction)
static struct safe_buffer *alloc_safe_buffer(struct device *dev, void *ptr,
size_t size,
enum dma_data_direction dir)
{
struct safe_buffer *buf;
struct pci_pool *pool;
struct dma_pool *pool;
void *safe;
dma_addr_t safe_dma_addr;
DPRINTK("%s(ptr=%p, size=%d, direction=%d)\n",
__func__, ptr, size, direction);
dev_dbg(dev, "%s(ptr=%p, size=%d, direction=%d)\n",
__func__, ptr, size, dir);
DO_STATS ( total_allocs++ );
buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
if (buf == 0) {
if (buf == NULL) {
printk(KERN_WARNING "%s: kmalloc failed\n", __func__);
return 0;
}
if (size <= SIZE_SMALL) {
pool = small_buffer_pool;
safe = pci_pool_alloc(pool, GFP_ATOMIC, &safe_dma_addr);
safe = dma_pool_alloc(pool, GFP_ATOMIC, &safe_dma_addr);
DO_STATS ( sbp_allocs++ );
} else if (size <= SIZE_LARGE) {
pool = large_buffer_pool;
safe = pci_pool_alloc(pool, GFP_ATOMIC, &safe_dma_addr);
safe = dma_pool_alloc(pool, GFP_ATOMIC, &safe_dma_addr);
DO_STATS ( lbp_allocs++ );
} else {
pool = 0;
safe = pci_alloc_consistent(SA1111_FAKE_PCIDEV, size,
&safe_dma_addr);
pool = NULL;
safe = dma_alloc_coherent(dev, size, &safe_dma_addr, GFP_ATOMIC);
}
if (safe == 0) {
if (safe == NULL) {
printk(KERN_WARNING
"%s: could not alloc dma memory (size=%d)\n",
__func__, size);
@@ -175,20 +168,20 @@ alloc_safe_buffer(void *ptr, size_t size, int direction)
buf->ptr = ptr;
buf->size = size;
buf->direction = direction;
buf->direction = dir;
buf->pool = pool;
buf->safe = safe;
buf->safe_dma_addr = safe_dma_addr;
buf->dev = dev;
MOD_INC_USE_COUNT;
list_add(&buf->node, &safe_buffers);
return buf;
}
/* determine if a buffer is from our "safe" pool */
static struct safe_buffer *
find_safe_buffer(dma_addr_t safe_dma_addr)
static struct safe_buffer *find_safe_buffer(struct device *dev,
dma_addr_t safe_dma_addr)
{
struct list_head *entry;
@@ -196,7 +189,8 @@ find_safe_buffer(dma_addr_t safe_dma_addr)
struct safe_buffer *b =
list_entry(entry, struct safe_buffer, node);
if (b->safe_dma_addr == safe_dma_addr) {
if (b->safe_dma_addr == safe_dma_addr &&
b->dev == dev) {
return b;
}
}
@@ -204,25 +198,22 @@ find_safe_buffer(dma_addr_t safe_dma_addr)
return 0;
}
static void
free_safe_buffer(struct safe_buffer *buf)
static void free_safe_buffer(struct safe_buffer *buf)
{
DPRINTK("%s(buf=%p)\n", __func__, buf);
pr_debug("%s(buf=%p)\n", __func__, buf);
list_del(&buf->node);
if (buf->pool)
pci_pool_free(buf->pool, buf->safe, buf->safe_dma_addr);
dma_pool_free(buf->pool, buf->safe, buf->safe_dma_addr);
else
pci_free_consistent(SA1111_FAKE_PCIDEV, buf->size, buf->safe,
dma_free_coherent(buf->dev, buf->size, buf->safe,
buf->safe_dma_addr);
kfree(buf);
MOD_DEC_USE_COUNT;
}
static inline int
dma_range_is_safe(dma_addr_t addr, size_t size)
static inline int dma_range_is_safe(struct device *dev, dma_addr_t addr,
size_t size)
{
unsigned int physaddr = SA1111_DMA_ADDR((unsigned int) addr);
@@ -248,13 +239,13 @@ static unsigned long bounce_count __initdata = 0;
static void print_map_stats(void)
{
printk(KERN_INFO
"sa1111_pcibuf: map_op_count=%lu, bounce_count=%lu\n",
"sa1111_dmabuf: map_op_count=%lu, bounce_count=%lu\n",
map_op_count, bounce_count);
}
#endif
static dma_addr_t
map_single(void *ptr, size_t size, int direction)
static dma_addr_t map_single(struct device *dev, void *ptr,
size_t size, enum dma_data_direction dir)
{
dma_addr_t dma_addr;
@@ -262,37 +253,36 @@ map_single(void *ptr, size_t size, int direction)
dma_addr = virt_to_bus(ptr);
if (!dma_range_is_safe(dma_addr, size)) {
if (!dma_range_is_safe(dev, dma_addr, size)) {
struct safe_buffer *buf;
DO_STATS ( bounce_count++ ) ;
buf = alloc_safe_buffer(ptr, size, direction);
if (buf == 0) {
buf = alloc_safe_buffer(dev, ptr, size, dir);
if (buf == NULL) {
printk(KERN_ERR
"%s: unable to map unsafe buffer %p!\n",
__func__, ptr);
return 0;
}
DPRINTK("%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
dev_dbg(dev, "%s: unsafe buffer %p (phy=%08lx) mapped to %p (phy=%08x)\n",
__func__,
buf->ptr, (void *) virt_to_bus(buf->ptr),
buf->safe, (void *) buf->safe_dma_addr);
buf->ptr, virt_to_bus(buf->ptr),
buf->safe, buf->safe_dma_addr);
if ((direction == PCI_DMA_TODEVICE) ||
(direction == PCI_DMA_BIDIRECTIONAL)) {
DPRINTK("%s: copy out from unsafe %p, to safe %p, size %d\n",
if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
dev_dbg(dev, "%s: copy out from unsafe %p, to safe %p, size %d\n",
__func__, ptr, buf->safe, size);
memcpy(buf->safe, ptr, size);
}
consistent_sync(buf->safe, size, direction);
dma_addr = buf->safe_dma_addr;
} else {
consistent_sync(ptr, size, direction);
ptr = buf->safe;
}
consistent_sync(ptr, size, dir);
#ifdef STATS
if (map_op_count % 1000 == 0)
print_map_stats();
@@ -301,28 +291,26 @@ map_single(void *ptr, size_t size, int direction)
return dma_addr;
}
static void
unmap_single(dma_addr_t dma_addr, size_t size, int direction)
static void unmap_single(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir)
{
struct safe_buffer *buf;
buf = find_safe_buffer(dma_addr);
buf = find_safe_buffer(dev, dma_addr);
if (buf) {
BUG_ON(buf->size != size);
BUG_ON(buf->direction != direction);
BUG_ON(buf->direction != dir);
DPRINTK("%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
dev_dbg(dev, "%s: unsafe buffer %p (phy=%08lx) mapped to %p (phy=%08lx)\n",
__func__,
buf->ptr, (void *) virt_to_bus(buf->ptr),
buf->safe, (void *) buf->safe_dma_addr);
buf->ptr, virt_to_bus(buf->ptr),
buf->safe, buf->safe_dma_addr);
DO_STATS ( bounce_count++ );
if ((direction == PCI_DMA_FROMDEVICE) ||
(direction == PCI_DMA_BIDIRECTIONAL)) {
DPRINTK("%s: copy back from safe %p, to unsafe %p size %d\n",
if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
dev_dbg(dev, "%s: copy back from safe %p, to unsafe %p size %d\n",
__func__, buf->safe, buf->ptr, size);
memcpy(buf->ptr, buf->safe, size);
}
@@ -330,44 +318,46 @@ unmap_single(dma_addr_t dma_addr, size_t size, int direction)
}
}
static void
sync_single(dma_addr_t dma_addr, size_t size, int direction)
static void sync_single(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir)
{
struct safe_buffer *buf;
void *ptr;
buf = find_safe_buffer(dma_addr);
buf = find_safe_buffer(dev, dma_addr);
if (buf) {
BUG_ON(buf->size != size);
BUG_ON(buf->direction != direction);
BUG_ON(buf->direction != dir);
DPRINTK("%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
dev_dbg(dev, "%s: unsafe buffer %p (phy=%08lx) mapped to %p (phy=%08lx)\n",
__func__,
buf->ptr, (void *) virt_to_bus(buf->ptr),
buf->safe, (void *) buf->safe_dma_addr);
buf->ptr, virt_to_bus(buf->ptr),
buf->safe, buf->safe_dma_addr);
DO_STATS ( bounce_count++ );
switch (direction) {
case PCI_DMA_FROMDEVICE:
DPRINTK("%s: copy back from safe %p, to unsafe %p size %d\n",
switch (dir) {
case DMA_FROM_DEVICE:
dev_dbg(dev, "%s: copy back from safe %p, to unsafe %p size %d\n",
__func__, buf->safe, buf->ptr, size);
memcpy(buf->ptr, buf->safe, size);
break;
case PCI_DMA_TODEVICE:
DPRINTK("%s: copy out from unsafe %p, to safe %p, size %d\n",
case DMA_TO_DEVICE:
dev_dbg(dev, "%s: copy out from unsafe %p, to safe %p, size %d\n",
__func__,buf->ptr, buf->safe, size);
memcpy(buf->safe, buf->ptr, size);
break;
case PCI_DMA_BIDIRECTIONAL:
case DMA_BIDIRECTIONAL:
BUG(); /* is this allowed? what does it mean? */
default:
BUG();
}
consistent_sync(buf->safe, size, direction);
ptr = buf->safe;
} else {
consistent_sync(bus_to_virt(dma_addr), size, direction);
ptr = bus_to_virt(dma_addr);
}
consistent_sync(ptr, size, dir);
}
/* ************************************************** */
@@ -378,20 +368,20 @@ sync_single(dma_addr_t dma_addr, size_t size, int direction)
* substitute the safe buffer for the unsafe one.
* (basically move the buffer from an unsafe area to a safe one)
*/
dma_addr_t
sa1111_map_single(void *ptr, size_t size, int direction)
dma_addr_t sa1111_map_single(struct device *dev, void *ptr,
size_t size, enum dma_data_direction dir)
{
unsigned long flags;
dma_addr_t dma_addr;
DPRINTK("%s(ptr=%p,size=%d,dir=%x)\n",
__func__, ptr, size, direction);
dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
__func__, ptr, size, dir);
BUG_ON(direction == PCI_DMA_NONE);
BUG_ON(dir == DMA_NONE);
local_irq_save(flags);
dma_addr = map_single(ptr, size, direction);
dma_addr = map_single(dev, ptr, size, dir);
local_irq_restore(flags);
@@ -404,34 +394,31 @@ sa1111_map_single(void *ptr, size_t size, int direction)
* the safe buffer. (basically return things back to the way they
* should be)
*/
void
sa1111_unmap_single(dma_addr_t dma_addr, size_t size, int direction)
void sa1111_unmap_single(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir)
{
unsigned long flags;
DPRINTK("%s(ptr=%p,size=%d,dir=%x)\n",
__func__, (void *) dma_addr, size, direction);
BUG_ON(direction == PCI_DMA_NONE);
dev_dbg(dev, "%s(ptr=%08lx,size=%d,dir=%x)\n",
__func__, dma_addr, size, dir);
local_irq_save(flags);
unmap_single(dma_addr, size, direction);
unmap_single(dev, dma_addr, size, dir);
local_irq_restore(flags);
}
int
sa1111_map_sg(struct scatterlist *sg, int nents, int direction)
int sa1111_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir)
{
unsigned long flags;
int i;
DPRINTK("%s(sg=%p,nents=%d,dir=%x)\n",
__func__, sg, nents, direction);
dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
__func__, sg, nents, dir);
BUG_ON(direction == PCI_DMA_NONE);
BUG_ON(dir == DMA_NONE);
local_irq_save(flags);
@@ -441,8 +428,7 @@ sa1111_map_sg(struct scatterlist *sg, int nents, int direction)
unsigned int length = sg->length;
void *ptr = page_address(page) + offset;
sg->dma_address =
map_single(ptr, length, direction);
sg->dma_address = map_single(dev, ptr, length, dir);
}
local_irq_restore(flags);
@@ -450,16 +436,14 @@ sa1111_map_sg(struct scatterlist *sg, int nents, int direction)
return nents;
}
void
sa1111_unmap_sg(struct scatterlist *sg, int nents, int direction)
void sa1111_unmap_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir)
{
unsigned long flags;
int i;
DPRINTK("%s(sg=%p,nents=%d,dir=%x)\n",
__func__, sg, nents, direction);
BUG_ON(direction == PCI_DMA_NONE);
dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
__func__, sg, nents, dir);
local_irq_save(flags);
@@ -467,37 +451,35 @@ sa1111_unmap_sg(struct scatterlist *sg, int nents, int direction)
dma_addr_t dma_addr = sg->dma_address;
unsigned int length = sg->length;
unmap_single(dma_addr, length, direction);
unmap_single(dev, dma_addr, length, dir);
}
local_irq_restore(flags);
}
void
sa1111_dma_sync_single(dma_addr_t dma_addr, size_t size, int direction)
void sa1111_dma_sync_single(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir)
{
unsigned long flags;
DPRINTK("%s(ptr=%p,size=%d,dir=%x)\n",
__func__, (void *) dma_addr, size, direction);
dev_dbg(dev, "%s(ptr=%08lx,size=%d,dir=%x)\n",
__func__, dma_addr, size, dir);
local_irq_save(flags);
sync_single(dma_addr, size, direction);
sync_single(dev, dma_addr, size, dir);
local_irq_restore(flags);
}
void
sa1111_dma_sync_sg(struct scatterlist *sg, int nents, int direction)
void sa1111_dma_sync_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir)
{
unsigned long flags;
int i;
DPRINTK("%s(sg=%p,nents=%d,dir=%x)\n",
__func__, sg, nents, direction);
BUG_ON(direction == PCI_DMA_NONE);
dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
__func__, sg, nents, dir);
local_irq_save(flags);
@@ -505,7 +487,7 @@ sa1111_dma_sync_sg(struct scatterlist *sg, int nents, int direction)
dma_addr_t dma_addr = sg->dma_address;
unsigned int length = sg->length;
sync_single(dma_addr, length, direction);
sync_single(dev, dma_addr, length, dir);
}
local_irq_restore(flags);
@@ -520,20 +502,15 @@ EXPORT_SYMBOL(sa1111_dma_sync_sg);
/* **************************************** */
static int __init sa1111_pcibuf_init(void)
static int __init sa1111_dmabuf_init(void)
{
int ret;
printk(KERN_DEBUG
"sa1111_pcibuf: initializing SA-1111 DMA workaround\n");
ret = create_safe_buffer_pools();
printk(KERN_DEBUG "sa1111_dmabuf: initializing SA-1111 DMA buffers\n");
return ret;
return create_safe_buffer_pools();
}
module_init(sa1111_pcibuf_init);
module_init(sa1111_dmabuf_init);
static void __exit sa1111_pcibuf_exit(void)
static void __exit sa1111_dmabuf_exit(void)
{
BUG_ON(!list_empty(&safe_buffers));
@@ -544,8 +521,8 @@ static void __exit sa1111_pcibuf_exit(void)
destroy_safe_buffer_pools();
}
module_exit(sa1111_pcibuf_exit);
module_exit(sa1111_dmabuf_exit);
MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>");
MODULE_DESCRIPTION("Special pci_{map/unmap/dma_sync}_* routines for SA-1111.");
MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for SA-1111.");
MODULE_LICENSE("GPL");
/*
NOTE:
this code was lifted straight out of drivers/pci/pci.c;
when compiling for the Intel StrongARM SA-1110/SA-1111, the
usb-ohci.c driver needs these routines even when the architecture
has no PCI bus...
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <asm/page.h>
/*
* Pool allocator ... wraps the pci_alloc_consistent page allocator, so
* small blocks are easily used by drivers for bus mastering controllers.
* This should probably be sharing the guts of the slab allocator.
*/
struct pci_pool { /* the pool */
struct list_head page_list;
spinlock_t lock;
size_t blocks_per_page;
size_t size;
struct pci_dev *dev;
size_t allocation;
char name [32];
wait_queue_head_t waitq;
};
struct pci_page { /* cacheable header for 'allocation' bytes */
struct list_head page_list;
void *vaddr;
dma_addr_t dma;
unsigned long bitmap [0];
};
#define POOL_TIMEOUT_JIFFIES ((100 /* msec */ * HZ) / 1000)
#define POOL_POISON_BYTE 0xa7
// #define CONFIG_PCIPOOL_DEBUG
static inline const char *slot_name(const struct pci_pool *pool)
{
struct pci_dev *pdev = (struct pci_dev *)pool->dev;
if (pdev == 0)
return "[0]";
else if (pcidev_is_sa1111(pdev))
return "[SA-1111]";
else
return pci_name(pdev);
}
/**
* pci_pool_create - Creates a pool of pci consistent memory blocks, for dma.
* @name: name of pool, for diagnostics
* @pdev: pci device that will be doing the DMA
* @size: size of the blocks in this pool.
* @align: alignment requirement for blocks; must be a power of two
* @allocation: returned blocks won't cross this boundary (or zero)
* Context: !in_interrupt()
*
* Returns a pci allocation pool with the requested characteristics, or
* null if one can't be created. Given one of these pools, pci_pool_alloc()
* may be used to allocate memory. Such memory will all have "consistent"
* DMA mappings, accessible by the device and its driver without using
* cache flushing primitives. The actual size of blocks allocated may be
* larger than requested because of alignment.
*
* If allocation is nonzero, objects returned from pci_pool_alloc() won't
* cross that size boundary. This is useful for devices which have
* addressing restrictions on individual DMA transfers, such as not crossing
* boundaries of 4KBytes.
*/
struct pci_pool *
pci_pool_create (const char *name, struct pci_dev *pdev,
size_t size, size_t align, size_t allocation)
{
struct pci_pool *retval;
if (align == 0)
align = 1;
if (size == 0)
return 0;
else if (size < align)
size = align;
else if ((size % align) != 0) {
size += align + 1;
size &= ~(align - 1);
}
if (allocation == 0) {
if (PAGE_SIZE < size)
allocation = size;
else
allocation = PAGE_SIZE;
// FIXME: round up for less fragmentation
} else if (allocation < size)
return 0;
if (!(retval = kmalloc (sizeof *retval, SLAB_KERNEL)))
return retval;
strlcpy (retval->name, name, sizeof retval->name);
retval->dev = pdev;
INIT_LIST_HEAD (&retval->page_list);
spin_lock_init (&retval->lock);
retval->size = size;
retval->allocation = allocation;
retval->blocks_per_page = allocation / size;
init_waitqueue_head (&retval->waitq);
#ifdef CONFIG_PCIPOOL_DEBUG
printk (KERN_DEBUG "pcipool create %s/%s size %d, %d/page (%d alloc)\n",
slot_name(retval), retval->name, size,
retval->blocks_per_page, allocation);
#endif
return retval;
}
static struct pci_page *
pool_alloc_page (struct pci_pool *pool, int mem_flags)
{
struct pci_page *page;
int mapsize;
mapsize = pool->blocks_per_page;
mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG;
mapsize *= sizeof (long);
page = (struct pci_page *) kmalloc (mapsize + sizeof *page, mem_flags);
if (!page)
return 0;
page->vaddr = pci_alloc_consistent (pool->dev,
pool->allocation,
&page->dma);
if (page->vaddr) {
memset (page->bitmap, 0xff, mapsize); // bit set == free
#ifdef CONFIG_DEBUG_SLAB
memset (page->vaddr, POOL_POISON_BYTE, pool->allocation);
#endif
list_add (&page->page_list, &pool->page_list);
} else {
kfree (page);
page = 0;
}
return page;
}
static inline int
is_page_busy (int blocks, unsigned long *bitmap)
{
while (blocks > 0) {
if (*bitmap++ != ~0UL)
return 1;
blocks -= BITS_PER_LONG;
}
return 0;
}
static void
pool_free_page (struct pci_pool *pool, struct pci_page *page)
{
dma_addr_t dma = page->dma;
#ifdef CONFIG_DEBUG_SLAB
memset (page->vaddr, POOL_POISON_BYTE, pool->allocation);
#endif
pci_free_consistent (pool->dev, pool->allocation, page->vaddr, dma);
list_del (&page->page_list);
kfree (page);
}
/**
* pci_pool_destroy - destroys a pool of pci memory blocks.
* @pool: pci pool that will be destroyed
*
* Caller guarantees that no more memory from the pool is in use,
* and that nothing will try to use the pool after this call.
*/
void
pci_pool_destroy (struct pci_pool *pool)
{
unsigned long flags;
#ifdef CONFIG_PCIPOOL_DEBUG
printk (KERN_DEBUG "pcipool destroy %s/%s\n",
slot_name(pool), pool->name);
#endif
spin_lock_irqsave (&pool->lock, flags);
while (!list_empty (&pool->page_list)) {
struct pci_page *page;
page = list_entry (pool->page_list.next,
struct pci_page, page_list);
if (is_page_busy (pool->blocks_per_page, page->bitmap)) {
printk (KERN_ERR "pci_pool_destroy %s/%s, %p busy\n",
slot_name(pool), pool->name, page->vaddr);
/* leak the still-in-use consistent memory */
list_del (&page->page_list);
kfree (page);
} else
pool_free_page (pool, page);
}
spin_unlock_irqrestore (&pool->lock, flags);
kfree (pool);
}
/**
* pci_pool_alloc - get a block of consistent memory
* @pool: pci pool that will produce the block
* @mem_flags: SLAB_KERNEL or SLAB_ATOMIC
* @handle: pointer to dma address of block
*
* This returns the kernel virtual address of a currently unused block,
* and reports its dma address through the handle.
* If such a memory block can't be allocated, null is returned.
*/
void *
pci_pool_alloc (struct pci_pool *pool, int mem_flags, dma_addr_t *handle)
{
unsigned long flags;
struct list_head *entry;
struct pci_page *page;
int map, block;
size_t offset;
void *retval;
restart:
spin_lock_irqsave (&pool->lock, flags);
list_for_each (entry, &pool->page_list) {
int i;
page = list_entry (entry, struct pci_page, page_list);
/* only cachable accesses here ... */
for (map = 0, i = 0;
i < pool->blocks_per_page;
i += BITS_PER_LONG, map++) {
if (page->bitmap [map] == 0)
continue;
block = ffz (~ page->bitmap [map]);
if ((i + block) < pool->blocks_per_page) {
clear_bit (block, &page->bitmap [map]);
offset = (BITS_PER_LONG * map) + block;
offset *= pool->size;
goto ready;
}
}
}
if (!(page = pool_alloc_page (pool, mem_flags))) {
if (mem_flags == SLAB_KERNEL) {
DECLARE_WAITQUEUE (wait, current);
current->state = TASK_INTERRUPTIBLE;
add_wait_queue (&pool->waitq, &wait);
spin_unlock_irqrestore (&pool->lock, flags);
schedule_timeout (POOL_TIMEOUT_JIFFIES);
remove_wait_queue (&pool->waitq, &wait);
goto restart;
}
retval = 0;
goto done;
}
clear_bit (0, &page->bitmap [0]);
offset = 0;
ready:
retval = offset + page->vaddr;
*handle = offset + page->dma;
done:
spin_unlock_irqrestore (&pool->lock, flags);
return retval;
}
static struct pci_page *
pool_find_page (struct pci_pool *pool, dma_addr_t dma)
{
unsigned long flags;
struct list_head *entry;
struct pci_page *page;
spin_lock_irqsave (&pool->lock, flags);
list_for_each (entry, &pool->page_list) {
page = list_entry (entry, struct pci_page, page_list);
if (dma < page->dma)
continue;
if (dma < (page->dma + pool->allocation))
goto done;
}
page = 0;
done:
spin_unlock_irqrestore (&pool->lock, flags);
return page;
}
/**
* pci_pool_free - put block back into pci pool
* @pool: the pci pool holding the block
* @vaddr: virtual address of block
* @dma: dma address of block
*
* Caller promises neither device nor driver will again touch this block
* unless it is first re-allocated.
*/
void
pci_pool_free (struct pci_pool *pool, void *vaddr, dma_addr_t dma)
{
struct pci_page *page;
unsigned long flags;
int map, block;
if ((page = pool_find_page (pool, dma)) == 0) {
printk (KERN_ERR "pci_pool_free %s/%s, %p/%lx (bad dma)\n",
pool->dev ? pci_name(pool->dev) : NULL,
pool->name, vaddr, (unsigned long) dma);
return;
}
block = dma - page->dma;
block /= pool->size;
map = block / BITS_PER_LONG;
block %= BITS_PER_LONG;
#ifdef CONFIG_DEBUG_SLAB
if (((dma - page->dma) + (void *)page->vaddr) != vaddr) {
printk (KERN_ERR "pci_pool_free %s/%s, %p (bad vaddr)/%lx\n",
pool->dev ? pci_name(pool->dev) : NULL,
pool->name, vaddr, (unsigned long) dma);
return;
}
if (page->bitmap [map] & (1UL << block)) {
printk (KERN_ERR "pci_pool_free %s/%s, dma %x already free\n",
pool->dev ? pci_name(pool->dev) : NULL,
pool->name, dma);
return;
}
memset (vaddr, POOL_POISON_BYTE, pool->size);
#endif
spin_lock_irqsave (&pool->lock, flags);
set_bit (block, &page->bitmap [map]);
if (waitqueue_active (&pool->waitq))
wake_up (&pool->waitq);
/*
* Resist a temptation to do
* if (!is_page_busy(bpp, page->bitmap)) pool_free_page(pool, page);
* it is not interrupt safe. Better have empty pages hang around.
*/
spin_unlock_irqrestore (&pool->lock, flags);
}
EXPORT_SYMBOL (pci_pool_create);
EXPORT_SYMBOL (pci_pool_destroy);
EXPORT_SYMBOL (pci_pool_alloc);
EXPORT_SYMBOL (pci_pool_free);
/* **************************************** */
static int __init pcipool_init(void)
{
MOD_INC_USE_COUNT; /* never unload */
return 0;
}
module_init(pcipool_init);
MODULE_LICENSE("GPL");
@@ -22,12 +22,12 @@ extern void consistent_sync(void *kaddr, size_t size, int rw);
* For SA-1111 these functions are "magic" and utilize bounce
* buffers as needed to work around SA-1111 DMA bugs.
*/
dma_addr_t sa1111_map_single(void *, size_t, int);
void sa1111_unmap_single(dma_addr_t, size_t, int);
int sa1111_map_sg(struct scatterlist *, int, int);
void sa1111_unmap_sg(struct scatterlist *, int, int);
void sa1111_dma_sync_single(dma_addr_t, size_t, int);
void sa1111_dma_sync_sg(struct scatterlist *, int, int);
dma_addr_t sa1111_map_single(struct device *dev, void *, size_t, enum dma_data_direction);
void sa1111_unmap_single(struct device *dev, dma_addr_t, size_t, enum dma_data_direction);
int sa1111_map_sg(struct device *dev, struct scatterlist *, int, enum dma_data_direction);
void sa1111_unmap_sg(struct device *dev, struct scatterlist *, int, enum dma_data_direction);
void sa1111_dma_sync_single(struct device *dev, dma_addr_t, size_t, enum dma_data_direction);
void sa1111_dma_sync_sg(struct device *dev, struct scatterlist *, int, enum dma_data_direction);
#ifdef CONFIG_SA1111
@@ -122,7 +122,7 @@ dma_map_single(struct device *dev, void *cpu_addr, size_t size,
enum dma_data_direction dir)
{
if (dmadev_is_sa1111(dev))
return sa1111_map_single(cpu_addr, size, dir);
return sa1111_map_single(dev, cpu_addr, size, dir);
consistent_sync(cpu_addr, size, dir);
return __virt_to_bus((unsigned long)cpu_addr);
@@ -169,7 +169,7 @@ dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size,
enum dma_data_direction dir)
{
if (dmadev_is_sa1111(dev))
sa1111_unmap_single(handle, size, dir);
sa1111_unmap_single(dev, handle, size, dir);
/* nothing to do */
}
@@ -224,7 +224,7 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
int i;
if (dmadev_is_sa1111(dev))
return sa1111_map_sg(sg, nents, dir);
return sa1111_map_sg(dev, sg, nents, dir);
for (i = 0; i < nents; i++, sg++) {
char *virt;
@@ -253,7 +253,7 @@ dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir)
{
if (dmadev_is_sa1111(dev)) {
sa1111_unmap_sg(sg, nents, dir);
sa1111_unmap_sg(dev, sg, nents, dir);
return;
}
@@ -281,7 +281,7 @@ dma_sync_single(struct device *dev, dma_addr_t handle, size_t size,
enum dma_data_direction dir)
{
if (dmadev_is_sa1111(dev)) {
sa1111_dma_sync_single(handle, size, dir);
sa1111_dma_sync_single(dev, handle, size, dir);
return;
}
@@ -308,7 +308,7 @@ dma_sync_sg(struct device *dev, struct scatterlist *sg, int nents,
int i;
if (dmadev_is_sa1111(dev)) {
sa1111_dma_sync_sg(sg, nents, dir);
sa1111_dma_sync_sg(dev, sg, nents, dir);
return;
}