Commit 6ebb1e2d authored by David Brownell's avatar David Brownell Committed by Linus Torvalds

[PATCH] pci/pool.c less spinlock abuse

That previous patch got rid of a boot time might_sleep() warning,
but I noticed two later on:

  - kmalloc() needed SLAB_ATOMIC

  - destroying the 'pools' driverfs attribute could sleep too

The clean/simple patch for the second one tweaked an API:

  - pci_pool_create() can't be called in_interrupt() any more.
    Nobody used it there, and such support isn't needed; plus
    that rule matches its sibling call, pci_pool_destroy().

  - that made its SLAB_* flags parameter more useless, so it's
    removed and the DMA-mapping.txt is updated.  (this param
    was more trouble than it was worth -- good that it's gone.)

Nobody (even DaveM) objected to those API changes, so I think
this should be merged.
parent 38b4e748
......@@ -323,16 +323,12 @@ Create a pci_pool like this:
struct pci_pool *pool;
pool = pci_pool_create(name, dev, size, align, alloc, flags);
pool = pci_pool_create(name, dev, size, align, alloc);
The "name" is for diagnostics (like a kmem_cache name); dev and size
are as above. The device's hardware alignment requirement for this
type of data is "align" (which is expressed in bytes, and must be a
power of two). The flags are SLAB_ flags as you'd pass to
kmem_cache_create. Not all flags are understood, but SLAB_POISON may
help you find driver bugs. If you call this in a non- sleeping
context (f.e. in_interrupt is true or while holding SMP locks), pass
SLAB_ATOMIC. If your device has no boundary crossing restrictions,
power of two). If your device has no boundary crossing restrictions,
pass 0 for alloc; passing 4096 says memory allocated from this pool
must not cross 4KByte boundaries (but at that time it may be better to
go for pci_alloc_consistent directly instead).
......
......@@ -834,7 +834,7 @@ static int stream_alloc_packet_lists(struct stream *s)
max_packet_size = max_nevents * s->dimension * 4 + 8;
s->packet_pool = pci_pool_create("packet pool", s->host->ohci->dev,
max_packet_size, 0, 0, SLAB_KERNEL);
max_packet_size, 0, 0);
if (s->packet_pool == NULL)
return -1;
......@@ -1020,7 +1020,7 @@ struct stream *stream_alloc(struct amdtp_host *host)
s->descriptor_pool = pci_pool_create("descriptor pool", host->ohci->dev,
sizeof(struct descriptor_block),
16, 0, SLAB_KERNEL);
16, 0);
if (s->descriptor_pool == NULL) {
kfree(s->input);
kfree(s);
......
......@@ -31,13 +31,12 @@ struct pci_page { /* cacheable header for 'allocation' bytes */
#define POOL_TIMEOUT_JIFFIES ((100 /* msec */ * HZ) / 1000)
#define POOL_POISON_BYTE 0xa7
static spinlock_t pools_lock = SPIN_LOCK_UNLOCKED;
DECLARE_MUTEX (pools_lock);
static ssize_t
show_pools (struct device *dev, char *buf, size_t count, loff_t off)
{
struct pci_dev *pdev;
unsigned long flags;
unsigned temp, size;
char *next;
struct list_head *i, *j;
......@@ -53,7 +52,7 @@ show_pools (struct device *dev, char *buf, size_t count, loff_t off)
size -= temp;
next += temp;
spin_lock_irqsave (&pools_lock, flags);
down (&pools_lock);
list_for_each (i, &pdev->pools) {
struct pci_pool *pool;
unsigned pages = 0, blocks = 0;
......@@ -76,7 +75,7 @@ show_pools (struct device *dev, char *buf, size_t count, loff_t off)
size -= temp;
next += temp;
}
spin_unlock_irqrestore (&pools_lock, flags);
up (&pools_lock);
return count - size;
}
......@@ -89,7 +88,7 @@ static DEVICE_ATTR (pools, S_IRUGO, show_pools, NULL);
* @size: size of the blocks in this pool.
* @align: alignment requirement for blocks; must be a power of two
* @allocation: returned blocks won't cross this boundary (or zero)
* @mem_flags: SLAB_* flags.
* Context: !in_interrupt()
*
* Returns a pci allocation pool with the requested characteristics, or
* null if one can't be created. Given one of these pools, pci_pool_alloc()
......@@ -105,10 +104,9 @@ static DEVICE_ATTR (pools, S_IRUGO, show_pools, NULL);
*/
struct pci_pool *
pci_pool_create (const char *name, struct pci_dev *pdev,
size_t size, size_t align, size_t allocation, int mem_flags)
size_t size, size_t align, size_t allocation)
{
struct pci_pool *retval;
unsigned long flags;
if (align == 0)
align = 1;
......@@ -130,7 +128,7 @@ pci_pool_create (const char *name, struct pci_dev *pdev,
} else if (allocation < size)
return 0;
if (!(retval = kmalloc (sizeof *retval, mem_flags)))
if (!(retval = kmalloc (sizeof *retval, SLAB_KERNEL)))
return retval;
strncpy (retval->name, name, sizeof retval->name);
......@@ -138,18 +136,6 @@ pci_pool_create (const char *name, struct pci_dev *pdev,
retval->dev = pdev;
if (pdev) {
int do_add;
spin_lock_irqsave (&pools_lock, flags);
do_add = list_empty (&pdev->pools);
/* note: not currently insisting "name" be unique */
list_add (&retval->pools, &pdev->pools);
spin_unlock_irqrestore (&pools_lock, flags);
if (do_add)
device_create_file (&pdev->dev, &dev_attr_pools);
} else
INIT_LIST_HEAD (&retval->pools);
INIT_LIST_HEAD (&retval->page_list);
spin_lock_init (&retval->lock);
retval->size = size;
......@@ -157,6 +143,16 @@ pci_pool_create (const char *name, struct pci_dev *pdev,
retval->blocks_per_page = allocation / size;
init_waitqueue_head (&retval->waitq);
if (pdev) {
down (&pools_lock);
if (list_empty (&pdev->pools))
device_create_file (&pdev->dev, &dev_attr_pools);
/* note: not currently insisting "name" be unique */
list_add (&retval->pools, &pdev->pools);
up (&pools_lock);
} else
INIT_LIST_HEAD (&retval->pools);
return retval;
}
......@@ -220,6 +216,7 @@ pool_free_page (struct pci_pool *pool, struct pci_page *page)
/**
* pci_pool_destroy - destroys a pool of pci memory blocks.
* @pool: pci pool that will be destroyed
* Context: !in_interrupt()
*
* Caller guarantees that no more memory from the pool is in use,
* and that nothing will try to use the pool after this call.
......@@ -227,9 +224,12 @@ pool_free_page (struct pci_pool *pool, struct pci_page *page)
void
pci_pool_destroy (struct pci_pool *pool)
{
unsigned long flags;
down (&pools_lock);
list_del (&pool->pools);
if (pool->dev && list_empty (&pool->dev->pools))
device_remove_file (&pool->dev->dev, &dev_attr_pools);
up (&pools_lock);
spin_lock_irqsave (&pool->lock, flags);
while (!list_empty (&pool->page_list)) {
struct pci_page *page;
page = list_entry (pool->page_list.next,
......@@ -245,13 +245,6 @@ pci_pool_destroy (struct pci_pool *pool)
pool_free_page (pool, page);
}
spin_lock (&pools_lock);
list_del (&pool->pools);
if (pool->dev && list_empty (&pool->dev->pools))
device_remove_file (&pool->dev->dev, &dev_attr_pools);
spin_unlock (&pools_lock);
spin_unlock_irqrestore (&pool->lock, flags);
kfree (pool);
}
......@@ -296,7 +289,7 @@ pci_pool_alloc (struct pci_pool *pool, int mem_flags, dma_addr_t *handle)
}
}
}
if (!(page = pool_alloc_page (pool, mem_flags))) {
if (!(page = pool_alloc_page (pool, SLAB_ATOMIC))) {
if (mem_flags == SLAB_KERNEL) {
DECLARE_WAITQUEUE (wait, current);
......
......@@ -42,6 +42,7 @@ static const size_t pool_max [HCD_BUFFER_POOLS] = {
/**
* hcd_buffer_create - initialize buffer pools
* @hcd: the bus whose buffer pools are to be initialized
* Context: !in_interrupt()
*
* Call this as part of initializing a host controller that uses the pci dma
* memory allocators. It initializes some pools of dma-consistent memory that
......@@ -60,7 +61,7 @@ int hcd_buffer_create (struct usb_hcd *hcd)
continue;
snprintf (name, sizeof name, "buffer-%d", size);
hcd->pool [i] = pci_pool_create (name, hcd->pdev,
size, size, 0, SLAB_KERNEL);
size, size, 0);
if (!hcd->pool [i]) {
hcd_buffer_destroy (hcd);
return -ENOMEM;
......@@ -74,6 +75,7 @@ EXPORT_SYMBOL (hcd_buffer_create);
/**
* hcd_buffer_destroy - deallocate buffer pools
* @hcd: the bus whose buffer pools are to be destroyed
* Context: !in_interrupt()
*
* This frees the buffer pools created by hcd_buffer_create().
*/
......
......@@ -166,8 +166,7 @@ static int ehci_mem_init (struct ehci_hcd *ehci, int flags)
ehci->qtd_pool = pci_pool_create ("ehci_qtd", ehci->hcd.pdev,
sizeof (struct ehci_qtd),
32 /* byte alignment (for hw parts) */,
4096 /* can't cross 4K */,
flags);
4096 /* can't cross 4K */);
if (!ehci->qtd_pool) {
dbg ("no qtd pool");
ehci_mem_cleanup (ehci);
......@@ -178,8 +177,7 @@ static int ehci_mem_init (struct ehci_hcd *ehci, int flags)
ehci->qh_pool = pci_pool_create ("ehci_qh", ehci->hcd.pdev,
sizeof (struct ehci_qh),
32 /* byte alignment (for hw parts) */,
4096 /* can't cross 4K */,
flags);
4096 /* can't cross 4K */);
if (!ehci->qh_pool) {
dbg ("no qh pool");
ehci_mem_cleanup (ehci);
......@@ -190,8 +188,7 @@ static int ehci_mem_init (struct ehci_hcd *ehci, int flags)
ehci->itd_pool = pci_pool_create ("ehci_itd", ehci->hcd.pdev,
sizeof (struct ehci_itd),
32 /* byte alignment (for hw parts) */,
4096 /* can't cross 4K */,
flags);
4096 /* can't cross 4K */);
if (!ehci->itd_pool) {
dbg ("no itd pool");
ehci_mem_cleanup (ehci);
......@@ -202,8 +199,7 @@ static int ehci_mem_init (struct ehci_hcd *ehci, int flags)
ehci->sitd_pool = pci_pool_create ("ehci_sitd", ehci->hcd.pdev,
sizeof (struct ehci_sitd),
32 /* byte alignment (for hw parts) */,
4096 /* can't cross 4K */,
flags);
4096 /* can't cross 4K */);
if (!ehci->sitd_pool) {
dbg ("no sitd pool");
ehci_mem_cleanup (ehci);
......
......@@ -135,15 +135,13 @@ static int ohci_mem_init (struct ohci_hcd *ohci)
ohci->td_cache = pci_pool_create ("ohci_td", ohci->hcd.pdev,
sizeof (struct td),
32 /* byte alignment */,
0 /* no page-crossing issues */,
GFP_KERNEL);
0 /* no page-crossing issues */);
if (!ohci->td_cache)
return -ENOMEM;
ohci->ed_cache = pci_pool_create ("ohci_ed", ohci->hcd.pdev,
sizeof (struct ed),
16 /* byte alignment */,
0 /* no page-crossing issues */,
GFP_KERNEL);
0 /* no page-crossing issues */);
if (!ohci->ed_cache) {
pci_pool_destroy (ohci->td_cache);
return -ENOMEM;
......@@ -178,6 +176,13 @@ td_alloc (struct ohci_hcd *hc, int mem_flags)
pci_pool_free (hc->td_cache, td, dma);
return NULL;
}
// DEBUG ONLY want to see if these tds are really getting
// allocated. the last one in a page shouldn't be getting
// allocated during these tests!
if ((dma & 0x0fff) == 0x0fc0) {
dbg ("td = %p", td);
dump_stack ();
}
}
return td;
}
......
......@@ -2149,14 +2149,14 @@ static int __devinit uhci_start(struct usb_hcd *hcd)
uhci->fl->dma_handle = dma_handle;
uhci->td_pool = pci_pool_create("uhci_td", hcd->pdev,
sizeof(struct uhci_td), 16, 0, GFP_ATOMIC);
sizeof(struct uhci_td), 16, 0);
if (!uhci->td_pool) {
err("unable to create td pci_pool");
goto err_create_td_pool;
}
uhci->qh_pool = pci_pool_create("uhci_qh", hcd->pdev,
sizeof(struct uhci_qh), 16, 0, GFP_ATOMIC);
sizeof(struct uhci_qh), 16, 0);
if (!uhci->qh_pool) {
err("unable to create qh pci_pool");
goto err_create_qh_pool;
......
......@@ -637,7 +637,7 @@ struct pci_bus * pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, in
/* kmem_cache style wrapper around pci_alloc_consistent() */
struct pci_pool *pci_pool_create (const char *name, struct pci_dev *dev,
size_t size, size_t align, size_t allocation, int flags);
size_t size, size_t align, size_t allocation);
void pci_pool_destroy (struct pci_pool *pool);
void *pci_pool_alloc (struct pci_pool *pool, int flags, dma_addr_t *handle);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment