Commit 0a23a9c0 authored by Greg Kroah-Hartman

dmapool: fix up list_for_each() calls to list_for_each_entry()

  
Now this should get that Rusty^Wmonkey off my back...
parent 64b7907f
@@ -43,9 +43,11 @@ static DECLARE_MUTEX (pools_lock);
 static ssize_t
 show_pools (struct device *dev, char *buf)
 {
-	unsigned temp, size;
+	unsigned temp;
+	unsigned size;
 	char *next;
-	struct list_head *i, *j;
+	struct dma_page *page;
+	struct dma_pool *pool;
 
 	next = buf;
 	size = PAGE_SIZE;
@@ -55,16 +57,11 @@ show_pools (struct device *dev, char *buf)
 	next += temp;
 
 	down (&pools_lock);
-	list_for_each (i, &dev->dma_pools) {
-		struct dma_pool *pool;
-		unsigned pages = 0, blocks = 0;
+	list_for_each_entry(pool, &dev->dma_pools, pools) {
+		unsigned pages = 0;
+		unsigned blocks = 0;
 
-		pool = list_entry (i, struct dma_pool, pools);
-
-		list_for_each (j, &pool->page_list) {
-			struct dma_page *page;
-
-			page = list_entry (j, struct dma_page, page_list);
+		list_for_each_entry(page, &pool->page_list, page_list) {
 			pages++;
 			blocks += page->in_use;
 		}
@@ -268,7 +265,6 @@ void *
 dma_pool_alloc (struct dma_pool *pool, int mem_flags, dma_addr_t *handle)
 {
 	unsigned long flags;
-	struct list_head *entry;
 	struct dma_page *page;
 	int map, block;
 	size_t offset;
@@ -276,9 +272,8 @@ dma_pool_alloc (struct dma_pool *pool, int mem_flags, dma_addr_t *handle)
 restart:
 	spin_lock_irqsave (&pool->lock, flags);
-	list_for_each (entry, &pool->page_list) {
+	list_for_each_entry(page, &pool->page_list, page_list) {
 		int i;
 
-		page = list_entry (entry, struct dma_page, page_list);
 		/* only cachable accesses here ... */
 		for (map = 0, i = 0;
 			i < pool->blocks_per_page;
@@ -330,12 +325,10 @@ static struct dma_page *
 pool_find_page (struct dma_pool *pool, dma_addr_t dma)
 {
 	unsigned long flags;
-	struct list_head *entry;
 	struct dma_page *page;
 
 	spin_lock_irqsave (&pool->lock, flags);
-	list_for_each (entry, &pool->page_list) {
-		page = list_entry (entry, struct dma_page, page_list);
+	list_for_each_entry(page, &pool->page_list, page_list) {
 		if (dma < page->dma)
 			continue;
 		if (dma < (page->dma + pool->allocation))
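
For readers less familiar with the two iteration styles, here is a minimal, self-contained userspace sketch of the pattern this commit applies. The simplified list macros and the struct dma_page miniature below are written only for this example (they are not the kernel's <linux/list.h> or the real dmapool code); just the before/after loop shapes mirror the hunks above.

/*
 * Sketch only: simplified stand-ins for the kernel list macros, written
 * so this example compiles on its own.  Not the real <linux/list.h>.
 */
#include <stdio.h>
#include <stddef.h>

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}

/* list_entry() is container_of(): recover the struct from its list member */
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* old style: iterate over raw list_head pointers, convert by hand */
#define list_for_each(pos, head) \
	for ((pos) = (head)->next; (pos) != (head); (pos) = (pos)->next)

/* new style: the conversion happens inside the macro */
#define list_for_each_entry(pos, head, member) \
	for ((pos) = list_entry((head)->next, __typeof__(*(pos)), member); \
	     &(pos)->member != (head); \
	     (pos) = list_entry((pos)->member.next, __typeof__(*(pos)), member))

/* hypothetical miniature of the dmapool page bookkeeping */
struct dma_page {
	struct list_head page_list;
	unsigned in_use;
};

int main(void)
{
	struct list_head page_list = LIST_HEAD_INIT(page_list);
	struct dma_page a = { .in_use = 2 }, b = { .in_use = 5 };
	struct dma_page *page;
	struct list_head *entry;
	unsigned blocks = 0;

	list_add_tail(&a.page_list, &page_list);
	list_add_tail(&b.page_list, &page_list);

	/* before: the caller does the list_entry() conversion on every pass */
	list_for_each(entry, &page_list) {
		page = list_entry(entry, struct dma_page, page_list);
		blocks += page->in_use;
	}
	printf("old style counted %u blocks\n", blocks);

	/* after: the macro hands us a typed cursor directly */
	blocks = 0;
	list_for_each_entry(page, &page_list, page_list) {
		blocks += page->in_use;
	}
	printf("new style counted %u blocks\n", blocks);	/* both print 7 */

	return 0;
}

The point is the same one the diff makes: list_for_each_entry() keeps a typed cursor, so the per-iteration list_entry() conversion and the raw struct list_head * cursor variable both disappear from the caller.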