Commit 387870f2 authored by Marek Szyprowski

mm: dmapool: use provided gfp flags for all dma_alloc_coherent() calls

dmapool always calls dma_alloc_coherent() with the GFP_ATOMIC flag,
regardless of the flags provided by the caller. This causes excessive
pruning of emergency memory pools without any good reason. Additionally,
on the ARM architecture any driver which is using dmapools will sooner
or later trigger the following error:
"ERROR: 256 KiB atomic DMA coherent pool is too small!
Please increase it with coherent_pool= kernel parameter!".
Increasing the coherent pool size usually doesn't help much and only
delays such an error, because all GFP_ATOMIC DMA allocations are always
served from the special, very limited memory pool.
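
To make the failure mode concrete, here is a minimal, hypothetical driver
sketch (example_get_buffer and its variables are illustrative, not part of
this patch). The caller runs in process context and passes GFP_KERNEL, yet
before this patch the allocation was still served from the small atomic
coherent pool:

    /* process context, sleeping allowed, so GFP_KERNEL is the right choice */
    static int example_get_buffer(struct dma_pool *pool)
    {
            dma_addr_t dma_handle;
            void *buf;

            buf = dma_pool_alloc(pool, GFP_KERNEL, &dma_handle);
            if (!buf)
                    return -ENOMEM;
            /*
             * Before this patch the GFP_KERNEL above was ignored:
             * pool_alloc_page() always used GFP_ATOMIC internally, so
             * the backing dma_alloc_coherent() call drained the atomic
             * coherent pool anyway.
             */
            dma_pool_free(pool, buf, dma_handle);
            return 0;
    }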

This patch changes the dmapool code to correctly use the gfp flags
provided by the dmapool caller (a condensed sketch of the resulting
allocation slow path is included just before the diff below).
Reported-by: Soeren Moch <smoch@web.de>
Reported-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Tested-by: Andrew Lunn <andrew@lunn.ch>
Tested-by: Soeren Moch <smoch@web.de>
Cc: stable@vger.kernel.org
parent 29594404
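
For readers skimming the diff, this is the resulting dma_pool_alloc()
slow path after the patch, condensed from the hunk below (an editorial
sketch, not an additional change): when no partially filled page is
found, the pool spinlock is dropped so that pool_alloc_page() may sleep
if the caller's flags allow it, and the new page is linked into the pool
only after the lock has been retaken:

    spin_lock_irqsave(&pool->lock, flags);
    list_for_each_entry(page, &pool->page_list, page_list) {
            if (page->offset < pool->allocation)
                    goto ready;
    }

    /* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
    spin_unlock_irqrestore(&pool->lock, flags);

    page = pool_alloc_page(pool, mem_flags);    /* the caller's gfp flags */
    if (!page)
            return NULL;

    spin_lock_irqsave(&pool->lock, flags);
    list_add(&page->page_list, &pool->page_list);
 ready:
    page->in_use++;

Note that list_add() moves out of pool_alloc_page(): since that function
now runs without the lock, the fresh page must not become visible to other
CPUs until the lock is held again.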
mm/dmapool.c
@@ -50,7 +50,6 @@ struct dma_pool {       /* the pool */
        size_t allocation;
        size_t boundary;
        char name[32];
-       wait_queue_head_t waitq;
        struct list_head pools;
 };
 
@@ -62,8 +61,6 @@ struct dma_page {       /* cacheable header for 'allocation' bytes */
        unsigned int offset;
 };
 
-#define POOL_TIMEOUT_JIFFIES    ((100 /* msec */ * HZ) / 1000)
-
 static DEFINE_MUTEX(pools_lock);
 
 static ssize_t
@@ -172,7 +169,6 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
        retval->size = size;
        retval->boundary = boundary;
        retval->allocation = allocation;
-       init_waitqueue_head(&retval->waitq);
 
        if (dev) {
                int ret;
@@ -227,7 +223,6 @@ static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
                memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
 #endif
                pool_initialise_page(pool, page);
-               list_add(&page->page_list, &pool->page_list);
                page->in_use = 0;
                page->offset = 0;
        } else {
@@ -315,30 +310,21 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
        might_sleep_if(mem_flags & __GFP_WAIT);
 
        spin_lock_irqsave(&pool->lock, flags);
- restart:
        list_for_each_entry(page, &pool->page_list, page_list) {
                if (page->offset < pool->allocation)
                        goto ready;
        }
-       page = pool_alloc_page(pool, GFP_ATOMIC);
-       if (!page) {
-               if (mem_flags & __GFP_WAIT) {
-                       DECLARE_WAITQUEUE(wait, current);
 
-                       __set_current_state(TASK_UNINTERRUPTIBLE);
-                       __add_wait_queue(&pool->waitq, &wait);
-                       spin_unlock_irqrestore(&pool->lock, flags);
+       /* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
+       spin_unlock_irqrestore(&pool->lock, flags);
 
-                       schedule_timeout(POOL_TIMEOUT_JIFFIES);
+       page = pool_alloc_page(pool, mem_flags);
+       if (!page)
+               return NULL;
 
-                       spin_lock_irqsave(&pool->lock, flags);
-                       __remove_wait_queue(&pool->waitq, &wait);
-                       goto restart;
-               }
-               retval = NULL;
-               goto done;
-       }
+       spin_lock_irqsave(&pool->lock, flags);
 
+       list_add(&page->page_list, &pool->page_list);
  ready:
        page->in_use++;
        offset = page->offset;
@@ -348,7 +334,6 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
 #ifdef DMAPOOL_DEBUG
        memset(retval, POOL_POISON_ALLOCATED, pool->size);
 #endif
- done:
        spin_unlock_irqrestore(&pool->lock, flags);
        return retval;
 }
@@ -435,8 +420,6 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
        page->in_use--;
        *(int *)vaddr = page->offset;
        page->offset = offset;
-       if (waitqueue_active(&pool->waitq))
-               wake_up_locked(&pool->waitq);
        /*
         * Resist a temptation to do
         *    if (!is_page_busy(page)) pool_free_page(pool, page);
...
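
With the fix applied, each caller chooses the gfp flags that match its
context, and only genuinely atomic callers compete for the atomic coherent
pool. A minimal usage sketch (hypothetical names, error handling omitted;
dma_pool_create() takes name, device, block size, alignment and boundary):

    struct dma_pool *pool;
    dma_addr_t dma_a, dma_b;
    void *a, *b;

    /* one-time setup, e.g. in probe(): 64-byte blocks, 64-byte aligned */
    pool = dma_pool_create("example", dev, 64, 64, 0);

    /* process context: may sleep, no longer drains the atomic pool */
    a = dma_pool_alloc(pool, GFP_KERNEL, &dma_a);

    /* atomic context (e.g. an interrupt handler): must not sleep */
    b = dma_pool_alloc(pool, GFP_ATOMIC, &dma_b);

    dma_pool_free(pool, a, dma_a);
    dma_pool_free(pool, b, dma_b);
    dma_pool_destroy(pool);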