Commit 7ee793a6 authored by Laura Abbott, committed by Marek Szyprowski

cma: Remove potential deadlock situation

CMA locking is currently very coarse. The cma_mutex protects both
the bitmap and serializes calls to alloc_contig_range. Several
situations can currently deadlock on the CMA mutex, mostly AB/BA
lock-ordering situations between the alloc and free paths. Fix this
by protecting the bitmap with a per-CMA-region mutex and using the
existing cma_mutex only to serialize calls to alloc_contig_range.
Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
parent d6d211db
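For context, here is a minimal userspace sketch (not the kernel code) of the locking pattern this patch moves to: a short-lived per-region lock covers only the bitmap, the candidate range is marked as in use before that lock is dropped, the slow claiming step is serialized by a separate lock, and a failed attempt rolls the bitmap back under the per-region lock. The names struct region, reserve_range(), slow_claim_range() and REGION_PAGES are invented for illustration, and pthread mutexes stand in for the kernel mutexes.

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define REGION_PAGES 64

struct region {
        unsigned char bitmap[REGION_PAGES];   /* 1 = page reserved */
        pthread_mutex_t lock;                 /* protects the bitmap only */
};

/* Serializes the slow claiming step, analogous to the global cma_mutex. */
static pthread_mutex_t range_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for alloc_contig_range(); a real version could fail and force a retry. */
static int slow_claim_range(struct region *r, int start, int count)
{
        (void)r; (void)start; (void)count;
        return 0;
}

/* Returns the first page index of a reserved range, or -1 on failure. */
static int reserve_range(struct region *r, int count)
{
        int start, i, ret;

        for (start = 0; start + count <= REGION_PAGES; start++) {
                /* Reserve the candidate range under the short-lived bitmap lock. */
                pthread_mutex_lock(&r->lock);
                for (i = 0; i < count && !r->bitmap[start + i]; i++)
                        ;
                if (i < count) {              /* range busy, try the next one */
                        pthread_mutex_unlock(&r->lock);
                        continue;
                }
                memset(&r->bitmap[start], 1, count);
                /* Safe to drop the lock: the range is now marked as ours. */
                pthread_mutex_unlock(&r->lock);

                /* The slow step runs without the bitmap lock held. */
                pthread_mutex_lock(&range_mutex);
                ret = slow_claim_range(r, start, count);
                pthread_mutex_unlock(&range_mutex);
                if (ret == 0)
                        return start;

                /* Roll the reservation back and retry elsewhere. */
                pthread_mutex_lock(&r->lock);
                memset(&r->bitmap[start], 0, count);
                pthread_mutex_unlock(&r->lock);
        }
        return -1;
}

int main(void)
{
        struct region r = { .lock = PTHREAD_MUTEX_INITIALIZER };

        printf("reserved a range starting at page %d\n", reserve_range(&r, 8));
        return 0;
}

Because the bitmap lock is never held across the slow step, a free path that needs the same lock can always make progress, which is what removes the AB/BA hazard described above.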
@@ -37,6 +37,7 @@ struct cma {
 	unsigned long base_pfn;
 	unsigned long count;
 	unsigned long *bitmap;
+	struct mutex lock;
 };
 struct cma *dma_contiguous_default_area;
@@ -161,6 +162,7 @@ static int __init cma_activate_area(struct cma *cma)
 		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
 	} while (--i);
+	mutex_init(&cma->lock);
 	return 0;
 }
@@ -261,6 +263,13 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
 	return ret;
 }
+static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
+{
+	mutex_lock(&cma->lock);
+	bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
+	mutex_unlock(&cma->lock);
+}
 /**
  * dma_alloc_from_contiguous() - allocate pages from contiguous area
  * @dev: Pointer to device for which the allocation is performed.
@@ -294,30 +303,41 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count,
 	mask = (1 << align) - 1;
-	mutex_lock(&cma_mutex);
 	for (;;) {
+		mutex_lock(&cma->lock);
 		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
 						    start, count, mask);
-		if (pageno >= cma->count)
+		if (pageno >= cma->count) {
+			mutex_unlock(&cma_mutex);
 			break;
+		}
+		bitmap_set(cma->bitmap, pageno, count);
+		/*
+		 * It's safe to drop the lock here. We've marked this region for
+		 * our exclusive use. If the migration fails we will take the
+		 * lock again and unmark it.
+		 */
+		mutex_unlock(&cma->lock);
 		pfn = cma->base_pfn + pageno;
+		mutex_lock(&cma_mutex);
 		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
+		mutex_unlock(&cma_mutex);
 		if (ret == 0) {
-			bitmap_set(cma->bitmap, pageno, count);
 			page = pfn_to_page(pfn);
 			break;
 		} else if (ret != -EBUSY) {
+			clear_cma_bitmap(cma, pfn, count);
 			break;
 		}
+		clear_cma_bitmap(cma, pfn, count);
 		pr_debug("%s(): memory range at %p is busy, retrying\n",
 			 __func__, pfn_to_page(pfn));
 		/* try again with a bit different memory target */
 		start = pageno + mask + 1;
 	}
-	mutex_unlock(&cma_mutex);
 	pr_debug("%s(): returned %p\n", __func__, page);
 	return page;
 }
@@ -350,10 +370,8 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
 	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
-	mutex_lock(&cma_mutex);
-	bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
 	free_contig_range(pfn, count);
-	mutex_unlock(&cma_mutex);
+	clear_cma_bitmap(cma, pfn, count);
 	return true;
 }