Commit 8b312bb9 authored by Colin Cross, committed by Greg Kroah-Hartman

ion: optimize ion_heap_buffer_zero

ion_heap_buffer_zero can spend a long time in unmap_kernel_range
if it has to broadcast a TLB flush to every CPU for every page.
Modify it to batch pages into a larger region and clear them
through a single mapping.  The final batch may be smaller if the
buffer size is not a multiple of the batch size, so the mapping
size can vary from call to call; switch to allocating the address
space for each chunk, which allows us to use vm_map_ram to handle
the allocation and mapping together.

The number of pages to zero using a single mapping is set to 32
to hit the fastpath in vm_map_ram.
Signed-off-by: Colin Cross <ccross@android.com>
Signed-off-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent f0f06763
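
The heart of the change, annotated (an editorial sketch of the post-patch helper; the comments are added here for exposition, and the authoritative version is the diff below):

static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
{
	/*
	 * vm_map_ram sizes the address-space allocation to the batch at
	 * hand and installs the mapping in one call, so a short final
	 * batch needs no special handling.
	 */
	void *addr = vm_map_ram(pages, num, -1, pgprot);

	if (!addr)
		return -ENOMEM;
	memset(addr, 0, PAGE_SIZE * num);
	/* one unmap, and at most one TLB flush, per batch instead of per page */
	vm_unmap_ram(addr, num);

	return 0;
}

The caller in ion_heap_buffer_zero collects up to 32 struct page pointers into a fixed array while walking the scatterlist, then invokes this helper whenever the array fills, plus once at the end for any partial batch.
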
@@ -100,40 +100,48 @@ int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
 	return 0;
 }
 
+static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
+{
+	void *addr = vm_map_ram(pages, num, -1, pgprot);
+
+	if (!addr)
+		return -ENOMEM;
+	memset(addr, 0, PAGE_SIZE * num);
+	vm_unmap_ram(addr, num);
+
+	return 0;
+}
+
 int ion_heap_buffer_zero(struct ion_buffer *buffer)
 {
 	struct sg_table *table = buffer->sg_table;
 	pgprot_t pgprot;
 	struct scatterlist *sg;
-	struct vm_struct *vm_struct;
 	int i, j, ret = 0;
+	struct page *pages[32];
+	int k = 0;
 
 	if (buffer->flags & ION_FLAG_CACHED)
 		pgprot = PAGE_KERNEL;
 	else
 		pgprot = pgprot_writecombine(PAGE_KERNEL);
 
-	vm_struct = get_vm_area(PAGE_SIZE, VM_ALLOC);
-	if (!vm_struct)
-		return -ENOMEM;
-
 	for_each_sg(table->sgl, sg, table->nents, i) {
 		struct page *page = sg_page(sg);
 		unsigned long len = sg->length;
 
 		for (j = 0; j < len / PAGE_SIZE; j++) {
-			struct page *sub_page = page + j;
-			struct page **pages = &sub_page;
-			ret = map_vm_area(vm_struct, pgprot, &pages);
-			if (ret)
-				goto end;
-			memset(vm_struct->addr, 0, PAGE_SIZE);
-			unmap_kernel_range((unsigned long)vm_struct->addr,
-					   PAGE_SIZE);
+			pages[k++] = page + j;
+			if (k == ARRAY_SIZE(pages)) {
+				ret = ion_heap_clear_pages(pages, k, pgprot);
+				if (ret)
+					goto end;
+				k = 0;
+			}
 		}
+		if (k)
+			ret = ion_heap_clear_pages(pages, k, pgprot);
 	}
 end:
-	free_vm_area(vm_struct);
 	return ret;
 }
...
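
On the choice of 32: vm_map_ram serves small requests from per-CPU vmap blocks and only falls back to a global vmap-area allocation for larger ones. In kernels of this vintage the cutoff, VMAP_MAX_ALLOC in mm/vmalloc.c, is BITS_PER_LONG pages, i.e. 32 on the 32-bit ARM platforms ION typically targeted. A simplified sketch of the dispatch (abridged and lightly paraphrased from mm/vmalloc.c of the period, not verbatim):

void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	unsigned long size = (unsigned long)count << PAGE_SHIFT;
	unsigned long addr;
	void *mem;

	if (likely(count <= VMAP_MAX_ALLOC)) {
		/* fastpath: carve the range out of a per-CPU vmap block */
		mem = vb_alloc(size, GFP_KERNEL);
		if (IS_ERR(mem))
			return NULL;
		addr = (unsigned long)mem;
	} else {
		/* slowpath: take the global lock and allocate a vmap area */
		struct vmap_area *va = alloc_vmap_area(size, PAGE_SIZE,
				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
		if (IS_ERR(va))
			return NULL;
		addr = va->va_start;
		mem = (void *)addr;
	}
	/* install page-table entries covering the whole batch */
	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
		vm_unmap_ram(mem, count);
		return NULL;
	}
	return mem;
}

To put the win in numbers: zeroing a 4 MB buffer of 4 KB pages previously meant 1024 map/zero/unmap round trips, each unmap_kernel_range call potentially broadcasting a TLB flush; with 32-page batches the same buffer takes 32 round trips, and vm_unmap_ram can additionally defer flushes for vmap-block mappings.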