Commit 77cbe828 authored by Rebecca Schultz Zavin, committed by Greg Kroah-Hartman

gpu: ion: Modify zeroing code so it only allocates address space once

vmap/vunmap spend a significant amount of time allocating the
address space to map into.  Rather than allocating address space
for each page, allocate it once for the entire allocation
and then just map and unmap each page into that address space.
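
As a rough sketch of the pattern the patch moves to (illustration only, not the driver code itself: npages and page stand in for the buffer being zeroed, and the vmalloc helpers are used with the signatures this tree expects, which differ in later kernels):

	struct vm_struct *vm_struct;
	pte_t *ptes;
	int i;

	/* Reserve one page worth of kernel address space, once. */
	vm_struct = get_vm_area(PAGE_SIZE, &ptes);
	if (!vm_struct)
		return;

	for (i = 0; i < npages; i++) {
		struct page *sub_page = page + i;	/* hypothetical buffer page */
		struct page **pages = &sub_page;

		/* Map the current page into the reserved slot, write-combined. */
		map_vm_area(vm_struct, pgprot_writecombine(PAGE_KERNEL), &pages);
		memset(vm_struct->addr, 0, PAGE_SIZE);	/* zero through the mapping */
		/* Tear down the mapping but keep the address space for reuse. */
		unmap_kernel_range((unsigned long)vm_struct->addr, PAGE_SIZE);
	}

	/* Give the address space back only after every page is zeroed. */
	free_vm_area(vm_struct);

The old code paid for a vmap()/vunmap() pair, and therefore a fresh address-space allocation, on every page; with this change the address space is reserved once per free path and reused for each page.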
Signed-off-by: Rebecca Schultz Zavin <rschultz@google.com>
[jstultz: modified patch to apply to staging directory]
Signed-off-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 2bb9f503
@@ -91,7 +91,7 @@ static struct page *alloc_buffer_page(struct ion_system_heap *heap,
 
 static void free_buffer_page(struct ion_system_heap *heap,
 			     struct ion_buffer *buffer, struct page *page,
-			     unsigned int order)
+			     unsigned int order, struct vm_struct *vm_struct)
 {
 	bool cached = ion_buffer_cached(buffer);
 	bool split_pages = ion_buffer_fault_user_mappings(buffer);
@@ -105,10 +105,13 @@ static void free_buffer_page(struct ion_system_heap *heap,
 		   purpose is to keep the pages out of the cache */
 		for (i = 0; i < (1 << order); i++) {
 			struct page *sub_page = page + i;
-			void *addr = vmap(&sub_page, 1, VM_MAP,
-					  pgprot_writecombine(PAGE_KERNEL));
-			memset(addr, 0, PAGE_SIZE);
-			vunmap(addr);
+			struct page **pages = &sub_page;
+			map_vm_area(vm_struct,
+				    pgprot_writecombine(PAGE_KERNEL),
+				    &pages);
+			memset(vm_struct->addr, 0, PAGE_SIZE);
+			unmap_kernel_range((unsigned long)vm_struct->addr,
+					   PAGE_SIZE);
 		}
 		ion_page_pool_free(pool, page);
 	} else if (split_pages) {
@@ -164,6 +167,8 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
 	long size_remaining = PAGE_ALIGN(size);
 	unsigned int max_order = orders[0];
 	bool split_pages = ion_buffer_fault_user_mappings(buffer);
+	struct vm_struct *vm_struct;
+	pte_t *ptes;
 
 	INIT_LIST_HEAD(&pages);
 	while (size_remaining > 0) {
@@ -211,10 +216,13 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
 err1:
 	kfree(table);
 err:
+	vm_struct = get_vm_area(PAGE_SIZE, &ptes);
 	list_for_each_entry(info, &pages, list) {
-		free_buffer_page(sys_heap, buffer, info->page, info->order);
+		free_buffer_page(sys_heap, buffer, info->page, info->order,
+				 vm_struct);
 		kfree(info);
 	}
+	free_vm_area(vm_struct);
 	return -ENOMEM;
 }
@@ -227,10 +235,16 @@ void ion_system_heap_free(struct ion_buffer *buffer)
 	struct sg_table *table = buffer->sg_table;
 	struct scatterlist *sg;
 	LIST_HEAD(pages);
+	struct vm_struct *vm_struct;
+	pte_t *ptes;
 	int i;
 
+	vm_struct = get_vm_area(PAGE_SIZE, &ptes);
+
 	for_each_sg(table->sgl, sg, table->nents, i)
-		free_buffer_page(sys_heap, buffer, sg_page(sg), get_order(sg_dma_len(sg)));
+		free_buffer_page(sys_heap, buffer, sg_page(sg),
+				 get_order(sg_dma_len(sg)), vm_struct);
+	free_vm_area(vm_struct);
 	sg_free_table(table);
 	kfree(table);
 }