Commit e1d855b0 authored by John Stultz, committed by Greg Kroah-Hartman

ion: Cleanup whitespace issues and other checkpatch problems

Just some simple cleanups to address whitespace issues and
other issues found with checkpatch.
Signed-off-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 661f82f6
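The hunks below fix the kind of problems checkpatch.pl reports (pointer declaration spacing, unneeded braces around single statements, over-long lines, dead commented-out includes). As a rough sketch of how such warnings are typically reproduced from a kernel tree (the exact invocation and the staging path are assumptions, not part of this commit):

    # run the kernel style checker on the patch itself ("-" reads the diff from stdin)
    git format-patch -1 --stdout e1d855b0 | ./scripts/checkpatch.pl -

    # or check a source file in place; path assumes the ion driver lives under staging
    ./scripts/checkpatch.pl -f drivers/staging/android/ion/ion.c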
@@ -110,8 +110,8 @@ struct ion_handle {
 bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
 {
-        return ((buffer->flags & ION_FLAG_CACHED) &&
-                !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
+        return (buffer->flags & ION_FLAG_CACHED) &&
+                !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
 }
 
 bool ion_buffer_cached(struct ion_buffer *buffer)
@@ -202,7 +202,8 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
         buffer->size = len;
 
         table = heap->ops->map_dma(heap, buffer);
-        if (WARN_ONCE(table == NULL, "heap->ops->map_dma should return ERR_PTR on error"))
+        if (WARN_ONCE(table == NULL,
+                        "heap->ops->map_dma should return ERR_PTR on error"))
                 table = ERR_PTR(-EINVAL);
         if (IS_ERR(table)) {
                 heap->ops->free(buffer);
@@ -424,7 +425,8 @@ static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
         return handle ? handle : ERR_PTR(-EINVAL);
 }
 
-static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
+static bool ion_handle_validate(struct ion_client *client,
+                                struct ion_handle *handle)
 {
         WARN_ON(!mutex_is_locked(&client->lock));
         return (idr_find(&client->idr, handle->id) == handle);
@@ -578,7 +580,8 @@ static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
                 return buffer->vaddr;
         }
         vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
-        if (WARN_ONCE(vaddr == NULL, "heap->ops->map_kernel should return ERR_PTR on error"))
+        if (WARN_ONCE(vaddr == NULL,
+                        "heap->ops->map_kernel should return ERR_PTR on error"))
                 return ERR_PTR(-EINVAL);
         if (IS_ERR(vaddr))
                 return vaddr;
@@ -13,7 +13,6 @@
  * GNU General Public License for more details.
  *
  */
-//#include <linux/spinlock.h>
 #include <linux/dma-mapping.h>
 #include <linux/err.h>
 #include <linux/genalloc.h>
@@ -49,10 +49,9 @@ void *ion_heap_map_kernel(struct ion_heap *heap,
                 int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
                 struct page *page = sg_page(sg);
 
                 BUG_ON(i >= npages);
-                for (j = 0; j < npages_this_entry; j++) {
+                for (j = 0; j < npages_this_entry; j++)
                         *(tmp++) = page++;
-                }
         }
         vaddr = vmap(pages, npages, VM_MAP, pgprot);
         vfree(pages);
@@ -159,7 +158,7 @@ int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot)
         return ion_heap_sglist_zero(&sg, 1, pgprot);
 }
 
-void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer * buffer)
+void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
 {
         rt_mutex_lock(&heap->lock);
         list_add(&buffer->list, &heap->free_list);
@@ -108,7 +108,7 @@ void *ion_page_pool_alloc(struct ion_page_pool *pool)
         return page;
 }
 
-void ion_page_pool_free(struct ion_page_pool *pool, struct page* page)
+void ion_page_pool_free(struct ion_page_pool *pool, struct page *page)
 {
         int ret;
@@ -148,7 +148,8 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
         INIT_LIST_HEAD(&pages);
         while (size_remaining > 0) {
-                info = alloc_largest_available(sys_heap, buffer, size_remaining, max_order);
+                info = alloc_largest_available(sys_heap, buffer, size_remaining,
+                                                max_order);
                 if (!info)
                         goto err;
 
                 list_add_tail(&info->list, &pages);