Commit e1d855b0 authored by John Stultz, committed by Greg Kroah-Hartman

ion: Cleanup whitespace issues and other checkpatch problems

Just some simple cleanups to address whitespace issues and
other problems found with checkpatch.
Signed-off-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 661f82f6
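
The hunks below address common checkpatch complaints in this code: spaces used for indentation where tabs are expected, lines over 80 characters, unnecessary parentheses and braces around single statements, misplaced '*' in pointer declarations, and a commented-out include. Assuming a kernel tree, warnings of this kind can be reproduced on a source file with something like scripts/checkpatch.pl -f drivers/staging/android/ion/ion.c (the -f/--file option checks an in-tree file rather than a patch; the path here is only illustrative).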
@@ -91,8 +91,8 @@ static int compat_get_ion_custom_data(
 			struct compat_ion_custom_data __user *data32,
 			struct ion_custom_data __user *data)
 {
-        compat_uint_t cmd;
-        compat_ulong_t arg;
+	compat_uint_t cmd;
+	compat_ulong_t arg;
 	int err;
 
 	err = get_user(cmd, &data32->cmd);
@@ -110,8 +110,8 @@ struct ion_handle {
 
 bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
 {
-	return ((buffer->flags & ION_FLAG_CACHED) &&
-		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
+	return (buffer->flags & ION_FLAG_CACHED) &&
+		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
 }
 
 bool ion_buffer_cached(struct ion_buffer *buffer)
@@ -202,7 +202,8 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 	buffer->size = len;
 
 	table = heap->ops->map_dma(heap, buffer);
-	if (WARN_ONCE(table == NULL, "heap->ops->map_dma should return ERR_PTR on error"))
+	if (WARN_ONCE(table == NULL,
+			"heap->ops->map_dma should return ERR_PTR on error"))
 		table = ERR_PTR(-EINVAL);
 	if (IS_ERR(table)) {
 		heap->ops->free(buffer);
@@ -424,7 +425,8 @@ static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
 	return handle ? handle : ERR_PTR(-EINVAL);
 }
 
-static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
+static bool ion_handle_validate(struct ion_client *client,
+				struct ion_handle *handle)
 {
 	WARN_ON(!mutex_is_locked(&client->lock));
 	return (idr_find(&client->idr, handle->id) == handle);
@@ -578,7 +580,8 @@ static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
 		return buffer->vaddr;
 	}
 	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
-	if (WARN_ONCE(vaddr == NULL, "heap->ops->map_kernel should return ERR_PTR on error"))
+	if (WARN_ONCE(vaddr == NULL,
+			"heap->ops->map_kernel should return ERR_PTR on error"))
 		return ERR_PTR(-EINVAL);
 	if (IS_ERR(vaddr))
 		return vaddr;
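
The two WARN_ONCE hunks above rely on the kernel's ERR_PTR convention: a pointer-returning heap op such as map_dma or map_kernel is expected to encode a negative errno in the returned pointer rather than return NULL, so callers test the result with IS_ERR() and recover the error with PTR_ERR(). A minimal sketch of the idiom, using hypothetical names (example_map, example_caller) and <linux/err.h>:

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Hypothetical mapping helper that follows the ERR_PTR convention. */
static struct sg_table *example_map(void)
{
	struct sg_table *table = kzalloc(sizeof(*table), GFP_KERNEL);

	if (!table)
		return ERR_PTR(-ENOMEM);	/* encode the errno in the pointer */
	return table;
}

/* Caller side: test with IS_ERR(), recover the errno with PTR_ERR(). */
static int example_caller(void)
{
	struct sg_table *table = example_map();

	if (IS_ERR(table))
		return PTR_ERR(table);
	kfree(table);
	return 0;
}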
@@ -1403,39 +1406,39 @@ static const struct file_operations debug_heap_fops = {
 #ifdef DEBUG_HEAP_SHRINKER
 static int debug_shrink_set(void *data, u64 val)
 {
-        struct ion_heap *heap = data;
-        struct shrink_control sc;
-        int objs;
+	struct ion_heap *heap = data;
+	struct shrink_control sc;
+	int objs;
 
-        sc.gfp_mask = -1;
-        sc.nr_to_scan = 0;
+	sc.gfp_mask = -1;
+	sc.nr_to_scan = 0;
 
-        if (!val)
-                return 0;
+	if (!val)
+		return 0;
 
-        objs = heap->shrinker.shrink(&heap->shrinker, &sc);
-        sc.nr_to_scan = objs;
+	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
+	sc.nr_to_scan = objs;
 
-        heap->shrinker.shrink(&heap->shrinker, &sc);
-        return 0;
+	heap->shrinker.shrink(&heap->shrinker, &sc);
+	return 0;
 }
 
 static int debug_shrink_get(void *data, u64 *val)
 {
-        struct ion_heap *heap = data;
-        struct shrink_control sc;
-        int objs;
+	struct ion_heap *heap = data;
+	struct shrink_control sc;
+	int objs;
 
-        sc.gfp_mask = -1;
-        sc.nr_to_scan = 0;
+	sc.gfp_mask = -1;
+	sc.nr_to_scan = 0;
 
-        objs = heap->shrinker.shrink(&heap->shrinker, &sc);
-        *val = objs;
-        return 0;
+	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
+	*val = objs;
+	return 0;
 }
 
 DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
-                        debug_shrink_set, "%llu\n");
+			debug_shrink_set, "%llu\n");
 #endif
 
 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
@@ -38,7 +38,7 @@ struct ion_buffer;
 * struct ion_platform_heap - defines a heap in the given platform
 * @type: type of the heap from ion_heap_type enum
 * @id: unique identifier for heap. When allocating higher numbers
-* will be allocated from first. At allocation these are passed
+* will be allocated from first. At allocation these are passed
 * as a bit mask and therefore can not exceed ION_NUM_HEAP_IDS.
 * @name: used for debug purposes
 * @base: base address of heap in physical memory if applicable
@@ -13,7 +13,6 @@
 * GNU General Public License for more details.
 *
 */
-//#include <linux/spinlock.h>
 #include <linux/dma-mapping.h>
 #include <linux/err.h>
 #include <linux/genalloc.h>
@@ -49,9 +49,8 @@ void *ion_heap_map_kernel(struct ion_heap *heap,
 		int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
 		struct page *page = sg_page(sg);
 		BUG_ON(i >= npages);
-		for (j = 0; j < npages_this_entry; j++) {
+		for (j = 0; j < npages_this_entry; j++)
 			*(tmp++) = page++;
-		}
 	}
 	vaddr = vmap(pages, npages, VM_MAP, pgprot);
 	vfree(pages);
@@ -159,7 +158,7 @@ int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot)
 	return ion_heap_sglist_zero(&sg, 1, pgprot);
 }
 
-void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer * buffer)
+void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
 {
 	rt_mutex_lock(&heap->lock);
 	list_add(&buffer->list, &heap->free_list);
@@ -108,7 +108,7 @@ void *ion_page_pool_alloc(struct ion_page_pool *pool)
 	return page;
 }
 
-void ion_page_pool_free(struct ion_page_pool *pool, struct page* page)
+void ion_page_pool_free(struct ion_page_pool *pool, struct page *page)
 {
 	int ret;
 
@@ -230,7 +230,7 @@ int ion_heap_init_deferred_free(struct ion_heap *heap);
 /**
 * ion_heap_freelist_add - add a buffer to the deferred free list
 * @heap: the heap
-* @buffer: the buffer
+* @buffer: the buffer
 *
 * Adds an item to the deferred freelist.
 */
@@ -148,7 +148,8 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
 
 	INIT_LIST_HEAD(&pages);
 	while (size_remaining > 0) {
-		info = alloc_largest_available(sys_heap, buffer, size_remaining, max_order);
+		info = alloc_largest_available(sys_heap, buffer, size_remaining,
+						max_order);
 		if (!info)
 			goto err;
 		list_add_tail(&info->list, &pages);