Commit 6a72a700 authored by John Stultz, committed by Greg Kroah-Hartman

staging: ion: Avoid using rt_mutexes directly

RT_MUTEXES can be configured out of the kernel, causing compile
problems with ION.

To quote Colin:
"rt_mutexes were added with the deferred freeing feature.  Heaps need
to return zeroed memory to userspace, but zeroing the memory on every
allocation was causing performance issues.  We added a SCHED_IDLE
thread to zero memory in the background after freeing, but locking the
heap from the SCHED_IDLE thread might block a high priority allocation
thread for a long time.

The lock is only used to protect the heap's free_list and
free_list_size members, and is not held for any long or sleeping
operations.  Converting to a spinlock should prevent priority
inversion without using the rt_mutex.  I'd also rename it to free_lock
so it doesn't get used as a general heap lock."
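
To make the rule concrete, here is a minimal sketch (hypothetical, simplified types; not the driver's actual code) of the discipline Colin describes: the spinlock guards only the free-list bookkeeping, and every critical section is short and non-sleeping, so even a SCHED_IDLE holder delays a high-priority waiter for at most a tiny, bounded window.

#include <linux/list.h>
#include <linux/spinlock.h>

/*
 * Hypothetical simplified types, for illustration only. The spinlock
 * covers just the free-list fields; anything that can sleep or take a
 * long time happens outside the critical section.
 */
struct freelist {
	spinlock_t lock;		/* protects list and size only */
	struct list_head list;
	size_t size;
};

static void freelist_add(struct freelist *fl, struct ion_buffer *buffer)
{
	spin_lock(&fl->lock);		/* short, non-sleeping section */
	list_add(&buffer->list, &fl->list);
	fl->size += buffer->size;
	spin_unlock(&fl->lock);
	/* zeroing/destroying buffers happens outside the lock */
}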

Thus this patch converts the rt_mutex usage to a spinlock and renames
the lock to free_lock, to make its purpose clearer.

I also had to rework the loop logic in ion_heap_freelist_drain() so the
lock can be dropped around the potentially sleeping ion_buffer_destroy()
call without corrupting the free list.
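
A condensed sketch of the reworked loop (taken from the diff below, minus the size accounting): ion_buffer_destroy() can sleep, so free_lock has to be dropped around it, and since any next-pointer cached by list_for_each_entry_safe() could go stale during the unlocked window, each pass instead re-reads the list head under the lock:

spin_lock(&heap->free_lock);
while (!list_empty(&heap->free_list)) {
	buffer = list_first_entry(&heap->free_list, struct ion_buffer, list);
	list_del(&buffer->list);
	spin_unlock(&heap->free_lock);	/* list may change from here on */
	ion_buffer_destroy(buffer);	/* may sleep; must not hold free_lock */
	spin_lock(&heap->free_lock);
}
spin_unlock(&heap->free_lock);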
Acked-by: Colin Cross <ccross@android.com>
Cc: Android Kernel Team <kernel-team@android.com>
Reported-by: Jim Davis <jim.epost@gmail.com>
Signed-off-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 7dcfee65
--- a/drivers/staging/android/ion_heap.c
+++ b/drivers/staging/android/ion_heap.c
@@ -160,10 +160,10 @@ int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot)
 
 void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
 {
-	rt_mutex_lock(&heap->lock);
+	spin_lock(&heap->free_lock);
 	list_add(&buffer->list, &heap->free_list);
 	heap->free_list_size += buffer->size;
-	rt_mutex_unlock(&heap->lock);
+	spin_unlock(&heap->free_lock);
 	wake_up(&heap->waitqueue);
 }
 
@@ -171,34 +171,38 @@ size_t ion_heap_freelist_size(struct ion_heap *heap)
 {
 	size_t size;
 
-	rt_mutex_lock(&heap->lock);
+	spin_lock(&heap->free_lock);
 	size = heap->free_list_size;
-	rt_mutex_unlock(&heap->lock);
+	spin_unlock(&heap->free_lock);
 
 	return size;
 }
 
 size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
 {
-	struct ion_buffer *buffer, *tmp;
+	struct ion_buffer *buffer;
 	size_t total_drained = 0;
 
 	if (ion_heap_freelist_size(heap) == 0)
 		return 0;
 
-	rt_mutex_lock(&heap->lock);
+	spin_lock(&heap->free_lock);
 	if (size == 0)
 		size = heap->free_list_size;
 
-	list_for_each_entry_safe(buffer, tmp, &heap->free_list, list) {
+	while (!list_empty(&heap->free_list)) {
 		if (total_drained >= size)
 			break;
+		buffer = list_first_entry(&heap->free_list, struct ion_buffer,
+					  list);
 		list_del(&buffer->list);
 		heap->free_list_size -= buffer->size;
 		total_drained += buffer->size;
+		spin_unlock(&heap->free_lock);
 		ion_buffer_destroy(buffer);
+		spin_lock(&heap->free_lock);
 	}
-	rt_mutex_unlock(&heap->lock);
+	spin_unlock(&heap->free_lock);
 
 	return total_drained;
 }
@@ -213,16 +217,16 @@ static int ion_heap_deferred_free(void *data)
 		wait_event_freezable(heap->waitqueue,
 				     ion_heap_freelist_size(heap) > 0);
 
-		rt_mutex_lock(&heap->lock);
+		spin_lock(&heap->free_lock);
 		if (list_empty(&heap->free_list)) {
-			rt_mutex_unlock(&heap->lock);
+			spin_unlock(&heap->free_lock);
 			continue;
 		}
 		buffer = list_first_entry(&heap->free_list, struct ion_buffer,
 					  list);
 		list_del(&buffer->list);
 		heap->free_list_size -= buffer->size;
-		rt_mutex_unlock(&heap->lock);
+		spin_unlock(&heap->free_lock);
 		ion_buffer_destroy(buffer);
 	}
 
@@ -235,7 +239,7 @@ int ion_heap_init_deferred_free(struct ion_heap *heap)
 
 	INIT_LIST_HEAD(&heap->free_list);
 	heap->free_list_size = 0;
-	rt_mutex_init(&heap->lock);
+	spin_lock_init(&heap->free_lock);
 	init_waitqueue_head(&heap->waitqueue);
 	heap->task = kthread_run(ion_heap_deferred_free, heap,
 				 "%s", heap->name);
--- a/drivers/staging/android/ion_priv.h
+++ b/drivers/staging/android/ion_priv.h
@@ -159,7 +159,7 @@ struct ion_heap {
 	struct shrinker shrinker;
 	struct list_head free_list;
 	size_t free_list_size;
-	struct rt_mutex lock;
+	spinlock_t free_lock;
 	wait_queue_head_t waitqueue;
 	struct task_struct *task;
 	int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);