Commit 4073536c authored by Alexey Skidanov, committed by Greg Kroah-Hartman

staging: android: ion: Add per-heap counters

Heap statistics have been removed, and currently even basic statistics
are missing.

This patch creates a per-heap debugfs directory, /sys/kernel/debug/<heap_name>,
and adds the following counters (a short example of reading them from
userspace follows below):
- the number of allocated buffers;
- the number of allocated bytes;
- the allocated bytes watermark (the peak number of allocated bytes).
Signed-off-by: Alexey Skidanov <alexey.skidanov@intel.com>
Acked-by: Laura Abbott <labbott@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 91e24a4c
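For illustration only, the following minimal userspace sketch shows how the new counters could be read. It is not part of the patch: the heap directory name ("ion_system_heap") and the exact debugfs layout are assumptions, and depending on where dev->debug_root lives, the files may instead sit under a subdirectory such as /sys/kernel/debug/ion/<heap_name>.

#include <stdio.h>

/* Build a debugfs path and read one u64 counter; returns 0 on any error. */
static unsigned long long read_counter(const char *heap, const char *file)
{
        char path[256];
        unsigned long long val = 0;
        FILE *f;

        /* Assumed layout; adjust if the heap dir sits under .../ion/. */
        snprintf(path, sizeof(path), "/sys/kernel/debug/%s/%s", heap, file);
        f = fopen(path, "r");
        if (!f)
                return 0;
        if (fscanf(f, "%llu", &val) != 1)
                val = 0;
        fclose(f);
        return val;
}

int main(void)
{
        const char *heap = "ion_system_heap";   /* assumed heap name */

        printf("buffers:   %llu\n", read_counter(heap, "num_of_buffers"));
        printf("bytes:     %llu\n", read_counter(heap, "num_of_alloc_bytes"));
        printf("watermark: %llu\n", read_counter(heap, "alloc_bytes_wm"));
        return 0;
}

Note that debugfs is normally only mounted at /sys/kernel/debug and readable by root, so the program will simply print zeros when run unprivileged.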
drivers/staging/android/ion/ion.c
@@ -95,6 +95,13 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 		goto err1;
 	}
 
+	spin_lock(&heap->stat_lock);
+	heap->num_of_buffers++;
+	heap->num_of_alloc_bytes += len;
+	if (heap->num_of_alloc_bytes > heap->alloc_bytes_wm)
+		heap->alloc_bytes_wm = heap->num_of_alloc_bytes;
+	spin_unlock(&heap->stat_lock);
+
 	INIT_LIST_HEAD(&buffer->attachments);
 	mutex_init(&buffer->lock);
 	mutex_lock(&dev->buffer_lock);
@@ -117,6 +124,11 @@ void ion_buffer_destroy(struct ion_buffer *buffer)
 		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
 	}
 	buffer->heap->ops->free(buffer);
+
+	spin_lock(&buffer->heap->stat_lock);
+	buffer->heap->num_of_buffers--;
+	buffer->heap->num_of_alloc_bytes -= buffer->size;
+	spin_unlock(&buffer->heap->stat_lock);
 	kfree(buffer);
 }
@@ -528,12 +540,15 @@ void ion_device_add_heap(struct ion_heap *heap)
 {
 	struct ion_device *dev = internal_dev;
 	int ret;
+	struct dentry *heap_root;
+	char debug_name[64];
 
 	if (!heap->ops->allocate || !heap->ops->free)
 		pr_err("%s: can not add heap with invalid ops struct.\n",
 		       __func__);
 
 	spin_lock_init(&heap->free_lock);
+	spin_lock_init(&heap->stat_lock);
 	heap->free_list_size = 0;
 
 	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
@@ -546,6 +561,33 @@ void ion_device_add_heap(struct ion_heap *heap)
 	}
 
 	heap->dev = dev;
+
+	heap->num_of_buffers = 0;
+	heap->num_of_alloc_bytes = 0;
+	heap->alloc_bytes_wm = 0;
+
+	heap_root = debugfs_create_dir(heap->name, dev->debug_root);
+	debugfs_create_u64("num_of_buffers",
+			   0444, heap_root,
+			   &heap->num_of_buffers);
+	debugfs_create_u64("num_of_alloc_bytes",
+			   0444,
+			   heap_root,
+			   &heap->num_of_alloc_bytes);
+	debugfs_create_u64("alloc_bytes_wm",
+			   0444,
+			   heap_root,
+			   &heap->alloc_bytes_wm);
+	if (heap->shrinker.count_objects &&
+	    heap->shrinker.scan_objects) {
+		snprintf(debug_name, 64, "%s_shrink", heap->name);
+		debugfs_create_file(debug_name,
+				    0644,
+				    heap_root,
+				    heap,
+				    &debug_shrink_fops);
+	}
+
 	down_write(&dev->lock);
 	heap->id = heap_id++;
 	/*
@@ -555,14 +597,6 @@ void ion_device_add_heap(struct ion_heap *heap)
 	plist_node_init(&heap->node, -heap->id);
 	plist_add(&heap->node, &dev->heaps);
 
-	if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
-		char debug_name[64];
-
-		snprintf(debug_name, 64, "%s_shrink", heap->name);
-		debugfs_create_file(debug_name, 0644, dev->debug_root,
-				    heap, &debug_shrink_fops);
-	}
-
 	dev->heap_cnt++;
 	up_write(&dev->lock);
 }
drivers/staging/android/ion/ion.h
@@ -157,6 +157,9 @@ struct ion_heap_ops {
  * @lock:		protects the free list
  * @waitqueue:		queue to wait on from deferred free thread
  * @task:		task struct of deferred free thread
+ * @num_of_buffers	the number of currently allocated buffers
+ * @num_of_alloc_bytes	the number of allocated bytes
+ * @alloc_bytes_wm	the number of allocated bytes watermark
  *
  * Represents a pool of memory from which buffers can be made. In some
  * systems the only heap is regular system memory allocated via vmalloc.
@@ -177,6 +180,12 @@ struct ion_heap {
 	spinlock_t free_lock;
 	wait_queue_head_t waitqueue;
 	struct task_struct *task;
+
+	u64 num_of_buffers;
+	u64 num_of_alloc_bytes;
+	u64 alloc_bytes_wm;
+	/* protect heap statistics */
+	spinlock_t stat_lock;
 };
 
 /**
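The hunks above spread the bookkeeping across ion_buffer_create(), ion_buffer_destroy() and ion_device_add_heap(). As a consolidated reference only, the standalone sketch below shows the same pattern in one place: spinlock-protected counters with a high-water mark, exported through debugfs_create_dir() and debugfs_create_u64(). The module, the demo_* identifiers and the "demo_heap" directory are invented for illustration and are not part of ION.

#include <linux/module.h>
#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static u64 num_of_buffers;
static u64 num_of_alloc_bytes;
static u64 alloc_bytes_wm;
static DEFINE_SPINLOCK(stat_lock);	/* protects the three counters above */
static struct dentry *demo_root;

/* Account one allocation of 'len' bytes and track the high-water mark. */
static void demo_account_alloc(size_t len)
{
	spin_lock(&stat_lock);
	num_of_buffers++;
	num_of_alloc_bytes += len;
	if (num_of_alloc_bytes > alloc_bytes_wm)
		alloc_bytes_wm = num_of_alloc_bytes;
	spin_unlock(&stat_lock);
}

static int __init demo_init(void)
{
	/* Creates /sys/kernel/debug/demo_heap/{num_of_buffers,...} */
	demo_root = debugfs_create_dir("demo_heap", NULL);
	debugfs_create_u64("num_of_buffers", 0444, demo_root, &num_of_buffers);
	debugfs_create_u64("num_of_alloc_bytes", 0444, demo_root,
			   &num_of_alloc_bytes);
	debugfs_create_u64("alloc_bytes_wm", 0444, demo_root, &alloc_bytes_wm);

	demo_account_alloc(4096);	/* example bookkeeping update */
	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove_recursive(demo_root);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Because debugfs files created with debugfs_create_u64() read back the variable directly, no show/read callbacks are needed; the 0444 mode keeps the counters read-only from userspace, matching the patch.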