Commit 0f0d8406 authored by Maarten Lankhorst, committed by Greg Kroah-Hartman

android: convert sync to fence api, v6

Just to show it's easy.

Android syncpoints can be mapped to a timeline. This removes the need
to maintain a separate api for synchronization. I've left the android
trace events in place, but the core fence events should already be
sufficient for debugging.
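
In fence terms the mapping is straightforward. A minimal sketch (not part of this
patch; the example_* names are invented for illustration, only struct fence,
fence_init() and container_of() are real): each sync point becomes a fence created
on its timeline's context and spinlock, and the owning timeline is recovered from
the fence's lock, which is what the new sync_pt_parent() helper in the sync.h hunk
below does.

	/* Illustrative sketch only; the example_* types do not exist in the tree. */
	#include <linux/kernel.h>
	#include <linux/spinlock.h>
	#include <linux/fence.h>

	struct example_timeline {
		spinlock_t child_list_lock;	/* lock shared with every child fence */
		unsigned int context;		/* fence context id of this timeline */
	};

	struct example_pt {
		struct fence base;		/* the generic fence replaces the old sync_pt state */
	};

	/* A point is just a fence initialised with the timeline's lock and context. */
	static void example_pt_init(struct example_timeline *tl, struct example_pt *pt,
				    const struct fence_ops *ops, unsigned int seqno)
	{
		fence_init(&pt->base, ops, &tl->child_list_lock, tl->context, seqno);
	}

	/* The parent timeline falls out of the shared lock via container_of(). */
	static inline struct example_timeline *example_pt_parent(struct example_pt *pt)
	{
		return container_of(pt->base.lock, struct example_timeline,
				    child_list_lock);
	}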

v2:
- Call fence_remove_callback in sync_fence_free if not all fences have fired.
v3:
- Merge Colin Cross' bugfixes, and the android fence merge optimization.
v4:
- Merge with the upstream fixes.
v5:
- Fix small style issues pointed out by Thomas Hellstrom.
v6:
- Fix for updates to fence api.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Acked-by: John Stultz <john.stultz@linaro.org>
Acked-by: Sumit Semwal <sumit.semwal@linaro.org>
Acked-by: Daniel Vetter <daniel@ffwll.ch>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 3aac4502
@@ -88,6 +88,7 @@ config SYNC
 	bool "Synchronization framework"
 	default n
 	select ANON_INODES
+	select DMA_SHARED_BUFFER
 	---help---
 	  This option enables the framework for synchronization between multiple
 	  drivers. Sync implementations can take advantage of hardware
......
@@ -9,5 +9,5 @@ obj-$(CONFIG_ANDROID_TIMED_OUTPUT) += timed_output.o
 obj-$(CONFIG_ANDROID_TIMED_GPIO) += timed_gpio.o
 obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o
 obj-$(CONFIG_ANDROID_INTF_ALARM_DEV) += alarm-dev.o
-obj-$(CONFIG_SYNC) += sync.o
+obj-$(CONFIG_SYNC) += sync.o sync_debug.o
 obj-$(CONFIG_SW_SYNC) += sw_sync.o
@@ -50,7 +50,7 @@ static struct sync_pt *sw_sync_pt_dup(struct sync_pt *sync_pt)
 {
 	struct sw_sync_pt *pt = (struct sw_sync_pt *) sync_pt;
 	struct sw_sync_timeline *obj =
-		(struct sw_sync_timeline *)sync_pt->parent;
+		(struct sw_sync_timeline *)sync_pt_parent(sync_pt);
 
 	return (struct sync_pt *) sw_sync_pt_create(obj, pt->value);
 }
@@ -59,7 +59,7 @@ static int sw_sync_pt_has_signaled(struct sync_pt *sync_pt)
 {
 	struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
 	struct sw_sync_timeline *obj =
-		(struct sw_sync_timeline *)sync_pt->parent;
+		(struct sw_sync_timeline *)sync_pt_parent(sync_pt);
 
 	return sw_sync_cmp(obj->value, pt->value) >= 0;
 }
@@ -97,7 +97,6 @@ static void sw_sync_pt_value_str(struct sync_pt *sync_pt,
 				  char *str, int size)
 {
 	struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
-
 	snprintf(str, size, "%d", pt->value);
 }
 
@@ -157,7 +156,6 @@ static int sw_sync_open(struct inode *inode, struct file *file)
 static int sw_sync_release(struct inode *inode, struct file *file)
 {
 	struct sw_sync_timeline *obj = file->private_data;
-
 	sync_timeline_destroy(&obj->obj);
 	return 0;
 }
......
(The large diff for the core sync.c rewrite is collapsed and not shown here.)
@@ -19,6 +19,7 @@
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/wait.h>
+#include <linux/fence.h>
 
 #include "uapi/sync.h"
 
@@ -40,8 +41,6 @@ struct sync_fence;
  *			  -1 if a will signal before b
  * @free_pt:		called before sync_pt is freed
  * @release_obj:	called before sync_timeline is freed
- * @print_obj:		deprecated
- * @print_pt:		deprecated
  * @fill_driver_data:	write implementation specific driver data to data.
  *			  should return an error if there is not enough room
  *			  as specified by size. This information is returned
@@ -67,13 +66,6 @@ struct sync_timeline_ops {
 	/* optional */
 	void (*release_obj)(struct sync_timeline *sync_timeline);
 
-	/* deprecated */
-	void (*print_obj)(struct seq_file *s,
-			  struct sync_timeline *sync_timeline);
-
-	/* deprecated */
-	void (*print_pt)(struct seq_file *s, struct sync_pt *sync_pt);
-
 	/* optional */
 	int (*fill_driver_data)(struct sync_pt *syncpt, void *data, int size);
 
@@ -104,19 +96,21 @@ struct sync_timeline {
 	/* protected by child_list_lock */
 	bool			destroyed;
+	int			context, value;
 
 	struct list_head	child_list_head;
 	spinlock_t		child_list_lock;
 
 	struct list_head	active_list_head;
-	spinlock_t		active_list_lock;
 
+#ifdef CONFIG_DEBUG_FS
 	struct list_head	sync_timeline_list;
+#endif
 };
 
 /**
  * struct sync_pt - sync point
- * @parent:		sync_timeline to which this sync_pt belongs
+ * @fence:		base fence class
  * @child_list:		membership in sync_timeline.child_list_head
  * @active_list:	membership in sync_timeline.active_list_head
  * @signaled_list:	membership in temporary signaled_list on stack
@@ -127,19 +121,22 @@ struct sync_timeline {
  *		  signaled or error.
  */
 struct sync_pt {
-	struct sync_timeline	*parent;
-	struct list_head	child_list;
+	struct fence base;
 
+	struct list_head	child_list;
 	struct list_head	active_list;
-	struct list_head	signaled_list;
+};
 
-	struct sync_fence	*fence;
-	struct list_head	pt_list;
+static inline struct sync_timeline *sync_pt_parent(struct sync_pt *pt)
+{
+	return container_of(pt->base.lock, struct sync_timeline,
+			    child_list_lock);
+}
 
-	/* protected by parent->active_list_lock */
-	int			status;
-
-	ktime_t			timestamp;
+struct sync_fence_cb {
+	struct fence_cb cb;
+	struct fence *sync_pt;
+	struct sync_fence *fence;
 };
 
 /**
@@ -149,9 +146,7 @@ struct sync_pt {
  * @name:		name of sync_fence. Useful for debugging
  * @pt_list_head:	list of sync_pts in the fence. immutable once fence
  *			  is created
- * @waiter_list_head:	list of asynchronous waiters on this fence
- * @waiter_list_lock:	lock protecting @waiter_list_head and @status
- * @status:		1: signaled, 0:active, <0: error
+ * @status:		0: signaled, >0:active, <0: error
  *
  * @wq:			wait queue for fence signaling
  * @sync_fence_list:	membership in global fence list
@@ -160,17 +155,15 @@ struct sync_fence {
 	struct file		*file;
 	struct kref		kref;
 	char			name[32];
-
-	/* this list is immutable once the fence is created */
-	struct list_head	pt_list_head;
-
-	struct list_head	waiter_list_head;
-	spinlock_t		waiter_list_lock; /* also protects status */
-	int			status;
+#ifdef CONFIG_DEBUG_FS
+	struct list_head	sync_fence_list;
+#endif
+	int num_fences;
 
 	wait_queue_head_t	wq;
+	atomic_t		status;
 
-	struct list_head	sync_fence_list;
+	struct sync_fence_cb	cbs[];
 };
 
 struct sync_fence_waiter;
@@ -184,14 +177,14 @@ typedef void (*sync_callback_t)(struct sync_fence *fence,
  * @callback_data:	pointer to pass to @callback
  */
 struct sync_fence_waiter {
-	struct list_head	waiter_list;
-
-	sync_callback_t		callback;
+	wait_queue_t work;
+	sync_callback_t callback;
 };
 
 static inline void sync_fence_waiter_init(struct sync_fence_waiter *waiter,
 					  sync_callback_t callback)
 {
+	INIT_LIST_HEAD(&waiter->work.task_list);
 	waiter->callback = callback;
 }
 
@@ -341,4 +334,22 @@ int sync_fence_cancel_async(struct sync_fence *fence,
  */
 int sync_fence_wait(struct sync_fence *fence, long timeout);
 
+#ifdef CONFIG_DEBUG_FS
+
+extern void sync_timeline_debug_add(struct sync_timeline *obj);
+extern void sync_timeline_debug_remove(struct sync_timeline *obj);
+extern void sync_fence_debug_add(struct sync_fence *fence);
+extern void sync_fence_debug_remove(struct sync_fence *fence);
+extern void sync_dump(void);
+
+#else
+# define sync_timeline_debug_add(obj)
+# define sync_timeline_debug_remove(obj)
+# define sync_fence_debug_add(fence)
+# define sync_fence_debug_remove(fence)
+# define sync_dump()
+#endif
+int sync_fence_wake_up_wq(wait_queue_t *curr, unsigned mode,
+			  int wake_flags, void *key);
+
 #endif /* _LINUX_SYNC_H */
/*
 * drivers/base/sync.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/anon_inodes.h>
#include "sync.h"
#ifdef CONFIG_DEBUG_FS

static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);
static LIST_HEAD(sync_fence_list_head);
static DEFINE_SPINLOCK(sync_fence_list_lock);

void sync_timeline_debug_add(struct sync_timeline *obj)
{
	unsigned long flags;

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
}

void sync_timeline_debug_remove(struct sync_timeline *obj)
{
	unsigned long flags;

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_del(&obj->sync_timeline_list);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
}

void sync_fence_debug_add(struct sync_fence *fence)
{
	unsigned long flags;

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
}

void sync_fence_debug_remove(struct sync_fence *fence)
{
	unsigned long flags;

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_del(&fence->sync_fence_list);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
}
static const char *sync_status_str(int status)
{
	if (status == 0)
		return "signaled";
	else if (status > 0)
		return "active";
	else
		return "error";
}

static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
{
	int status = 1;
	struct sync_timeline *parent = sync_pt_parent(pt);

	if (fence_is_signaled_locked(&pt->base))
		status = pt->base.status;

	seq_printf(s, " %s%spt %s",
		   fence ? parent->name : "",
		   fence ? "_" : "",
		   sync_status_str(status));

	if (status <= 0) {
		struct timeval tv = ktime_to_timeval(pt->base.timestamp);

		seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
	}

	if (parent->ops->timeline_value_str &&
	    parent->ops->pt_value_str) {
		char value[64];

		parent->ops->pt_value_str(pt, value, sizeof(value));
		seq_printf(s, ": %s", value);
		if (fence) {
			parent->ops->timeline_value_str(parent, value,
							sizeof(value));
			seq_printf(s, " / %s", value);
		}
	}

	seq_puts(s, "\n");
}
static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);

	if (obj->ops->timeline_value_str) {
		char value[64];

		obj->ops->timeline_value_str(obj, value, sizeof(value));
		seq_printf(s, ": %s", value);
	}

	seq_puts(s, "\n");

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_for_each(pos, &obj->child_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, child_list);
		sync_print_pt(s, pt, false);
	}
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
{
	wait_queue_t *pos;
	unsigned long flags;
	int i;

	seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
		   sync_status_str(atomic_read(&fence->status)));

	for (i = 0; i < fence->num_fences; ++i) {
		struct sync_pt *pt =
			container_of(fence->cbs[i].sync_pt,
				     struct sync_pt, base);

		sync_print_pt(s, pt, true);
	}

	spin_lock_irqsave(&fence->wq.lock, flags);
	list_for_each_entry(pos, &fence->wq.task_list, task_list) {
		struct sync_fence_waiter *waiter;

		if (pos->func != &sync_fence_wake_up_wq)
			continue;

		waiter = container_of(pos, struct sync_fence_waiter, work);
		seq_printf(s, "waiter %pF\n", waiter->callback);
	}
	spin_unlock_irqrestore(&fence->wq.lock, flags);
}
static int sync_debugfs_show(struct seq_file *s, void *unused)
{
	unsigned long flags;
	struct list_head *pos;

	seq_puts(s, "objs:\n--------------\n");

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_for_each(pos, &sync_timeline_list_head) {
		struct sync_timeline *obj =
			container_of(pos, struct sync_timeline,
				     sync_timeline_list);

		sync_print_obj(s, obj);
		seq_puts(s, "\n");
	}
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	seq_puts(s, "fences:\n--------------\n");

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_for_each(pos, &sync_fence_list_head) {
		struct sync_fence *fence =
			container_of(pos, struct sync_fence, sync_fence_list);

		sync_print_fence(s, fence);
		seq_puts(s, "\n");
	}
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
	return 0;
}

static int sync_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, sync_debugfs_show, inode->i_private);
}

static const struct file_operations sync_debugfs_fops = {
	.open = sync_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static __init int sync_debugfs_init(void)
{
	debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
	return 0;
}
late_initcall(sync_debugfs_init);
#define DUMP_CHUNK 256
static char sync_dump_buf[64 * 1024];

void sync_dump(void)
{
	struct seq_file s = {
		.buf = sync_dump_buf,
		.size = sizeof(sync_dump_buf) - 1,
	};
	int i;

	sync_debugfs_show(&s, NULL);

	for (i = 0; i < s.count; i += DUMP_CHUNK) {
		if ((s.count - i) > DUMP_CHUNK) {
			char c = s.buf[i + DUMP_CHUNK];

			s.buf[i + DUMP_CHUNK] = 0;
			pr_cont("%s", s.buf + i);
			s.buf[i + DUMP_CHUNK] = c;
		} else {
			s.buf[s.count] = 0;
			pr_cont("%s", s.buf + i);
		}
	}
}
#endif
@@ -45,7 +45,7 @@ TRACE_EVENT(sync_wait,
 
 	TP_fast_assign(
 			__assign_str(name, fence->name);
-			__entry->status = fence->status;
+			__entry->status = atomic_read(&fence->status);
 			__entry->begin = begin;
 	),
 
@@ -54,19 +54,19 @@ TRACE_EVENT(sync_wait,
 );
 
 TRACE_EVENT(sync_pt,
-	TP_PROTO(struct sync_pt *pt),
+	TP_PROTO(struct fence *pt),
 
 	TP_ARGS(pt),
 
 	TP_STRUCT__entry(
-		__string(timeline, pt->parent->name)
+		__string(timeline, pt->ops->get_timeline_name(pt))
 		__array(char, value, 32)
 	),
 
 	TP_fast_assign(
-		__assign_str(timeline, pt->parent->name);
-		if (pt->parent->ops->pt_value_str) {
-			pt->parent->ops->pt_value_str(pt, __entry->value,
+		__assign_str(timeline, pt->ops->get_timeline_name(pt));
+		if (pt->ops->fence_value_str) {
+			pt->ops->fence_value_str(pt, __entry->value,
					sizeof(__entry->value));
 		} else {
 			__entry->value[0] = '\0';
......