Commit bdfb95c4 authored by Greg Kroah-Hartman

staging: greybus: remove timesync protocol support

While the timesync protocol was a great idea, it never ended up getting
implemented by any known hardware devices.  It's also a bit
"interesting" in how it ties into the platform controller.

So, just remove it for now.  It's not needed, no one uses it, and it's a
stumbling block in getting the greybus core code merged out of the
staging tree.  If anyone wants it in the future, reverting this patch is
a great place to start from.
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Reviewed-by: Johan Hovold <johan@kernel.org>
Reviewed-by: Alex Elder <elder@kernel.org>
Acked-by: Bryan O'Donoghue <pure.logic@nexus-software.ie>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 02bbd980
drivers/staging/greybus/Makefile
@@ -10,9 +10,7 @@ greybus-y := core.o \
control.o \
svc.o \
svc_watchdog.o \
operation.o \
timesync.o \
timesync_platform.o
operation.o
obj-$(CONFIG_GREYBUS) += greybus.o
drivers/staging/greybus/arche_platform.h
@@ -10,8 +10,6 @@
#ifndef __ARCHE_PLATFORM_H
#define __ARCHE_PLATFORM_H
#include "timesync.h"
enum arche_platform_state {
ARCHE_PLATFORM_STATE_OFF,
ARCHE_PLATFORM_STATE_ACTIVE,
drivers/staging/greybus/control.c
@@ -198,56 +198,6 @@ int gb_control_mode_switch_operation(struct gb_control *control)
return ret;
}
int gb_control_timesync_enable(struct gb_control *control, u8 count,
u64 frame_time, u32 strobe_delay, u32 refclk)
{
struct gb_control_timesync_enable_request request;
request.count = count;
request.frame_time = cpu_to_le64(frame_time);
request.strobe_delay = cpu_to_le32(strobe_delay);
request.refclk = cpu_to_le32(refclk);
return gb_operation_sync(control->connection,
GB_CONTROL_TYPE_TIMESYNC_ENABLE, &request,
sizeof(request), NULL, 0);
}
int gb_control_timesync_disable(struct gb_control *control)
{
return gb_operation_sync(control->connection,
GB_CONTROL_TYPE_TIMESYNC_DISABLE, NULL, 0,
NULL, 0);
}
int gb_control_timesync_get_last_event(struct gb_control *control,
u64 *frame_time)
{
struct gb_control_timesync_get_last_event_response response;
int ret;
ret = gb_operation_sync(control->connection,
GB_CONTROL_TYPE_TIMESYNC_GET_LAST_EVENT,
NULL, 0, &response, sizeof(response));
if (!ret)
*frame_time = le64_to_cpu(response.frame_time);
return ret;
}
int gb_control_timesync_authoritative(struct gb_control *control,
u64 *frame_time)
{
struct gb_control_timesync_authoritative_request request;
int i;
for (i = 0; i < GB_TIMESYNC_MAX_STROBES; i++)
request.frame_time[i] = cpu_to_le64(frame_time[i]);
return gb_operation_sync(control->connection,
GB_CONTROL_TYPE_TIMESYNC_AUTHORITATIVE,
&request, sizeof(request),
NULL, 0);
}
static int gb_control_bundle_pm_status_map(u8 status)
{
switch (status) {
drivers/staging/greybus/control.h
@@ -48,13 +48,6 @@ void gb_control_mode_switch_complete(struct gb_control *control);
int gb_control_get_manifest_size_operation(struct gb_interface *intf);
int gb_control_get_manifest_operation(struct gb_interface *intf, void *manifest,
size_t size);
int gb_control_timesync_enable(struct gb_control *control, u8 count,
u64 frame_time, u32 strobe_delay, u32 refclk);
int gb_control_timesync_disable(struct gb_control *control);
int gb_control_timesync_get_last_event(struct gb_control *control,
u64 *frame_time);
int gb_control_timesync_authoritative(struct gb_control *control,
u64 *frame_time);
int gb_control_bundle_suspend(struct gb_control *control, u8 bundle_id);
int gb_control_bundle_resume(struct gb_control *control, u8 bundle_id);
int gb_control_bundle_deactivate(struct gb_control *control, u8 bundle_id);
drivers/staging/greybus/core.c
@@ -218,8 +218,6 @@ static int greybus_probe(struct device *dev)
return retval;
}
gb_timesync_schedule_synchronous(bundle->intf);
pm_runtime_put(&bundle->intf->dev);
return 0;
@@ -326,16 +324,8 @@ static int __init gb_init(void)
pr_err("gb_operation_init failed (%d)\n", retval);
goto error_operation;
}
retval = gb_timesync_init();
if (retval) {
pr_err("gb_timesync_init failed\n");
goto error_timesync;
}
return 0; /* Success */
error_timesync:
gb_operation_exit();
error_operation:
gb_hd_exit();
error_hd:
@@ -349,7 +339,6 @@ module_init(gb_init);
static void __exit gb_exit(void)
{
gb_timesync_exit();
gb_operation_exit();
gb_hd_exit();
bus_unregister(&greybus_bus_type);
drivers/staging/greybus/es2.c
@@ -127,29 +127,6 @@ struct es2_ap_dev {
struct list_head arpcs;
};
/**
* timesync_enable_request - Enable timesync in an APBridge
* @count: number of TimeSync Pulses to expect
* @frame_time: the initial FrameTime at the first TimeSync Pulse
* @strobe_delay: the expected delay in microseconds between each TimeSync Pulse
* @refclk: The AP mandated reference clock to run FrameTime at
*/
struct timesync_enable_request {
__u8 count;
__le64 frame_time;
__le32 strobe_delay;
__le32 refclk;
} __packed;
/**
* timesync_authoritative_request - Transmit authoritative FrameTime to APBridge
* @frame_time: An array of authoritative FrameTimes provided by the SVC
* and relayed to the APBridge by the AP
*/
struct timesync_authoritative_request {
__le64 frame_time[GB_TIMESYNC_MAX_STROBES];
} __packed;
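Both structures travel raw over a USB vendor control transfer, so their wire layout matters. A compile-time size check (a hypothetical sketch, not part of the original driver) makes the __packed requirement concrete:
/* Sketch only: with __packed the enable request is exactly
 * 1 + 8 + 4 + 4 = 17 bytes; without it the compiler would pad
 * frame_time out to an 8-byte boundary. */
static inline void timesync_request_size_checks(void)
{
	BUILD_BUG_ON(sizeof(struct timesync_enable_request) != 17);
	BUILD_BUG_ON(sizeof(struct timesync_authoritative_request) !=
		     GB_TIMESYNC_MAX_STROBES * sizeof(__le64));
}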
struct arpc {
struct list_head list;
struct arpc_request_message *req;
@@ -754,111 +731,6 @@ static int latency_tag_disable(struct gb_host_device *hd, u16 cport_id)
return retval;
}
static int timesync_enable(struct gb_host_device *hd, u8 count,
u64 frame_time, u32 strobe_delay, u32 refclk)
{
int retval;
struct es2_ap_dev *es2 = hd_to_es2(hd);
struct usb_device *udev = es2->usb_dev;
struct gb_control_timesync_enable_request *request;
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (!request)
return -ENOMEM;
request->count = count;
request->frame_time = cpu_to_le64(frame_time);
request->strobe_delay = cpu_to_le32(strobe_delay);
request->refclk = cpu_to_le32(refclk);
retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
GB_APB_REQUEST_TIMESYNC_ENABLE,
USB_DIR_OUT | USB_TYPE_VENDOR |
USB_RECIP_INTERFACE, 0, 0, request,
sizeof(*request), ES2_USB_CTRL_TIMEOUT);
if (retval < 0)
dev_err(&udev->dev, "Cannot enable timesync %d\n", retval);
kfree(request);
return retval;
}
static int timesync_disable(struct gb_host_device *hd)
{
int retval;
struct es2_ap_dev *es2 = hd_to_es2(hd);
struct usb_device *udev = es2->usb_dev;
retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
GB_APB_REQUEST_TIMESYNC_DISABLE,
USB_DIR_OUT | USB_TYPE_VENDOR |
USB_RECIP_INTERFACE, 0, 0, NULL,
0, ES2_USB_CTRL_TIMEOUT);
if (retval < 0)
dev_err(&udev->dev, "Cannot disable timesync %d\n", retval);
return retval;
}
static int timesync_authoritative(struct gb_host_device *hd, u64 *frame_time)
{
int retval, i;
struct es2_ap_dev *es2 = hd_to_es2(hd);
struct usb_device *udev = es2->usb_dev;
struct timesync_authoritative_request *request;
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (!request)
return -ENOMEM;
for (i = 0; i < GB_TIMESYNC_MAX_STROBES; i++)
request->frame_time[i] = cpu_to_le64(frame_time[i]);
retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
GB_APB_REQUEST_TIMESYNC_AUTHORITATIVE,
USB_DIR_OUT | USB_TYPE_VENDOR |
USB_RECIP_INTERFACE, 0, 0, request,
sizeof(*request), ES2_USB_CTRL_TIMEOUT);
if (retval < 0)
dev_err(&udev->dev, "Cannot timesync authoritative out %d\n", retval);
kfree(request);
return retval;
}
static int timesync_get_last_event(struct gb_host_device *hd, u64 *frame_time)
{
int retval;
struct es2_ap_dev *es2 = hd_to_es2(hd);
struct usb_device *udev = es2->usb_dev;
__le64 *response_frame_time;
response_frame_time = kzalloc(sizeof(*response_frame_time), GFP_KERNEL);
if (!response_frame_time)
return -ENOMEM;
retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
GB_APB_REQUEST_TIMESYNC_GET_LAST_EVENT,
USB_DIR_IN | USB_TYPE_VENDOR |
USB_RECIP_INTERFACE, 0, 0, response_frame_time,
sizeof(*response_frame_time),
ES2_USB_CTRL_TIMEOUT);
if (retval != sizeof(*response_frame_time)) {
dev_err(&udev->dev, "Cannot get last TimeSync event: %d\n",
retval);
if (retval >= 0)
retval = -EIO;
goto out;
}
*frame_time = le64_to_cpu(*response_frame_time);
retval = 0;
out:
kfree(response_frame_time);
return retval;
}
static struct gb_hd_driver es2_driver = {
.hd_priv_size = sizeof(struct es2_ap_dev),
.message_send = message_send,
@@ -874,10 +746,6 @@ static struct gb_hd_driver es2_driver = {
.latency_tag_enable = latency_tag_enable,
.latency_tag_disable = latency_tag_disable,
.output = output,
.timesync_enable = timesync_enable,
.timesync_disable = timesync_disable,
.timesync_authoritative = timesync_authoritative,
.timesync_get_last_event = timesync_get_last_event,
};
/* Common function to report consistent warnings based on URB status */
drivers/staging/greybus/greybus.h
@@ -33,7 +33,6 @@
#include "bundle.h"
#include "connection.h"
#include "operation.h"
#include "timesync.h"
/* Matches up with the Greybus Protocol specification document */
#define GREYBUS_VERSION_MAJOR 0x00
drivers/staging/greybus/greybus_protocols.h
@@ -173,26 +173,6 @@ struct gb_control_disconnected_request {
} __packed;
/* Control protocol [dis]connected response has no payload */
#define GB_TIMESYNC_MAX_STROBES 0x04
struct gb_control_timesync_enable_request {
__u8 count;
__le64 frame_time;
__le32 strobe_delay;
__le32 refclk;
} __packed;
/* timesync enable response has no payload */
struct gb_control_timesync_authoritative_request {
__le64 frame_time[GB_TIMESYNC_MAX_STROBES];
} __packed;
/* timesync authoritative response has no payload */
/* timesync get_last_event_request has no payload */
struct gb_control_timesync_get_last_event_response {
__le64 frame_time;
} __packed;
/*
* All Bundle power management operations use the same request and response
* layout and status codes.
@@ -1169,33 +1149,6 @@ struct gb_svc_intf_unipro_response {
#define GB_SVC_INTF_UNIPRO_NOT_OFF 0x03
} __packed;
struct gb_svc_timesync_enable_request {
__u8 count;
__le64 frame_time;
__le32 strobe_delay;
__le32 refclk;
} __packed;
/* timesync enable response has no payload */
/* timesync authoritative request has no payload */
struct gb_svc_timesync_authoritative_response {
__le64 frame_time[GB_TIMESYNC_MAX_STROBES];
};
struct gb_svc_timesync_wake_pins_acquire_request {
__le32 strobe_mask;
};
/* timesync wake pins acquire response has no payload */
/* timesync wake pins release request has no payload */
/* timesync wake pins release response has no payload */
/* timesync svc ping request has no payload */
struct gb_svc_timesync_ping_response {
__le64 frame_time;
} __packed;
#define GB_SVC_UNIPRO_FAST_MODE 0x01
#define GB_SVC_UNIPRO_SLOW_MODE 0x02
#define GB_SVC_UNIPRO_FAST_AUTO_MODE 0x04
drivers/staging/greybus/greybus_trace.h
@@ -488,34 +488,6 @@ DEFINE_HD_EVENT(gb_hd_in);
#undef DEFINE_HD_EVENT
/*
* Occurs on a TimeSync synchronization event or a TimeSync ping event.
*/
TRACE_EVENT(gb_timesync_irq,
TP_PROTO(u8 ping, u8 strobe, u8 count, u64 frame_time),
TP_ARGS(ping, strobe, count, frame_time),
TP_STRUCT__entry(
__field(u8, ping)
__field(u8, strobe)
__field(u8, count)
__field(u64, frame_time)
),
TP_fast_assign(
__entry->ping = ping;
__entry->strobe = strobe;
__entry->count = count;
__entry->frame_time = frame_time;
),
TP_printk("%s %d/%d frame-time %llu\n",
__entry->ping ? "ping" : "strobe", __entry->strobe,
__entry->count, __entry->frame_time)
);
#endif /* _TRACE_GREYBUS_H */
/* This part must be outside protection */
drivers/staging/greybus/hd.h
@@ -37,13 +37,6 @@ struct gb_hd_driver {
int (*latency_tag_disable)(struct gb_host_device *hd, u16 cport_id);
int (*output)(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
bool async);
int (*timesync_enable)(struct gb_host_device *hd, u8 count,
u64 frame_time, u32 strobe_delay, u32 refclk);
int (*timesync_disable)(struct gb_host_device *hd);
int (*timesync_authoritative)(struct gb_host_device *hd,
u64 *frame_time);
int (*timesync_get_last_event)(struct gb_host_device *hd,
u64 *frame_time);
};
struct gb_host_device {
drivers/staging/greybus/interface.c
@@ -702,14 +702,12 @@ static void gb_interface_release(struct device *dev)
static int gb_interface_suspend(struct device *dev)
{
struct gb_interface *intf = to_gb_interface(dev);
int ret, timesync_ret;
int ret;
ret = gb_control_interface_suspend_prepare(intf->control);
if (ret)
return ret;
gb_timesync_interface_remove(intf);
ret = gb_control_suspend(intf->control);
if (ret)
goto err_hibernate_abort;
@@ -730,12 +728,6 @@ static int gb_interface_suspend(struct device *dev)
err_hibernate_abort:
gb_control_interface_hibernate_abort(intf->control);
timesync_ret = gb_timesync_interface_add(intf);
if (timesync_ret) {
dev_err(dev, "failed to add to timesync: %d\n", timesync_ret);
return timesync_ret;
}
return ret;
}
@@ -757,18 +749,6 @@ static int gb_interface_resume(struct device *dev)
if (ret)
return ret;
ret = gb_timesync_interface_add(intf);
if (ret) {
dev_err(dev, "failed to add to timesync: %d\n", ret);
return ret;
}
ret = gb_timesync_schedule_synchronous(intf);
if (ret) {
dev_err(dev, "failed to synchronize FrameTime: %d\n", ret);
return ret;
}
return 0;
}
@@ -1152,16 +1132,10 @@ int gb_interface_enable(struct gb_interface *intf)
if (ret)
goto err_destroy_bundles;
ret = gb_timesync_interface_add(intf);
if (ret) {
dev_err(&intf->dev, "failed to add to timesync: %d\n", ret);
goto err_destroy_bundles;
}
/* Register the control device and any bundles */
ret = gb_control_add(intf->control);
if (ret)
goto err_remove_timesync;
goto err_destroy_bundles;
pm_runtime_use_autosuspend(&intf->dev);
pm_runtime_get_noresume(&intf->dev);
@@ -1186,8 +1160,6 @@ int gb_interface_enable(struct gb_interface *intf)
return 0;
err_remove_timesync:
gb_timesync_interface_remove(intf);
err_destroy_bundles:
list_for_each_entry_safe(bundle, tmp, &intf->bundles, links)
gb_bundle_destroy(bundle);
@@ -1230,7 +1202,6 @@ void gb_interface_disable(struct gb_interface *intf)
gb_control_interface_deactivate_prepare(intf->control);
gb_control_del(intf->control);
gb_timesync_interface_remove(intf);
gb_control_disable(intf->control);
gb_control_put(intf->control);
intf->control = NULL;
@@ -1243,29 +1214,6 @@ void gb_interface_disable(struct gb_interface *intf)
pm_runtime_put_noidle(&intf->dev);
}
/* Enable TimeSync on an Interface control connection. */
int gb_interface_timesync_enable(struct gb_interface *intf, u8 count,
u64 frame_time, u32 strobe_delay, u32 refclk)
{
return gb_control_timesync_enable(intf->control, count,
frame_time, strobe_delay,
refclk);
}
/* Disable TimeSync on an Interface control connection. */
int gb_interface_timesync_disable(struct gb_interface *intf)
{
return gb_control_timesync_disable(intf->control);
}
/* Transmit the Authoritative FrameTime via an Interface control connection. */
int gb_interface_timesync_authoritative(struct gb_interface *intf,
u64 *frame_time)
{
return gb_control_timesync_authoritative(intf->control,
frame_time);
}
/* Register an interface. */
int gb_interface_add(struct gb_interface *intf)
{
drivers/staging/greybus/interface.h
@@ -72,11 +72,6 @@ int gb_interface_activate(struct gb_interface *intf);
void gb_interface_deactivate(struct gb_interface *intf);
int gb_interface_enable(struct gb_interface *intf);
void gb_interface_disable(struct gb_interface *intf);
int gb_interface_timesync_enable(struct gb_interface *intf, u8 count,
u64 frame_time, u32 strobe_delay, u32 refclk);
int gb_interface_timesync_authoritative(struct gb_interface *intf,
u64 *frame_time);
int gb_interface_timesync_disable(struct gb_interface *intf);
int gb_interface_add(struct gb_interface *intf);
void gb_interface_del(struct gb_interface *intf);
void gb_interface_put(struct gb_interface *intf);
drivers/staging/greybus/svc.c
@@ -518,85 +518,6 @@ void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
}
}
int gb_svc_timesync_enable(struct gb_svc *svc, u8 count, u64 frame_time,
u32 strobe_delay, u32 refclk)
{
struct gb_connection *connection = svc->connection;
struct gb_svc_timesync_enable_request request;
request.count = count;
request.frame_time = cpu_to_le64(frame_time);
request.strobe_delay = cpu_to_le32(strobe_delay);
request.refclk = cpu_to_le32(refclk);
return gb_operation_sync(connection,
GB_SVC_TYPE_TIMESYNC_ENABLE,
&request, sizeof(request), NULL, 0);
}
int gb_svc_timesync_disable(struct gb_svc *svc)
{
struct gb_connection *connection = svc->connection;
return gb_operation_sync(connection,
GB_SVC_TYPE_TIMESYNC_DISABLE,
NULL, 0, NULL, 0);
}
int gb_svc_timesync_authoritative(struct gb_svc *svc, u64 *frame_time)
{
struct gb_connection *connection = svc->connection;
struct gb_svc_timesync_authoritative_response response;
int ret, i;
ret = gb_operation_sync(connection,
GB_SVC_TYPE_TIMESYNC_AUTHORITATIVE, NULL, 0,
&response, sizeof(response));
if (ret < 0)
return ret;
for (i = 0; i < GB_TIMESYNC_MAX_STROBES; i++)
frame_time[i] = le64_to_cpu(response.frame_time[i]);
return 0;
}
int gb_svc_timesync_ping(struct gb_svc *svc, u64 *frame_time)
{
struct gb_connection *connection = svc->connection;
struct gb_svc_timesync_ping_response response;
int ret;
ret = gb_operation_sync(connection,
GB_SVC_TYPE_TIMESYNC_PING,
NULL, 0,
&response, sizeof(response));
if (ret < 0)
return ret;
*frame_time = le64_to_cpu(response.frame_time);
return 0;
}
int gb_svc_timesync_wake_pins_acquire(struct gb_svc *svc, u32 strobe_mask)
{
struct gb_connection *connection = svc->connection;
struct gb_svc_timesync_wake_pins_acquire_request request;
request.strobe_mask = cpu_to_le32(strobe_mask);
return gb_operation_sync(connection,
GB_SVC_TYPE_TIMESYNC_WAKE_PINS_ACQUIRE,
&request, sizeof(request),
NULL, 0);
}
int gb_svc_timesync_wake_pins_release(struct gb_svc *svc)
{
struct gb_connection *connection = svc->connection;
return gb_operation_sync(connection,
GB_SVC_TYPE_TIMESYNC_WAKE_PINS_RELEASE,
NULL, 0, NULL, 0);
}
/* Creates bi-directional routes between the devices */
int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id,
u8 intf2_id, u8 dev2_id)
@@ -945,13 +866,6 @@ static int gb_svc_hello(struct gb_operation *op)
gb_svc_debugfs_init(svc);
ret = gb_timesync_svc_add(svc);
if (ret) {
dev_err(&svc->dev, "failed to add SVC to timesync: %d\n", ret);
gb_svc_debugfs_exit(svc);
goto err_unregister_device;
}
return gb_svc_queue_deferred_request(op);
err_unregister_device:
@@ -1467,7 +1381,6 @@ void gb_svc_del(struct gb_svc *svc)
* The SVC device may have been registered from the request handler.
*/
if (device_is_registered(&svc->dev)) {
gb_timesync_svc_remove(svc);
gb_svc_debugfs_exit(svc);
gb_svc_watchdog_destroy(svc);
device_del(&svc->dev);
drivers/staging/greybus/svc.h
@@ -95,13 +95,6 @@ void gb_svc_watchdog_destroy(struct gb_svc *svc);
bool gb_svc_watchdog_enabled(struct gb_svc *svc);
int gb_svc_watchdog_enable(struct gb_svc *svc);
int gb_svc_watchdog_disable(struct gb_svc *svc);
int gb_svc_timesync_enable(struct gb_svc *svc, u8 count, u64 frame_time,
u32 strobe_delay, u32 refclk);
int gb_svc_timesync_disable(struct gb_svc *svc);
int gb_svc_timesync_authoritative(struct gb_svc *svc, u64 *frame_time);
int gb_svc_timesync_ping(struct gb_svc *svc, u64 *frame_time);
int gb_svc_timesync_wake_pins_acquire(struct gb_svc *svc, u32 strobe_mask);
int gb_svc_timesync_wake_pins_release(struct gb_svc *svc);
int gb_svc_protocol_init(void);
void gb_svc_protocol_exit(void);
drivers/staging/greybus/timesync.c (deleted)
/*
* TimeSync API driver.
*
* Copyright 2016 Google Inc.
* Copyright 2016 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#include <linux/debugfs.h>
#include <linux/hrtimer.h>
#include "greybus.h"
#include "timesync.h"
#include "greybus_trace.h"
/*
 * Minimum inter-strobe value of one millisecond is chosen because it
 * just about fits the common definition of a jiffy.
 *
 * The maximum value, on the other hand, is constrained by the width of
 * the 16-bit up-counter the SVC uses. The SVC configures that timer in
 * microseconds, so the maximum allowable value is 65535 microseconds.
 * We clip it to 10000 microseconds for the sake of using nice round
 * base-10 numbers, and since right now there is no imaginable use-case
 * requiring anything other than a one millisecond inter-strobe time,
 * let alone anything higher than ten milliseconds.
 */
#define GB_TIMESYNC_STROBE_DELAY_US 1000
#define GB_TIMESYNC_DEFAULT_OFFSET_US 1000
/* Work queue timers long, short and SVC strobe timeout */
#define GB_TIMESYNC_DELAYED_WORK_LONG msecs_to_jiffies(10)
#define GB_TIMESYNC_DELAYED_WORK_SHORT msecs_to_jiffies(1)
#define GB_TIMESYNC_MAX_WAIT_SVC msecs_to_jiffies(5000)
#define GB_TIMESYNC_KTIME_UPDATE msecs_to_jiffies(1000)
#define GB_TIMESYNC_MAX_KTIME_CONVERSION 15
/* Maximum number of times we'll retry a failed synchronous sync */
#define GB_TIMESYNC_MAX_RETRIES 5
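Nothing in this file validates a requested strobe delay against those limits at runtime; a minimal sketch of such a check (hypothetical, not in the original driver) would be:
/* Hypothetical bounds check for an inter-strobe delay, per the limits
 * described above: one millisecond minimum, clipped to ten milliseconds,
 * well under the SVC's 65535 microsecond (16-bit) hardware ceiling. */
static int gb_timesync_validate_strobe_delay(u32 delay_us)
{
	if (delay_us < 1000 || delay_us > 10000)
		return -EINVAL;
	return 0;
}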
/* Reported nanoseconds/femtoseconds per clock */
static u64 gb_timesync_ns_per_clock;
static u64 gb_timesync_fs_per_clock;
/* Maximum difference we will accept converting FrameTime to ktime */
static u32 gb_timesync_max_ktime_diff;
/* Reported clock rate */
static unsigned long gb_timesync_clock_rate;
/* Workqueue */
static void gb_timesync_worker(struct work_struct *work);
/* List of SVCs with one FrameTime per SVC */
static LIST_HEAD(gb_timesync_svc_list);
/* Synchronize parallel contexts accessing a valid timesync_svc pointer */
static DEFINE_MUTEX(gb_timesync_svc_list_mutex);
/* Structure to convert from FrameTime to timespec/ktime */
struct gb_timesync_frame_time_data {
u64 frame_time;
struct timespec ts;
};
struct gb_timesync_svc {
struct list_head list;
struct list_head interface_list;
struct gb_svc *svc;
struct gb_timesync_host_device *timesync_hd;
spinlock_t spinlock; /* Per SVC spinlock to sync with ISR */
struct mutex mutex; /* Per SVC mutex for regular synchronization */
struct dentry *frame_time_dentry;
struct dentry *frame_ktime_dentry;
struct workqueue_struct *work_queue;
wait_queue_head_t wait_queue;
struct delayed_work delayed_work;
struct timer_list ktime_timer;
/* The current local FrameTime */
u64 frame_time_offset;
struct gb_timesync_frame_time_data strobe_data[GB_TIMESYNC_MAX_STROBES];
struct gb_timesync_frame_time_data ktime_data;
/* The SVC FrameTime and relative AP FrameTime @ last TIMESYNC_PING */
u64 svc_ping_frame_time;
u64 ap_ping_frame_time;
/* Transitory settings */
u32 strobe_mask;
bool offset_down;
bool print_ping;
bool capture_ping;
int strobe;
/* Current state */
int state;
};
struct gb_timesync_host_device {
struct list_head list;
struct gb_host_device *hd;
u64 ping_frame_time;
};
struct gb_timesync_interface {
struct list_head list;
struct gb_interface *interface;
u64 ping_frame_time;
};
enum gb_timesync_state {
GB_TIMESYNC_STATE_INVALID = 0,
GB_TIMESYNC_STATE_INACTIVE = 1,
GB_TIMESYNC_STATE_INIT = 2,
GB_TIMESYNC_STATE_WAIT_SVC = 3,
GB_TIMESYNC_STATE_AUTHORITATIVE = 4,
GB_TIMESYNC_STATE_PING = 5,
GB_TIMESYNC_STATE_ACTIVE = 6,
};
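/*
 * State transitions enforced by gb_timesync_set_state() below:
 * - INVALID and INACTIVE may be entered from any state;
 * - INIT from any state except INVALID;
 * - WAIT_SVC only from INIT;
 * - AUTHORITATIVE only from WAIT_SVC;
 * - PING only from ACTIVE;
 * - ACTIVE from AUTHORITATIVE or PING.
 */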
static void gb_timesync_ktime_timer_fn(unsigned long data);
static u64 gb_timesync_adjust_count(struct gb_timesync_svc *timesync_svc,
u64 counts)
{
if (timesync_svc->offset_down)
return counts - timesync_svc->frame_time_offset;
else
return counts + timesync_svc->frame_time_offset;
}
/*
 * This function provides the authoritative FrameTime to a calling function.
 * It is designed to be lockless and should remain that way; the caller is
 * assumed to be state-aware.
 */
static u64 __gb_timesync_get_frame_time(struct gb_timesync_svc *timesync_svc)
{
u64 clocks = gb_timesync_platform_get_counter();
return gb_timesync_adjust_count(timesync_svc, clocks);
}
static void gb_timesync_schedule_svc_timeout(struct gb_timesync_svc
*timesync_svc)
{
queue_delayed_work(timesync_svc->work_queue,
&timesync_svc->delayed_work,
GB_TIMESYNC_MAX_WAIT_SVC);
}
static void gb_timesync_set_state(struct gb_timesync_svc *timesync_svc,
int state)
{
switch (state) {
case GB_TIMESYNC_STATE_INVALID:
timesync_svc->state = state;
wake_up(&timesync_svc->wait_queue);
break;
case GB_TIMESYNC_STATE_INACTIVE:
timesync_svc->state = state;
wake_up(&timesync_svc->wait_queue);
break;
case GB_TIMESYNC_STATE_INIT:
if (timesync_svc->state != GB_TIMESYNC_STATE_INVALID) {
timesync_svc->strobe = 0;
timesync_svc->frame_time_offset = 0;
timesync_svc->state = state;
cancel_delayed_work(&timesync_svc->delayed_work);
queue_delayed_work(timesync_svc->work_queue,
&timesync_svc->delayed_work,
GB_TIMESYNC_DELAYED_WORK_LONG);
}
break;
case GB_TIMESYNC_STATE_WAIT_SVC:
if (timesync_svc->state == GB_TIMESYNC_STATE_INIT)
timesync_svc->state = state;
break;
case GB_TIMESYNC_STATE_AUTHORITATIVE:
if (timesync_svc->state == GB_TIMESYNC_STATE_WAIT_SVC) {
timesync_svc->state = state;
cancel_delayed_work(&timesync_svc->delayed_work);
queue_delayed_work(timesync_svc->work_queue,
&timesync_svc->delayed_work, 0);
}
break;
case GB_TIMESYNC_STATE_PING:
if (timesync_svc->state == GB_TIMESYNC_STATE_ACTIVE) {
timesync_svc->state = state;
queue_delayed_work(timesync_svc->work_queue,
&timesync_svc->delayed_work,
GB_TIMESYNC_DELAYED_WORK_SHORT);
}
break;
case GB_TIMESYNC_STATE_ACTIVE:
if (timesync_svc->state == GB_TIMESYNC_STATE_AUTHORITATIVE ||
timesync_svc->state == GB_TIMESYNC_STATE_PING) {
timesync_svc->state = state;
wake_up(&timesync_svc->wait_queue);
}
break;
}
if (WARN_ON(timesync_svc->state != state)) {
pr_err("Invalid state transition %d=>%d\n",
timesync_svc->state, state);
}
}
static void gb_timesync_set_state_atomic(struct gb_timesync_svc *timesync_svc,
int state)
{
unsigned long flags;
spin_lock_irqsave(&timesync_svc->spinlock, flags);
gb_timesync_set_state(timesync_svc, state);
spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
}
static u64 gb_timesync_diff(u64 x, u64 y)
{
if (x > y)
return x - y;
else
return y - x;
}
static void gb_timesync_adjust_to_svc(struct gb_timesync_svc *svc,
u64 svc_frame_time, u64 ap_frame_time)
{
if (svc_frame_time > ap_frame_time) {
svc->frame_time_offset = svc_frame_time - ap_frame_time;
svc->offset_down = false;
} else {
svc->frame_time_offset = ap_frame_time - svc_frame_time;
svc->offset_down = true;
}
}
/*
* Associate a FrameTime with a ktime timestamp represented as struct timespec
* Requires the calling context to hold timesync_svc->mutex
*/
static void gb_timesync_store_ktime(struct gb_timesync_svc *timesync_svc,
struct timespec ts, u64 frame_time)
{
timesync_svc->ktime_data.ts = ts;
timesync_svc->ktime_data.frame_time = frame_time;
}
/*
* Find the two pulses that best-match our expected inter-strobe gap and
* then calculate the difference between the SVC time at the second pulse
* to the local time at the second pulse.
*/
static void gb_timesync_collate_frame_time(struct gb_timesync_svc *timesync_svc,
u64 *frame_time)
{
int i = 0;
u64 delta, ap_frame_time;
u64 strobe_delay_ns = GB_TIMESYNC_STROBE_DELAY_US * NSEC_PER_USEC;
u64 least = 0;
for (i = 1; i < GB_TIMESYNC_MAX_STROBES; i++) {
delta = timesync_svc->strobe_data[i].frame_time -
timesync_svc->strobe_data[i - 1].frame_time;
delta *= gb_timesync_ns_per_clock;
delta = gb_timesync_diff(delta, strobe_delay_ns);
if (!least || delta < least) {
least = delta;
gb_timesync_adjust_to_svc(timesync_svc, frame_time[i],
timesync_svc->strobe_data[i].frame_time);
ap_frame_time = timesync_svc->strobe_data[i].frame_time;
ap_frame_time = gb_timesync_adjust_count(timesync_svc,
ap_frame_time);
gb_timesync_store_ktime(timesync_svc,
timesync_svc->strobe_data[i].ts,
ap_frame_time);
pr_debug("adjust %s local %llu svc %llu delta %llu\n",
timesync_svc->offset_down ? "down" : "up",
timesync_svc->strobe_data[i].frame_time,
frame_time[i], delta);
}
}
}
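As a worked illustration with example numbers only: if gb_timesync_ns_per_clock were 52 and the expected gap 1000000 ns, strobe pairs measured 19250, 19232 and 19290 clocks apart would give deltas of |1001000 - 1000000| = 1000 ns, |1000064 - 1000000| = 64 ns and |1003080 - 1000000| = 3080 ns; the 64 ns pair is the best match, so its SVC/local FrameTime pair seeds the offset and the ktime reference.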
static void gb_timesync_teardown(struct gb_timesync_svc *timesync_svc)
{
struct gb_timesync_interface *timesync_interface;
struct gb_svc *svc = timesync_svc->svc;
struct gb_interface *interface;
struct gb_host_device *hd;
int ret;
list_for_each_entry(timesync_interface,
&timesync_svc->interface_list, list) {
interface = timesync_interface->interface;
ret = gb_interface_timesync_disable(interface);
if (ret) {
dev_err(&interface->dev,
"interface timesync_disable %d\n", ret);
}
}
hd = timesync_svc->timesync_hd->hd;
ret = hd->driver->timesync_disable(hd);
if (ret < 0) {
dev_err(&hd->dev, "host timesync_disable %d\n",
ret);
}
gb_svc_timesync_wake_pins_release(svc);
gb_svc_timesync_disable(svc);
gb_timesync_platform_unlock_bus();
gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_INACTIVE);
}
static void gb_timesync_platform_lock_bus_fail(struct gb_timesync_svc
*timesync_svc, int ret)
{
if (ret == -EAGAIN) {
gb_timesync_set_state(timesync_svc, timesync_svc->state);
} else {
pr_err("Failed to lock timesync bus %d\n", ret);
gb_timesync_set_state(timesync_svc, GB_TIMESYNC_STATE_INACTIVE);
}
}
static void gb_timesync_enable(struct gb_timesync_svc *timesync_svc)
{
struct gb_svc *svc = timesync_svc->svc;
struct gb_host_device *hd;
struct gb_timesync_interface *timesync_interface;
struct gb_interface *interface;
u64 init_frame_time;
unsigned long clock_rate = gb_timesync_clock_rate;
int ret;
/*
* Get access to the wake pins in the AP and SVC
* Release these pins either in gb_timesync_teardown() or in
* gb_timesync_authoritative()
*/
ret = gb_timesync_platform_lock_bus(timesync_svc);
if (ret < 0) {
gb_timesync_platform_lock_bus_fail(timesync_svc, ret);
return;
}
ret = gb_svc_timesync_wake_pins_acquire(svc, timesync_svc->strobe_mask);
if (ret) {
dev_err(&svc->dev,
"gb_svc_timesync_wake_pins_acquire %d\n", ret);
gb_timesync_teardown(timesync_svc);
return;
}
/* Choose an initial time in the future */
init_frame_time = __gb_timesync_get_frame_time(timesync_svc) + 100000UL;
/* Send enable command to all relevant participants */
list_for_each_entry(timesync_interface, &timesync_svc->interface_list,
list) {
interface = timesync_interface->interface;
ret = gb_interface_timesync_enable(interface,
GB_TIMESYNC_MAX_STROBES,
init_frame_time,
GB_TIMESYNC_STROBE_DELAY_US,
clock_rate);
if (ret) {
dev_err(&interface->dev,
"interface timesync_enable %d\n", ret);
}
}
hd = timesync_svc->timesync_hd->hd;
ret = hd->driver->timesync_enable(hd, GB_TIMESYNC_MAX_STROBES,
init_frame_time,
GB_TIMESYNC_STROBE_DELAY_US,
clock_rate);
if (ret < 0) {
dev_err(&hd->dev, "host timesync_enable %d\n",
ret);
}
gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_WAIT_SVC);
ret = gb_svc_timesync_enable(svc, GB_TIMESYNC_MAX_STROBES,
init_frame_time,
GB_TIMESYNC_STROBE_DELAY_US,
clock_rate);
if (ret) {
dev_err(&svc->dev,
"gb_svc_timesync_enable %d\n", ret);
gb_timesync_teardown(timesync_svc);
return;
}
/* Schedule a timeout waiting for SVC to complete strobing */
gb_timesync_schedule_svc_timeout(timesync_svc);
}
static void gb_timesync_authoritative(struct gb_timesync_svc *timesync_svc)
{
struct gb_svc *svc = timesync_svc->svc;
struct gb_host_device *hd;
struct gb_timesync_interface *timesync_interface;
struct gb_interface *interface;
u64 svc_frame_time[GB_TIMESYNC_MAX_STROBES];
int ret;
/* Get authoritative time from SVC and adjust local clock */
ret = gb_svc_timesync_authoritative(svc, svc_frame_time);
if (ret) {
dev_err(&svc->dev,
"gb_svc_timesync_authoritative %d\n", ret);
gb_timesync_teardown(timesync_svc);
return;
}
gb_timesync_collate_frame_time(timesync_svc, svc_frame_time);
/* Transmit authoritative time to downstream slaves */
hd = timesync_svc->timesync_hd->hd;
ret = hd->driver->timesync_authoritative(hd, svc_frame_time);
if (ret < 0)
dev_err(&hd->dev, "host timesync_authoritative %d\n", ret);
list_for_each_entry(timesync_interface,
&timesync_svc->interface_list, list) {
interface = timesync_interface->interface;
ret = gb_interface_timesync_authoritative(
interface,
svc_frame_time);
if (ret) {
dev_err(&interface->dev,
"interface timesync_authoritative %d\n", ret);
}
}
/* Release wake pins */
gb_svc_timesync_wake_pins_release(svc);
gb_timesync_platform_unlock_bus();
/* Transition to state ACTIVE */
gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_ACTIVE);
/* Schedule a ping to verify the synchronized system time */
timesync_svc->print_ping = true;
gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_PING);
}
static int __gb_timesync_get_status(struct gb_timesync_svc *timesync_svc)
{
int ret = -EINVAL;
switch (timesync_svc->state) {
case GB_TIMESYNC_STATE_INVALID:
case GB_TIMESYNC_STATE_INACTIVE:
ret = -ENODEV;
break;
case GB_TIMESYNC_STATE_INIT:
case GB_TIMESYNC_STATE_WAIT_SVC:
case GB_TIMESYNC_STATE_AUTHORITATIVE:
ret = -EAGAIN;
break;
case GB_TIMESYNC_STATE_PING:
case GB_TIMESYNC_STATE_ACTIVE:
ret = 0;
break;
}
return ret;
}
/*
 * This routine takes a FrameTime and derives the difference with respect
 * to a reference FrameTime/ktime pair. It then returns the calculated
 * ktime based on the difference between the supplied FrameTime and
 * the reference FrameTime.
 *
 * The time difference is calculated to six decimal places. Taking 19.2 MHz
 * as an example, this means we have 52.083333~ nanoseconds per clock, or
 * 52083333~ femtoseconds per clock.
 *
 * Naively taking the count difference and converting to
 * seconds/nanoseconds would quickly see the truncated 0.0833 component
 * produce noticeable errors. For example, a time difference of one second
 * would lose 19200000 * 0.08333~ nanoseconds, which is about 1.6
 * milliseconds.
 *
 * In contrast, calculating in femtoseconds truncates only the 0.3333~
 * femtosecond component, so the same one-second example loses just
 * 19200000 * 0.33333~ femtoseconds, or about 6.4 nanoseconds.
 *
 * Continuing the 19.2 MHz example, we cap the conversion window at a
 * worst-case error of 0.3 microseconds over about 15 seconds, meaning you
 * can convert a FrameTime that is <= 15 seconds older/younger than the
 * reference time with an accumulated truncation error of roughly 0.1
 * microseconds. Note 19.2 MHz is an example frequency, not a requirement.
 */
static int gb_timesync_to_timespec(struct gb_timesync_svc *timesync_svc,
u64 frame_time, struct timespec *ts)
{
unsigned long flags;
u64 delta_fs, counts, sec, nsec;
bool add;
int ret = 0;
memset(ts, 0x00, sizeof(*ts));
mutex_lock(&timesync_svc->mutex);
spin_lock_irqsave(&timesync_svc->spinlock, flags);
ret = __gb_timesync_get_status(timesync_svc);
if (ret)
goto done;
/* Support calculating ktime upwards or downwards from the reference */
if (frame_time < timesync_svc->ktime_data.frame_time) {
add = false;
counts = timesync_svc->ktime_data.frame_time - frame_time;
} else {
add = true;
counts = frame_time - timesync_svc->ktime_data.frame_time;
}
/* Enforce the maximum FrameTime/ktime conversion window (~15 seconds) */
if (counts > gb_timesync_max_ktime_diff) {
ret = -EINVAL;
goto done;
}
/* Determine the time difference in femtoseconds */
delta_fs = counts * gb_timesync_fs_per_clock;
/* Convert to seconds */
sec = delta_fs;
do_div(sec, NSEC_PER_SEC);
do_div(sec, 1000000UL);
/* Get the nanosecond remainder */
nsec = do_div(delta_fs, sec);
do_div(nsec, 1000000UL);
if (add) {
/* Add the calculated offset - overflow nanoseconds upwards */
ts->tv_sec = timesync_svc->ktime_data.ts.tv_sec + sec;
ts->tv_nsec = timesync_svc->ktime_data.ts.tv_nsec + nsec;
if (ts->tv_nsec >= NSEC_PER_SEC) {
ts->tv_sec++;
ts->tv_nsec -= NSEC_PER_SEC;
}
} else {
/* Subtract the difference over/underflow as necessary */
if (nsec > timesync_svc->ktime_data.ts.tv_nsec) {
sec++;
nsec = nsec + timesync_svc->ktime_data.ts.tv_nsec;
nsec = do_div(nsec, NSEC_PER_SEC);
} else {
nsec = timesync_svc->ktime_data.ts.tv_nsec - nsec;
}
/* Cannot return a negative second value */
if (sec > timesync_svc->ktime_data.ts.tv_sec) {
ret = -EINVAL;
goto done;
}
ts->tv_sec = timesync_svc->ktime_data.ts.tv_sec - sec;
ts->tv_nsec = nsec;
}
done:
spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
mutex_unlock(&timesync_svc->mutex);
return ret;
}
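The precision argument in the comment above is easy to check with a few lines of standalone C (a userspace sketch, illustrative only; none of these names exist in the driver):
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t FS_PER_SEC = 1000000000000000ULL; /* 10^15 */
	const uint64_t NS_PER_SEC = 1000000000ULL;       /* 10^9 */
	const uint64_t clock_rate = 19200000ULL;         /* 19.2 MHz example */
	const uint64_t counts = clock_rate;              /* one second of clocks */

	/* Truncated per-clock periods, as gb_timesync_init() computes them */
	uint64_t ns_per_clock = NS_PER_SEC / clock_rate; /* 52 ns */
	uint64_t fs_per_clock = FS_PER_SEC / clock_rate; /* 52083333 fs */

	/* Nanosecond-resolution math loses 1600000 ns (~1.6 ms) per second */
	printf("ns-resolution error: %llu ns\n",
	       (unsigned long long)(NS_PER_SEC - counts * ns_per_clock));

	/* Femtosecond-resolution math loses 6400000 fs (~6.4 ns) per second */
	printf("fs-resolution error: %llu fs\n",
	       (unsigned long long)(FS_PER_SEC - counts * fs_per_clock));
	return 0;
}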
static size_t gb_timesync_log_frame_time(struct gb_timesync_svc *timesync_svc,
char *buf, size_t buflen)
{
struct gb_svc *svc = timesync_svc->svc;
struct gb_host_device *hd;
struct gb_timesync_interface *timesync_interface;
struct gb_interface *interface;
unsigned int len;
size_t off;
/* AP/SVC */
off = snprintf(buf, buflen, "%s frametime: ap=%llu %s=%llu ",
greybus_bus_type.name,
timesync_svc->ap_ping_frame_time, dev_name(&svc->dev),
timesync_svc->svc_ping_frame_time);
len = buflen - off;
/* APB/GPB */
if (len < buflen) {
hd = timesync_svc->timesync_hd->hd;
off += snprintf(&buf[off], len, "%s=%llu ", dev_name(&hd->dev),
timesync_svc->timesync_hd->ping_frame_time);
len = buflen - off;
}
list_for_each_entry(timesync_interface,
&timesync_svc->interface_list, list) {
if (len < buflen) {
interface = timesync_interface->interface;
off += snprintf(&buf[off], len, "%s=%llu ",
dev_name(&interface->dev),
timesync_interface->ping_frame_time);
len = buflen - off;
}
}
if (len < buflen)
off += snprintf(&buf[off], len, "\n");
return off;
}
static size_t gb_timesync_log_frame_ktime(struct gb_timesync_svc *timesync_svc,
char *buf, size_t buflen)
{
struct gb_svc *svc = timesync_svc->svc;
struct gb_host_device *hd;
struct gb_timesync_interface *timesync_interface;
struct gb_interface *interface;
struct timespec ts;
unsigned int len;
size_t off;
/* AP */
gb_timesync_to_timespec(timesync_svc, timesync_svc->ap_ping_frame_time,
&ts);
off = snprintf(buf, buflen, "%s frametime: ap=%lu.%lu ",
greybus_bus_type.name, ts.tv_sec, ts.tv_nsec);
len = buflen - off;
if (len >= buflen)
goto done;
/* SVC */
gb_timesync_to_timespec(timesync_svc, timesync_svc->svc_ping_frame_time,
&ts);
off += snprintf(&buf[off], len, "%s=%lu.%lu ", dev_name(&svc->dev),
ts.tv_sec, ts.tv_nsec);
len = buflen - off;
if (len >= buflen)
goto done;
/* APB/GPB */
hd = timesync_svc->timesync_hd->hd;
gb_timesync_to_timespec(timesync_svc,
timesync_svc->timesync_hd->ping_frame_time,
&ts);
off += snprintf(&buf[off], len, "%s=%lu.%lu ",
dev_name(&hd->dev),
ts.tv_sec, ts.tv_nsec);
len = buflen - off;
if (len >= buflen)
goto done;
list_for_each_entry(timesync_interface,
&timesync_svc->interface_list, list) {
interface = timesync_interface->interface;
gb_timesync_to_timespec(timesync_svc,
timesync_interface->ping_frame_time,
&ts);
off += snprintf(&buf[off], len, "%s=%lu.%lu ",
dev_name(&interface->dev),
ts.tv_sec, ts.tv_nsec);
len = buflen - off;
if (len >= buflen)
goto done;
}
off += snprintf(&buf[off], len, "\n");
done:
return off;
}
/*
* Send an SVC initiated wake 'ping' to each TimeSync participant.
* Get the FrameTime from each participant associated with the wake
* ping.
*/
static void gb_timesync_ping(struct gb_timesync_svc *timesync_svc)
{
struct gb_svc *svc = timesync_svc->svc;
struct gb_host_device *hd;
struct gb_timesync_interface *timesync_interface;
struct gb_control *control;
u64 *ping_frame_time;
int ret;
/* Get access to the wake pins in the AP and SVC */
ret = gb_timesync_platform_lock_bus(timesync_svc);
if (ret < 0) {
gb_timesync_platform_lock_bus_fail(timesync_svc, ret);
return;
}
ret = gb_svc_timesync_wake_pins_acquire(svc, timesync_svc->strobe_mask);
if (ret) {
dev_err(&svc->dev,
"gb_svc_timesync_wake_pins_acquire %d\n", ret);
gb_timesync_teardown(timesync_svc);
return;
}
/* Have SVC generate a timesync ping */
timesync_svc->capture_ping = true;
timesync_svc->svc_ping_frame_time = 0;
ret = gb_svc_timesync_ping(svc, &timesync_svc->svc_ping_frame_time);
timesync_svc->capture_ping = false;
if (ret) {
dev_err(&svc->dev,
"gb_svc_timesync_ping %d\n", ret);
gb_timesync_teardown(timesync_svc);
return;
}
/* Get the ping FrameTime from each APB/GPB */
hd = timesync_svc->timesync_hd->hd;
timesync_svc->timesync_hd->ping_frame_time = 0;
ret = hd->driver->timesync_get_last_event(hd,
&timesync_svc->timesync_hd->ping_frame_time);
if (ret)
dev_err(&hd->dev, "host timesync_get_last_event %d\n", ret);
list_for_each_entry(timesync_interface,
&timesync_svc->interface_list, list) {
control = timesync_interface->interface->control;
timesync_interface->ping_frame_time = 0;
ping_frame_time = &timesync_interface->ping_frame_time;
ret = gb_control_timesync_get_last_event(control,
ping_frame_time);
if (ret) {
dev_err(&timesync_interface->interface->dev,
"gb_control_timesync_get_last_event %d\n", ret);
}
}
/* Ping success - move to timesync active */
gb_svc_timesync_wake_pins_release(svc);
gb_timesync_platform_unlock_bus();
gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_ACTIVE);
}
static void gb_timesync_log_ping_time(struct gb_timesync_svc *timesync_svc)
{
char *buf;
if (!timesync_svc->print_ping)
return;
buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (buf) {
gb_timesync_log_frame_time(timesync_svc, buf, PAGE_SIZE);
dev_dbg(&timesync_svc->svc->dev, "%s", buf);
kfree(buf);
}
}
/*
* Perform the actual work of scheduled TimeSync logic.
*/
static void gb_timesync_worker(struct work_struct *work)
{
struct delayed_work *delayed_work = to_delayed_work(work);
struct gb_timesync_svc *timesync_svc =
container_of(delayed_work, struct gb_timesync_svc, delayed_work);
mutex_lock(&timesync_svc->mutex);
switch (timesync_svc->state) {
case GB_TIMESYNC_STATE_INIT:
gb_timesync_enable(timesync_svc);
break;
case GB_TIMESYNC_STATE_WAIT_SVC:
dev_err(&timesync_svc->svc->dev,
"timeout SVC strobe completion %d/%d\n",
timesync_svc->strobe, GB_TIMESYNC_MAX_STROBES);
gb_timesync_teardown(timesync_svc);
break;
case GB_TIMESYNC_STATE_AUTHORITATIVE:
gb_timesync_authoritative(timesync_svc);
break;
case GB_TIMESYNC_STATE_PING:
gb_timesync_ping(timesync_svc);
gb_timesync_log_ping_time(timesync_svc);
break;
default:
pr_err("Invalid state %d for delayed work\n",
timesync_svc->state);
break;
}
mutex_unlock(&timesync_svc->mutex);
}
/*
* Schedule a new TimeSync INIT or PING operation serialized w/r to
* gb_timesync_worker().
*/
static int gb_timesync_schedule(struct gb_timesync_svc *timesync_svc, int state)
{
int ret = 0;
if (state != GB_TIMESYNC_STATE_INIT && state != GB_TIMESYNC_STATE_PING)
return -EINVAL;
mutex_lock(&timesync_svc->mutex);
if (timesync_svc->state != GB_TIMESYNC_STATE_INVALID)
gb_timesync_set_state_atomic(timesync_svc, state);
else
ret = -ENODEV;
mutex_unlock(&timesync_svc->mutex);
return ret;
}
static int __gb_timesync_schedule_synchronous(
struct gb_timesync_svc *timesync_svc, int state)
{
unsigned long flags;
int ret;
ret = gb_timesync_schedule(timesync_svc, state);
if (ret)
return ret;
ret = wait_event_interruptible(timesync_svc->wait_queue,
(timesync_svc->state == GB_TIMESYNC_STATE_ACTIVE ||
timesync_svc->state == GB_TIMESYNC_STATE_INACTIVE ||
timesync_svc->state == GB_TIMESYNC_STATE_INVALID));
if (ret)
return ret;
mutex_lock(&timesync_svc->mutex);
spin_lock_irqsave(&timesync_svc->spinlock, flags);
ret = __gb_timesync_get_status(timesync_svc);
spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
mutex_unlock(&timesync_svc->mutex);
return ret;
}
static struct gb_timesync_svc *gb_timesync_find_timesync_svc(
struct gb_host_device *hd)
{
struct gb_timesync_svc *timesync_svc;
list_for_each_entry(timesync_svc, &gb_timesync_svc_list, list) {
if (timesync_svc->svc == hd->svc)
return timesync_svc;
}
return NULL;
}
static struct gb_timesync_interface *gb_timesync_find_timesync_interface(
struct gb_timesync_svc *timesync_svc,
struct gb_interface *interface)
{
struct gb_timesync_interface *timesync_interface;
list_for_each_entry(timesync_interface, &timesync_svc->interface_list, list) {
if (timesync_interface->interface == interface)
return timesync_interface;
}
return NULL;
}
int gb_timesync_schedule_synchronous(struct gb_interface *interface)
{
int ret;
struct gb_timesync_svc *timesync_svc;
int retries;
if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC))
return 0;
mutex_lock(&gb_timesync_svc_list_mutex);
for (retries = 0; retries < GB_TIMESYNC_MAX_RETRIES; retries++) {
timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
if (!timesync_svc) {
ret = -ENODEV;
goto done;
}
ret = __gb_timesync_schedule_synchronous(timesync_svc,
GB_TIMESYNC_STATE_INIT);
if (!ret)
break;
}
if (ret && retries == GB_TIMESYNC_MAX_RETRIES)
ret = -ETIMEDOUT;
done:
mutex_unlock(&gb_timesync_svc_list_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(gb_timesync_schedule_synchronous);
void gb_timesync_schedule_asynchronous(struct gb_interface *interface)
{
struct gb_timesync_svc *timesync_svc;
if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC))
return;
mutex_lock(&gb_timesync_svc_list_mutex);
timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
if (!timesync_svc)
goto done;
gb_timesync_schedule(timesync_svc, GB_TIMESYNC_STATE_INIT);
done:
mutex_unlock(&gb_timesync_svc_list_mutex);
return;
}
EXPORT_SYMBOL_GPL(gb_timesync_schedule_asynchronous);
static ssize_t gb_timesync_ping_read(struct file *file, char __user *ubuf,
size_t len, loff_t *offset, bool ktime)
{
struct gb_timesync_svc *timesync_svc = file_inode(file)->i_private;
char *buf;
ssize_t ret = 0;
mutex_lock(&gb_timesync_svc_list_mutex);
mutex_lock(&timesync_svc->mutex);
if (list_empty(&timesync_svc->interface_list))
ret = -ENODEV;
timesync_svc->print_ping = false;
mutex_unlock(&timesync_svc->mutex);
if (ret)
goto done;
ret = __gb_timesync_schedule_synchronous(timesync_svc,
GB_TIMESYNC_STATE_PING);
if (ret)
goto done;
buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!buf) {
ret = -ENOMEM;
goto done;
}
if (ktime)
ret = gb_timesync_log_frame_ktime(timesync_svc, buf, PAGE_SIZE);
else
ret = gb_timesync_log_frame_time(timesync_svc, buf, PAGE_SIZE);
if (ret > 0)
ret = simple_read_from_buffer(ubuf, len, offset, buf, ret);
kfree(buf);
done:
mutex_unlock(&gb_timesync_svc_list_mutex);
return ret;
}
static ssize_t gb_timesync_ping_read_frame_time(struct file *file,
char __user *buf,
size_t len, loff_t *offset)
{
return gb_timesync_ping_read(file, buf, len, offset, false);
}
static ssize_t gb_timesync_ping_read_frame_ktime(struct file *file,
char __user *buf,
size_t len, loff_t *offset)
{
return gb_timesync_ping_read(file, buf, len, offset, true);
}
static const struct file_operations gb_timesync_debugfs_frame_time_ops = {
.read = gb_timesync_ping_read_frame_time,
};
static const struct file_operations gb_timesync_debugfs_frame_ktime_ops = {
.read = gb_timesync_ping_read_frame_ktime,
};
static int gb_timesync_hd_add(struct gb_timesync_svc *timesync_svc,
struct gb_host_device *hd)
{
struct gb_timesync_host_device *timesync_hd;
timesync_hd = kzalloc(sizeof(*timesync_hd), GFP_KERNEL);
if (!timesync_hd)
return -ENOMEM;
WARN_ON(timesync_svc->timesync_hd);
timesync_hd->hd = hd;
timesync_svc->timesync_hd = timesync_hd;
return 0;
}
static void gb_timesync_hd_remove(struct gb_timesync_svc *timesync_svc,
struct gb_host_device *hd)
{
if (timesync_svc->timesync_hd->hd == hd) {
kfree(timesync_svc->timesync_hd);
timesync_svc->timesync_hd = NULL;
return;
}
WARN_ON(1);
}
int gb_timesync_svc_add(struct gb_svc *svc)
{
struct gb_timesync_svc *timesync_svc;
int ret;
timesync_svc = kzalloc(sizeof(*timesync_svc), GFP_KERNEL);
if (!timesync_svc)
return -ENOMEM;
timesync_svc->work_queue =
create_singlethread_workqueue("gb-timesync-work_queue");
if (!timesync_svc->work_queue) {
kfree(timesync_svc);
return -ENOMEM;
}
mutex_lock(&gb_timesync_svc_list_mutex);
INIT_LIST_HEAD(&timesync_svc->interface_list);
INIT_DELAYED_WORK(&timesync_svc->delayed_work, gb_timesync_worker);
mutex_init(&timesync_svc->mutex);
spin_lock_init(&timesync_svc->spinlock);
init_waitqueue_head(&timesync_svc->wait_queue);
timesync_svc->svc = svc;
timesync_svc->frame_time_offset = 0;
timesync_svc->capture_ping = false;
gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_INACTIVE);
timesync_svc->frame_time_dentry =
debugfs_create_file("frame-time", S_IRUGO, svc->debugfs_dentry,
timesync_svc,
&gb_timesync_debugfs_frame_time_ops);
timesync_svc->frame_ktime_dentry =
debugfs_create_file("frame-ktime", S_IRUGO, svc->debugfs_dentry,
timesync_svc,
&gb_timesync_debugfs_frame_ktime_ops);
list_add(&timesync_svc->list, &gb_timesync_svc_list);
ret = gb_timesync_hd_add(timesync_svc, svc->hd);
if (ret) {
list_del(&timesync_svc->list);
debugfs_remove(timesync_svc->frame_ktime_dentry);
debugfs_remove(timesync_svc->frame_time_dentry);
destroy_workqueue(timesync_svc->work_queue);
kfree(timesync_svc);
goto done;
}
init_timer(&timesync_svc->ktime_timer);
timesync_svc->ktime_timer.function = gb_timesync_ktime_timer_fn;
timesync_svc->ktime_timer.expires = jiffies + GB_TIMESYNC_KTIME_UPDATE;
timesync_svc->ktime_timer.data = (unsigned long)timesync_svc;
add_timer(&timesync_svc->ktime_timer);
done:
mutex_unlock(&gb_timesync_svc_list_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(gb_timesync_svc_add);
void gb_timesync_svc_remove(struct gb_svc *svc)
{
struct gb_timesync_svc *timesync_svc;
struct gb_timesync_interface *timesync_interface;
struct gb_timesync_interface *next;
mutex_lock(&gb_timesync_svc_list_mutex);
timesync_svc = gb_timesync_find_timesync_svc(svc->hd);
if (!timesync_svc)
goto done;
cancel_delayed_work_sync(&timesync_svc->delayed_work);
mutex_lock(&timesync_svc->mutex);
gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_INVALID);
del_timer_sync(&timesync_svc->ktime_timer);
gb_timesync_teardown(timesync_svc);
gb_timesync_hd_remove(timesync_svc, svc->hd);
list_for_each_entry_safe(timesync_interface, next,
&timesync_svc->interface_list, list) {
list_del(&timesync_interface->list);
kfree(timesync_interface);
}
debugfs_remove(timesync_svc->frame_ktime_dentry);
debugfs_remove(timesync_svc->frame_time_dentry);
destroy_workqueue(timesync_svc->work_queue);
list_del(&timesync_svc->list);
mutex_unlock(&timesync_svc->mutex);
kfree(timesync_svc);
done:
mutex_unlock(&gb_timesync_svc_list_mutex);
}
EXPORT_SYMBOL_GPL(gb_timesync_svc_remove);
/*
* Add a Greybus Interface to the set of TimeSync Interfaces.
*/
int gb_timesync_interface_add(struct gb_interface *interface)
{
struct gb_timesync_svc *timesync_svc;
struct gb_timesync_interface *timesync_interface;
int ret = 0;
if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC))
return 0;
mutex_lock(&gb_timesync_svc_list_mutex);
timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
if (!timesync_svc) {
ret = -ENODEV;
goto done;
}
timesync_interface = kzalloc(sizeof(*timesync_interface), GFP_KERNEL);
if (!timesync_interface) {
ret = -ENOMEM;
goto done;
}
mutex_lock(&timesync_svc->mutex);
timesync_interface->interface = interface;
list_add(&timesync_interface->list, &timesync_svc->interface_list);
timesync_svc->strobe_mask |= 1 << interface->interface_id;
mutex_unlock(&timesync_svc->mutex);
done:
mutex_unlock(&gb_timesync_svc_list_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(gb_timesync_interface_add);
/*
* Remove a Greybus Interface from the set of TimeSync Interfaces.
*/
void gb_timesync_interface_remove(struct gb_interface *interface)
{
struct gb_timesync_svc *timesync_svc;
struct gb_timesync_interface *timesync_interface;
if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC))
return;
mutex_lock(&gb_timesync_svc_list_mutex);
timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
if (!timesync_svc)
goto done;
timesync_interface = gb_timesync_find_timesync_interface(timesync_svc,
interface);
if (!timesync_interface)
goto done;
mutex_lock(&timesync_svc->mutex);
timesync_svc->strobe_mask &= ~(1 << interface->interface_id);
list_del(&timesync_interface->list);
kfree(timesync_interface);
mutex_unlock(&timesync_svc->mutex);
done:
mutex_unlock(&gb_timesync_svc_list_mutex);
}
EXPORT_SYMBOL_GPL(gb_timesync_interface_remove);
/*
* Give the authoritative FrameTime to the calling function. Returns zero if we
* are not in GB_TIMESYNC_STATE_ACTIVE.
*/
static u64 gb_timesync_get_frame_time(struct gb_timesync_svc *timesync_svc)
{
unsigned long flags;
u64 ret;
spin_lock_irqsave(&timesync_svc->spinlock, flags);
if (timesync_svc->state == GB_TIMESYNC_STATE_ACTIVE)
ret = __gb_timesync_get_frame_time(timesync_svc);
else
ret = 0;
spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
return ret;
}
u64 gb_timesync_get_frame_time_by_interface(struct gb_interface *interface)
{
struct gb_timesync_svc *timesync_svc;
u64 ret = 0;
mutex_lock(&gb_timesync_svc_list_mutex);
timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
if (!timesync_svc)
goto done;
ret = gb_timesync_get_frame_time(timesync_svc);
done:
mutex_unlock(&gb_timesync_svc_list_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(gb_timesync_get_frame_time_by_interface);
u64 gb_timesync_get_frame_time_by_svc(struct gb_svc *svc)
{
struct gb_timesync_svc *timesync_svc;
u64 ret = 0;
mutex_lock(&gb_timesync_svc_list_mutex);
timesync_svc = gb_timesync_find_timesync_svc(svc->hd);
if (!timesync_svc)
goto done;
ret = gb_timesync_get_frame_time(timesync_svc);
done:
mutex_unlock(&gb_timesync_svc_list_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(gb_timesync_get_frame_time_by_svc);
/* Incrementally updates the conversion base from FrameTime to ktime */
static void gb_timesync_ktime_timer_fn(unsigned long data)
{
struct gb_timesync_svc *timesync_svc =
(struct gb_timesync_svc *)data;
unsigned long flags;
u64 frame_time;
struct timespec ts;
spin_lock_irqsave(&timesync_svc->spinlock, flags);
if (timesync_svc->state != GB_TIMESYNC_STATE_ACTIVE)
goto done;
ktime_get_ts(&ts);
frame_time = __gb_timesync_get_frame_time(timesync_svc);
gb_timesync_store_ktime(timesync_svc, ts, frame_time);
done:
spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
mod_timer(&timesync_svc->ktime_timer,
jiffies + GB_TIMESYNC_KTIME_UPDATE);
}
int gb_timesync_to_timespec_by_svc(struct gb_svc *svc, u64 frame_time,
struct timespec *ts)
{
struct gb_timesync_svc *timesync_svc;
int ret = 0;
mutex_lock(&gb_timesync_svc_list_mutex);
timesync_svc = gb_timesync_find_timesync_svc(svc->hd);
if (!timesync_svc) {
ret = -ENODEV;
goto done;
}
ret = gb_timesync_to_timespec(timesync_svc, frame_time, ts);
done:
mutex_unlock(&gb_timesync_svc_list_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(gb_timesync_to_timespec_by_svc);
int gb_timesync_to_timespec_by_interface(struct gb_interface *interface,
u64 frame_time, struct timespec *ts)
{
struct gb_timesync_svc *timesync_svc;
int ret = 0;
mutex_lock(&gb_timesync_svc_list_mutex);
timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
if (!timesync_svc) {
ret = -ENODEV;
goto done;
}
ret = gb_timesync_to_timespec(timesync_svc, frame_time, ts);
done:
mutex_unlock(&gb_timesync_svc_list_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(gb_timesync_to_timespec_by_interface);
void gb_timesync_irq(struct gb_timesync_svc *timesync_svc)
{
unsigned long flags;
u64 strobe_time;
bool strobe_is_ping = true;
struct timespec ts;
ktime_get_ts(&ts);
strobe_time = __gb_timesync_get_frame_time(timesync_svc);
spin_lock_irqsave(&timesync_svc->spinlock, flags);
if (timesync_svc->state == GB_TIMESYNC_STATE_PING) {
if (!timesync_svc->capture_ping)
goto done_nolog;
timesync_svc->ap_ping_frame_time = strobe_time;
goto done_log;
} else if (timesync_svc->state != GB_TIMESYNC_STATE_WAIT_SVC) {
goto done_nolog;
}
timesync_svc->strobe_data[timesync_svc->strobe].frame_time = strobe_time;
timesync_svc->strobe_data[timesync_svc->strobe].ts = ts;
if (++timesync_svc->strobe == GB_TIMESYNC_MAX_STROBES) {
gb_timesync_set_state(timesync_svc,
GB_TIMESYNC_STATE_AUTHORITATIVE);
}
strobe_is_ping = false;
done_log:
trace_gb_timesync_irq(strobe_is_ping, timesync_svc->strobe,
GB_TIMESYNC_MAX_STROBES, strobe_time);
done_nolog:
spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
}
EXPORT_SYMBOL(gb_timesync_irq);
int __init gb_timesync_init(void)
{
int ret = 0;
ret = gb_timesync_platform_init();
if (ret) {
pr_err("timesync platform init fail!\n");
return ret;
}
gb_timesync_clock_rate = gb_timesync_platform_get_clock_rate();
/* Calculate nanoseconds and femtoseconds per clock */
gb_timesync_fs_per_clock = FSEC_PER_SEC;
do_div(gb_timesync_fs_per_clock, gb_timesync_clock_rate);
gb_timesync_ns_per_clock = NSEC_PER_SEC;
do_div(gb_timesync_ns_per_clock, gb_timesync_clock_rate);
/* Calculate the maximum number of clocks we will convert to ktime */
gb_timesync_max_ktime_diff =
GB_TIMESYNC_MAX_KTIME_CONVERSION * gb_timesync_clock_rate;
pr_info("Time-Sync @ %lu Hz max ktime conversion +/- %d seconds\n",
gb_timesync_clock_rate, GB_TIMESYNC_MAX_KTIME_CONVERSION);
return 0;
}
void gb_timesync_exit(void)
{
gb_timesync_platform_exit();
}
drivers/staging/greybus/timesync.h (deleted)
/*
* TimeSync API driver.
*
* Copyright 2016 Google Inc.
* Copyright 2016 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#ifndef __TIMESYNC_H
#define __TIMESYNC_H
struct gb_svc;
struct gb_interface;
struct gb_timesync_svc;
/* Platform */
u64 gb_timesync_platform_get_counter(void);
u32 gb_timesync_platform_get_clock_rate(void);
int gb_timesync_platform_lock_bus(struct gb_timesync_svc *pdata);
void gb_timesync_platform_unlock_bus(void);
int gb_timesync_platform_init(void);
void gb_timesync_platform_exit(void);
/* Core API */
int gb_timesync_interface_add(struct gb_interface *interface);
void gb_timesync_interface_remove(struct gb_interface *interface);
int gb_timesync_svc_add(struct gb_svc *svc);
void gb_timesync_svc_remove(struct gb_svc *svc);
u64 gb_timesync_get_frame_time_by_interface(struct gb_interface *interface);
u64 gb_timesync_get_frame_time_by_svc(struct gb_svc *svc);
int gb_timesync_to_timespec_by_svc(struct gb_svc *svc, u64 frame_time,
struct timespec *ts);
int gb_timesync_to_timespec_by_interface(struct gb_interface *interface,
u64 frame_time, struct timespec *ts);
int gb_timesync_schedule_synchronous(struct gb_interface *intf);
void gb_timesync_schedule_asynchronous(struct gb_interface *intf);
void gb_timesync_irq(struct gb_timesync_svc *timesync_svc);
int gb_timesync_init(void);
void gb_timesync_exit(void);
#endif /* __TIMESYNC_H */
drivers/staging/greybus/timesync_platform.c (deleted)
/*
* TimeSync API driver.
*
* Copyright 2016 Google Inc.
* Copyright 2016 Linaro Ltd.
*
* Released under the GPLv2 only.
*
 * This code reads directly from an ARMv7 memory-mapped timer that lives in
 * MMIO space. Since the counter lives in MMIO space it's shared between
 * cores, which means we don't have to worry about issues like the TSC on
 * x86, where each time-stamp counter is local to a particular core.
 *
 * Register-level access code is based on
 * drivers/clocksource/arm_arch_timer.c
 */
#include <linux/cpufreq.h>
#include <linux/of_platform.h>
#include "greybus.h"
#include "arche_platform.h"
#define DEFAULT_FRAMETIME_CLOCK_HZ 19200000
static u32 gb_timesync_clock_frequency;
int (*arche_platform_change_state_cb)(enum arche_platform_state state,
struct gb_timesync_svc *pdata);
EXPORT_SYMBOL_GPL(arche_platform_change_state_cb);
u64 gb_timesync_platform_get_counter(void)
{
return (u64)get_cycles();
}
u32 gb_timesync_platform_get_clock_rate(void)
{
if (unlikely(!gb_timesync_clock_frequency)) {
gb_timesync_clock_frequency = cpufreq_get(0);
if (!gb_timesync_clock_frequency)
gb_timesync_clock_frequency = DEFAULT_FRAMETIME_CLOCK_HZ;
}
return gb_timesync_clock_frequency;
}
int gb_timesync_platform_lock_bus(struct gb_timesync_svc *pdata)
{
return arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_TIME_SYNC,
pdata);
}
void gb_timesync_platform_unlock_bus(void)
{
arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_ACTIVE, NULL);
}
static const struct of_device_id arch_timer_of_match[] = {
{ .compatible = "google,greybus-frame-time-counter", },
{},
};
int __init gb_timesync_platform_init(void)
{
struct device_node *np;
np = of_find_matching_node(NULL, arch_timer_of_match);
if (!np) {
/* Tolerate not finding the node, to allow BBB etc. to continue */
pr_warn("Unable to find a compatible ARMv7 timer\n");
return 0;
}
if (of_property_read_u32(np, "clock-frequency",
&gb_timesync_clock_frequency)) {
pr_err("Unable to find timer clock-frequency\n");
return -ENODEV;
}
return 0;
}
void gb_timesync_platform_exit(void) {}
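For reference, gb_timesync_platform_init() above looks for a device tree node of roughly this shape (an illustrative sketch; the node name is an assumption, only the compatible string and the clock-frequency property come from the code):
frame-time-counter {
	compatible = "google,greybus-frame-time-counter";
	clock-frequency = <19200000>;
};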