Commit 9835e58a authored by Rafael J. Wysocki


Merge tag 'pull_req_20150129' of git://git.kernel.org/pub/scm/linux/kernel/git/mzx/devfreq into pm-devfreq

Pull devfreq changes for v3.20 from MyungJoo Ham.

* tag 'pull_req_20150129' of git://git.kernel.org/pub/scm/linux/kernel/git/mzx/devfreq:
  PM / devfreq: event: Add documentation for exynos-ppmu devfreq-event driver
  devfreq: Fix build break of devfreq-event class
  PM / devfreq: event: Add devfreq_event class
  PM / devfreq: tegra: add devfreq driver for Tegra Activity Monitor
parents 26bc420b 6d96302f
* Samsung Exynos PPMU (Platform Performance Monitoring Unit) device
The Samsung Exynos SoC has a PPMU (Platform Performance Monitoring Unit) for
each IP block. The PPMU provides the primitive counter values used to derive
performance data. These PPMU events describe the SoC's behavior, so you can
use them to analyze system performance, make that behavior visible and count
the usage of each IP (DMC, CPU, RIGHTBUS, LEFTBUS, CAM interface, LCD, G3D, MFC).
The Exynos PPMU driver uses the devfreq-event class to provide event data
to various devfreq devices. The devfreq devices use the event data when
determining the current state of each IP.
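For illustration only (not part of the binding), a minimal C sketch of how a
devfreq driver might consume this event data through the devfreq-event API
added in this series is listed below. The example_* function name is
hypothetical; the "devfreq-events" phandle property is the one parsed by
devfreq_event_get_edev_by_phandle(), and a real driver would let a full
sampling period elapse between set_event and get_event.

#include <linux/devfreq-event.h>
#include <linux/err.h>

/* Hypothetical consumer: read one utilization sample from the first
 * "devfreq-events" phandle of this device's DT node. */
static int example_read_ppmu_load(struct device *dev, unsigned long *load_pct)
{
	struct devfreq_event_dev *edev;
	struct devfreq_event_data edata;
	int ret;

	edev = devfreq_event_get_edev_by_phandle(dev, 0);
	if (IS_ERR(edev))
		return PTR_ERR(edev);

	ret = devfreq_event_enable_edev(edev);
	if (ret < 0)
		return ret;

	/* Start counting; wait one sampling period before reading back. */
	ret = devfreq_event_set_event(edev);
	if (ret < 0)
		goto out;

	/* Stop the counters and fetch the counts for this window. */
	ret = devfreq_event_get_event(edev, &edata);
	if (ret < 0)
		goto out;

	*load_pct = edata.total_count ?
		edata.load_count * 100UL / edata.total_count : 0;
out:
	devfreq_event_disable_edev(edev);
	return ret;
}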
Required properties:
- compatible: Should be "samsung,exynos-ppmu".
- reg: physical base address of each PPMU and length of memory mapped region.
Optional properties:
- clock-names : the name of the clock used by the PPMU, "ppmu"
- clocks : phandle for the clock specified in the "clock-names" property
- #clock-cells: should be 1.
Example1 : PPMU nodes in exynos3250.dtsi are listed below.
ppmu_dmc0: ppmu_dmc0@106a0000 {
compatible = "samsung,exynos-ppmu";
reg = <0x106a0000 0x2000>;
status = "disabled";
};
ppmu_dmc1: ppmu_dmc1@106b0000 {
compatible = "samsung,exynos-ppmu";
reg = <0x106b0000 0x2000>;
status = "disabled";
};
ppmu_cpu: ppmu_cpu@106c0000 {
compatible = "samsung,exynos-ppmu";
reg = <0x106c0000 0x2000>;
status = "disabled";
};
ppmu_rightbus: ppmu_rightbus@112a0000 {
compatible = "samsung,exynos-ppmu";
reg = <0x112a0000 0x2000>;
clocks = <&cmu CLK_PPMURIGHT>;
clock-names = "ppmu";
status = "disabled";
};
ppmu_leftbus: ppmu_leftbus0@116a0000 {
compatible = "samsung,exynos-ppmu";
reg = <0x116a0000 0x2000>;
clocks = <&cmu CLK_PPMULEFT>;
clock-names = "ppmu";
status = "disabled";
};
Example2 : Events of each PPMU node in exynos3250-rinato.dts are listed below.
&ppmu_dmc0 {
status = "okay";
events {
ppmu_dmc0_3: ppmu-event3-dmc0 {
event-name = "ppmu-event3-dmc0";
};
ppmu_dmc0_2: ppmu-event2-dmc0 {
event-name = "ppmu-event2-dmc0";
};
ppmu_dmc0_1: ppmu-event1-dmc0 {
event-name = "ppmu-event1-dmc0";
};
ppmu_dmc0_0: ppmu-event0-dmc0 {
event-name = "ppmu-event0-dmc0";
};
};
};
&ppmu_dmc1 {
status = "okay";
events {
ppmu_dmc1_3: ppmu-event3-dmc1 {
event-name = "ppmu-event3-dmc1";
};
};
};
&ppmu_leftbus {
status = "okay";
events {
ppmu_leftbus_3: ppmu-event3-leftbus {
event-name = "ppmu-event3-leftbus";
};
};
};
&ppmu_rightbus {
status = "okay";
events {
ppmu_rightbus_3: ppmu-event3-rightbus {
event-name = "ppmu-event3-rightbus";
};
};
};
@@ -87,4 +87,16 @@ config ARM_EXYNOS5_BUS_DEVFREQ
It reads PPMU counters of memory controllers and adjusts the
operating frequencies and voltages with OPP support.
config ARM_TEGRA_DEVFREQ
tristate "Tegra DEVFREQ Driver"
depends on ARCH_TEGRA_124_SOC
select DEVFREQ_GOV_SIMPLE_ONDEMAND
select PM_OPP
help
This adds the DEVFREQ driver for the Tegra family of SoCs.
It reads ACTMON counters of memory controllers and adjusts the
operating frequencies and voltages with OPP support.
source "drivers/devfreq/event/Kconfig"
endif # PM_DEVFREQ
obj-$(CONFIG_PM_DEVFREQ) += devfreq.o
obj-$(CONFIG_PM_DEVFREQ_EVENT) += devfreq-event.o
obj-$(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) += governor_simpleondemand.o
obj-$(CONFIG_DEVFREQ_GOV_PERFORMANCE) += governor_performance.o
obj-$(CONFIG_DEVFREQ_GOV_POWERSAVE) += governor_powersave.o
@@ -7,3 +8,7 @@ obj-$(CONFIG_DEVFREQ_GOV_USERSPACE) += governor_userspace.o
# DEVFREQ Drivers
obj-$(CONFIG_ARM_EXYNOS4_BUS_DEVFREQ) += exynos/
obj-$(CONFIG_ARM_EXYNOS5_BUS_DEVFREQ) += exynos/
obj-$(CONFIG_ARM_TEGRA_DEVFREQ) += tegra-devfreq.o
# DEVFREQ Event Drivers
obj-$(CONFIG_PM_DEVFREQ_EVENT) += event/
/*
* devfreq-event: a framework to provide raw data and events of devfreq devices
*
* Copyright (C) 2015 Samsung Electronics
* Author: Chanwoo Choi <cw00.choi@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This driver is based on drivers/devfreq/devfreq.c.
*/
#include <linux/devfreq-event.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/of.h>
static struct class *devfreq_event_class;
/* The list of all registered devfreq-event devices */
static LIST_HEAD(devfreq_event_list);
static DEFINE_MUTEX(devfreq_event_list_lock);
#define to_devfreq_event(DEV) container_of(DEV, struct devfreq_event_dev, dev)
/**
* devfreq_event_enable_edev() - Enable the devfreq-event dev and increase
* the enable_count of devfreq-event dev.
* @edev : the devfreq-event device
*
* Note that this function increases the enable_count and enables the
* devfreq-event device. The devfreq-event device should be enabled before
* it is used by a devfreq device.
*/
int devfreq_event_enable_edev(struct devfreq_event_dev *edev)
{
int ret = 0;
if (!edev || !edev->desc)
return -EINVAL;
mutex_lock(&edev->lock);
if (edev->desc->ops && edev->desc->ops->enable
&& edev->enable_count == 0) {
ret = edev->desc->ops->enable(edev);
if (ret < 0)
goto err;
}
edev->enable_count++;
err:
mutex_unlock(&edev->lock);
return ret;
}
EXPORT_SYMBOL_GPL(devfreq_event_enable_edev);
/**
* devfreq_event_disable_edev() - Disable the devfreq-event dev and decrease
* the enable_count of the devfreq-event dev.
* @edev : the devfreq-event device
*
* Note that this function decreases the enable_count and disables the
* devfreq-event device. After the devfreq-event device is disabled, a
* devfreq device can't use it for get/set/reset operations.
*/
int devfreq_event_disable_edev(struct devfreq_event_dev *edev)
{
int ret = 0;
if (!edev || !edev->desc)
return -EINVAL;
mutex_lock(&edev->lock);
if (edev->enable_count <= 0) {
dev_warn(&edev->dev, "unbalanced enable_count\n");
ret = -EIO;
goto err;
}
if (edev->desc->ops && edev->desc->ops->disable
&& edev->enable_count == 1) {
ret = edev->desc->ops->disable(edev);
if (ret < 0)
goto err;
}
edev->enable_count--;
err:
mutex_unlock(&edev->lock);
return ret;
}
EXPORT_SYMBOL_GPL(devfreq_event_disable_edev);
/**
* devfreq_event_is_enabled() - Check whether devfreq-event dev is enabled or
* not.
* @edev : the devfreq-event device
*
* Note that this function checks whether the devfreq-event dev is enabled or
* not. It returns true if the devfreq-event dev is enabled and false if it
* is disabled.
*/
bool devfreq_event_is_enabled(struct devfreq_event_dev *edev)
{
bool enabled = false;
if (!edev || !edev->desc)
return enabled;
mutex_lock(&edev->lock);
if (edev->enable_count > 0)
enabled = true;
mutex_unlock(&edev->lock);
return enabled;
}
EXPORT_SYMBOL_GPL(devfreq_event_is_enabled);
/**
* devfreq_event_set_event() - Set event to devfreq-event dev to start.
* @edev : the devfreq-event device
*
* Note that this function sets the event on the devfreq-event device and
* starts it, so that event data of various types can be collected.
*/
int devfreq_event_set_event(struct devfreq_event_dev *edev)
{
int ret;
if (!edev || !edev->desc)
return -EINVAL;
if (!edev->desc->ops || !edev->desc->ops->set_event)
return -EINVAL;
if (!devfreq_event_is_enabled(edev))
return -EPERM;
mutex_lock(&edev->lock);
ret = edev->desc->ops->set_event(edev);
mutex_unlock(&edev->lock);
return ret;
}
EXPORT_SYMBOL_GPL(devfreq_event_set_event);
/**
* devfreq_event_get_event() - Get {load|total}_count from devfreq-event dev.
* @edev : the devfreq-event device
* @edata : the calculated data of devfreq-event device
*
* Note that this function gets the calculated event data from the
* devfreq-event dev after stopping the whole measurement sequence of the
* devfreq-event dev.
*/
int devfreq_event_get_event(struct devfreq_event_dev *edev,
struct devfreq_event_data *edata)
{
int ret;
if (!edev || !edev->desc)
return -EINVAL;
if (!edev->desc->ops || !edev->desc->ops->get_event)
return -EINVAL;
if (!devfreq_event_is_enabled(edev))
return -EINVAL;
edata->total_count = edata->load_count = 0;
mutex_lock(&edev->lock);
ret = edev->desc->ops->get_event(edev, edata);
if (ret < 0)
edata->total_count = edata->load_count = 0;
mutex_unlock(&edev->lock);
return ret;
}
EXPORT_SYMBOL_GPL(devfreq_event_get_event);
/**
* devfreq_event_reset_event() - Reset all operations of the devfreq-event dev.
* @edev : the devfreq-event device
*
* Note that this function stops all operations of the devfreq-event dev and
* resets the current event data, returning the devfreq-event device to its
* initial state.
*/
int devfreq_event_reset_event(struct devfreq_event_dev *edev)
{
int ret = 0;
if (!edev || !edev->desc)
return -EINVAL;
if (!devfreq_event_is_enabled(edev))
return -EPERM;
mutex_lock(&edev->lock);
if (edev->desc->ops && edev->desc->ops->reset)
ret = edev->desc->ops->reset(edev);
mutex_unlock(&edev->lock);
return ret;
}
EXPORT_SYMBOL_GPL(devfreq_event_reset_event);
/**
* devfreq_event_get_edev_by_phandle() - Get the devfreq-event dev from
* devicetree.
* @dev : the pointer to the given device
* @index : the index into list of devfreq-event device
*
* Note that this function returns a pointer to the devfreq-event device.
*/
struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(struct device *dev,
int index)
{
struct device_node *node;
struct devfreq_event_dev *edev;
if (!dev->of_node) {
dev_err(dev, "device does not have a device node entry\n");
return ERR_PTR(-EINVAL);
}
node = of_parse_phandle(dev->of_node, "devfreq-events", index);
if (!node) {
dev_err(dev, "failed to get phandle in %s node\n",
dev->of_node->full_name);
return ERR_PTR(-ENODEV);
}
mutex_lock(&devfreq_event_list_lock);
list_for_each_entry(edev, &devfreq_event_list, node) {
if (!strcmp(edev->desc->name, node->name))
goto out;
}
edev = NULL;
out:
mutex_unlock(&devfreq_event_list_lock);
if (!edev) {
dev_err(dev, "unable to get devfreq-event device : %s\n",
node->name);
of_node_put(node);
return ERR_PTR(-ENODEV);
}
of_node_put(node);
return edev;
}
EXPORT_SYMBOL_GPL(devfreq_event_get_edev_by_phandle);
/**
* devfreq_event_get_edev_count() - Get the count of devfreq-event devices
* @dev : the pointer to the given device
*
* Note that this function returns the count of devfreq-event devices.
*/
int devfreq_event_get_edev_count(struct device *dev)
{
int count;
if (!dev->of_node) {
dev_err(dev, "device does not have a device node entry\n");
return -EINVAL;
}
count = of_property_count_elems_of_size(dev->of_node, "devfreq-events",
sizeof(u32));
if (count < 0) {
dev_err(dev,
"failed to get the count of devfreq-event in %s node\n",
dev->of_node->full_name);
return count;
}
return count;
}
EXPORT_SYMBOL_GPL(devfreq_event_get_edev_count);
static void devfreq_event_release_edev(struct device *dev)
{
struct devfreq_event_dev *edev = to_devfreq_event(dev);
kfree(edev);
}
/**
* devfreq_event_add_edev() - Add new devfreq-event device.
* @dev : the device owning the devfreq-event device being created
* @desc : the devfreq-event device's descriptor, which includes essential
* data for the devfreq-event device.
*
* Note that this function adds the new devfreq-event device to the
* devfreq-event class list and registers its device.
*/
struct devfreq_event_dev *devfreq_event_add_edev(struct device *dev,
struct devfreq_event_desc *desc)
{
struct devfreq_event_dev *edev;
static atomic_t event_no = ATOMIC_INIT(0);
int ret;
if (!dev || !desc)
return ERR_PTR(-EINVAL);
if (!desc->name || !desc->ops)
return ERR_PTR(-EINVAL);
if (!desc->ops->set_event || !desc->ops->get_event)
return ERR_PTR(-EINVAL);
edev = kzalloc(sizeof(struct devfreq_event_dev), GFP_KERNEL);
if (!edev)
return ERR_PTR(-ENOMEM);
mutex_init(&edev->lock);
edev->desc = desc;
edev->enable_count = 0;
edev->dev.parent = dev;
edev->dev.class = devfreq_event_class;
edev->dev.release = devfreq_event_release_edev;
dev_set_name(&edev->dev, "event.%d", atomic_inc_return(&event_no) - 1);
ret = device_register(&edev->dev);
if (ret < 0) {
put_device(&edev->dev);
return ERR_PTR(ret);
}
dev_set_drvdata(&edev->dev, edev);
INIT_LIST_HEAD(&edev->node);
mutex_lock(&devfreq_event_list_lock);
list_add(&edev->node, &devfreq_event_list);
mutex_unlock(&devfreq_event_list_lock);
return edev;
}
EXPORT_SYMBOL_GPL(devfreq_event_add_edev);
/**
* devfreq_event_remove_edev() - Remove the devfreq-event device registered.
* @edev : the devfreq-event device
*
* Note that this function removes the registered devfreq-event device.
*/
int devfreq_event_remove_edev(struct devfreq_event_dev *edev)
{
if (!edev)
return -EINVAL;
WARN_ON(edev->enable_count);
mutex_lock(&devfreq_event_list_lock);
list_del(&edev->node);
mutex_unlock(&devfreq_event_list_lock);
device_unregister(&edev->dev);
return 0;
}
EXPORT_SYMBOL_GPL(devfreq_event_remove_edev);
static int devm_devfreq_event_match(struct device *dev, void *res, void *data)
{
struct devfreq_event_dev **r = res;
if (WARN_ON(!r || !*r))
return 0;
return *r == data;
}
static void devm_devfreq_event_release(struct device *dev, void *res)
{
devfreq_event_remove_edev(*(struct devfreq_event_dev **)res);
}
/**
* devm_devfreq_event_add_edev() - Resource-managed devfreq_event_add_edev()
* @dev : the device owning the devfreq-event device being created
* @desc : the devfreq-event device's descriptor, which includes essential
* data for the devfreq-event device.
*
* Note that this function automatically manages the memory of the
* devfreq-event device using device resource management, which simplifies
* freeing the devfreq-event device's memory.
*/
struct devfreq_event_dev *devm_devfreq_event_add_edev(struct device *dev,
struct devfreq_event_desc *desc)
{
struct devfreq_event_dev **ptr, *edev;
ptr = devres_alloc(devm_devfreq_event_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return ERR_PTR(-ENOMEM);
edev = devfreq_event_add_edev(dev, desc);
if (IS_ERR(edev)) {
devres_free(ptr);
return ERR_CAST(edev);
}
*ptr = edev;
devres_add(dev, ptr);
return edev;
}
EXPORT_SYMBOL_GPL(devm_devfreq_event_add_edev);
/**
* devm_devfreq_event_remove_edev() - Resource-managed devfreq_event_remove_edev()
* @dev : the device owning the devfreq-event device being removed
* @edev : the devfreq-event device
*
* Note that this function automatically manages the memory of the
* devfreq-event device using device resource management.
*/
void devm_devfreq_event_remove_edev(struct device *dev,
struct devfreq_event_dev *edev)
{
WARN_ON(devres_release(dev, devm_devfreq_event_release,
devm_devfreq_event_match, edev));
}
EXPORT_SYMBOL_GPL(devm_devfreq_event_remove_edev);
/*
* Device attributes for devfreq-event class.
*/
static ssize_t name_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct devfreq_event_dev *edev = to_devfreq_event(dev);
if (!edev || !edev->desc)
return -EINVAL;
return sprintf(buf, "%s\n", edev->desc->name);
}
static DEVICE_ATTR_RO(name);
static ssize_t enable_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct devfreq_event_dev *edev = to_devfreq_event(dev);
if (!edev || !edev->desc)
return -EINVAL;
return sprintf(buf, "%d\n", edev->enable_count);
}
static DEVICE_ATTR_RO(enable_count);
static struct attribute *devfreq_event_attrs[] = {
&dev_attr_name.attr,
&dev_attr_enable_count.attr,
NULL,
};
ATTRIBUTE_GROUPS(devfreq_event);
static int __init devfreq_event_init(void)
{
devfreq_event_class = class_create(THIS_MODULE, "devfreq-event");
if (IS_ERR(devfreq_event_class)) {
pr_err("%s: couldn't create class\n", __FILE__);
return PTR_ERR(devfreq_event_class);
}
devfreq_event_class->dev_groups = devfreq_event_groups;
return 0;
}
subsys_initcall(devfreq_event_init);
static void __exit devfreq_event_exit(void)
{
class_destroy(devfreq_event_class);
}
module_exit(devfreq_event_exit);
MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
MODULE_DESCRIPTION("DEVFREQ-Event class support");
MODULE_LICENSE("GPL");
menuconfig PM_DEVFREQ_EVENT
bool "DEVFREQ-Event device Support"
help
The devfreq-event device provides the raw data and events which
indicate the current state of the monitored device. The data provided
by the devfreq-event device is used to monitor the state of the
device and to determine the suitable amount of resources, reducing
wasted resources.
The devfreq-event device can support various types of events
(e.g., raw data, utilization, latency, bandwidth). The events
may be used by devfreq governors and other subsystems.
if PM_DEVFREQ_EVENT
config DEVFREQ_EVENT_EXYNOS_PPMU
bool "EXYNOS PPMU (Platform Performance Monitoring Unit) DEVFREQ event Driver"
depends on ARCH_EXYNOS
select PM_OPP
help
This adds the devfreq-event driver for Exynos SoCs. It provides PPMU
(Platform Performance Monitoring Unit) counters to estimate the
utilization of each module.
endif # PM_DEVFREQ_EVENT
# Exynos DEVFREQ Event Drivers
obj-$(CONFIG_DEVFREQ_EVENT_EXYNOS_PPMU) += exynos-ppmu.o
/*
* exynos_ppmu.c - EXYNOS PPMU (Platform Performance Monitoring Unit) support
*
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
* Author : Chanwoo Choi <cw00.choi@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This driver is based on drivers/devfreq/exynos/exynos_ppmu.c
*/
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/suspend.h>
#include <linux/devfreq-event.h>
#include "exynos-ppmu.h"
struct exynos_ppmu_data {
void __iomem *base;
struct clk *clk;
};
struct exynos_ppmu {
struct devfreq_event_dev **edev;
struct devfreq_event_desc *desc;
unsigned int num_events;
struct device *dev;
struct mutex lock;
struct exynos_ppmu_data ppmu;
};
#define PPMU_EVENT(name) \
{ "ppmu-event0-"#name, PPMU_PMNCNT0 }, \
{ "ppmu-event1-"#name, PPMU_PMNCNT1 }, \
{ "ppmu-event2-"#name, PPMU_PMNCNT2 }, \
{ "ppmu-event3-"#name, PPMU_PMNCNT3 }
struct __exynos_ppmu_events {
char *name;
int id;
} ppmu_events[] = {
/* For Exynos3250, Exynos4 and Exynos5260 */
PPMU_EVENT(g3d),
PPMU_EVENT(fsys),
/* For Exynos4 SoCs and Exynos3250 */
PPMU_EVENT(dmc0),
PPMU_EVENT(dmc1),
PPMU_EVENT(cpu),
PPMU_EVENT(rightbus),
PPMU_EVENT(leftbus),
PPMU_EVENT(lcd0),
PPMU_EVENT(camif),
/* Only for Exynos3250 and Exynos5260 */
PPMU_EVENT(mfc),
/* Only for Exynos4 SoCs */
PPMU_EVENT(mfc-left),
PPMU_EVENT(mfc-right),
/* Only for Exynos5260 SoCs */
PPMU_EVENT(drex0-s0),
PPMU_EVENT(drex0-s1),
PPMU_EVENT(drex1-s0),
PPMU_EVENT(drex1-s1),
PPMU_EVENT(eagle),
PPMU_EVENT(kfc),
PPMU_EVENT(isp),
PPMU_EVENT(fimc),
PPMU_EVENT(gscl),
PPMU_EVENT(mscl),
PPMU_EVENT(fimd0x),
PPMU_EVENT(fimd1x),
{ /* sentinel */ },
};
static int exynos_ppmu_find_ppmu_id(struct devfreq_event_dev *edev)
{
int i;
for (i = 0; i < ARRAY_SIZE(ppmu_events); i++)
if (!strcmp(edev->desc->name, ppmu_events[i].name))
return ppmu_events[i].id;
return -EINVAL;
}
static int exynos_ppmu_disable(struct devfreq_event_dev *edev)
{
struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
u32 pmnc;
/* Disable all counters */
__raw_writel(PPMU_CCNT_MASK |
PPMU_PMCNT0_MASK |
PPMU_PMCNT1_MASK |
PPMU_PMCNT2_MASK |
PPMU_PMCNT3_MASK,
info->ppmu.base + PPMU_CNTENC);
/* Disable PPMU */
pmnc = __raw_readl(info->ppmu.base + PPMU_PMNC);
pmnc &= ~PPMU_PMNC_ENABLE_MASK;
__raw_writel(pmnc, info->ppmu.base + PPMU_PMNC);
return 0;
}
static int exynos_ppmu_set_event(struct devfreq_event_dev *edev)
{
struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
int id = exynos_ppmu_find_ppmu_id(edev);
u32 pmnc, cntens;
if (id < 0)
return id;
/* Enable specific counter */
cntens = __raw_readl(info->ppmu.base + PPMU_CNTENS);
cntens |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
__raw_writel(cntens, info->ppmu.base + PPMU_CNTENS);
/* Set the event of Read/Write data count */
__raw_writel(PPMU_RO_DATA_CNT | PPMU_WO_DATA_CNT,
info->ppmu.base + PPMU_BEVTxSEL(id));
/* Reset cycle counter/performance counter and enable PPMU */
pmnc = __raw_readl(info->ppmu.base + PPMU_PMNC);
pmnc &= ~(PPMU_PMNC_ENABLE_MASK
| PPMU_PMNC_COUNTER_RESET_MASK
| PPMU_PMNC_CC_RESET_MASK);
pmnc |= (PPMU_ENABLE << PPMU_PMNC_ENABLE_SHIFT);
pmnc |= (PPMU_ENABLE << PPMU_PMNC_COUNTER_RESET_SHIFT);
pmnc |= (PPMU_ENABLE << PPMU_PMNC_CC_RESET_SHIFT);
__raw_writel(pmnc, info->ppmu.base + PPMU_PMNC);
return 0;
}
static int exynos_ppmu_get_event(struct devfreq_event_dev *edev,
struct devfreq_event_data *edata)
{
struct exynos_ppmu *info = devfreq_event_get_drvdata(edev);
int id = exynos_ppmu_find_ppmu_id(edev);
u32 pmnc, cntenc;
if (id < 0)
return -EINVAL;
/* Disable PPMU */
pmnc = __raw_readl(info->ppmu.base + PPMU_PMNC);
pmnc &= ~PPMU_PMNC_ENABLE_MASK;
__raw_writel(pmnc, info->ppmu.base + PPMU_PMNC);
/* Read cycle count */
edata->total_count = __raw_readl(info->ppmu.base + PPMU_CCNT);
/* Read performance count */
switch (id) {
case PPMU_PMNCNT0:
case PPMU_PMNCNT1:
case PPMU_PMNCNT2:
edata->load_count
= __raw_readl(info->ppmu.base + PPMU_PMNCT(id));
break;
case PPMU_PMNCNT3:
edata->load_count =
((__raw_readl(info->ppmu.base + PPMU_PMCNT3_HIGH) << 8)
| __raw_readl(info->ppmu.base + PPMU_PMCNT3_LOW));
break;
default:
return -EINVAL;
}
/* Disable specific counter */
cntenc = __raw_readl(info->ppmu.base + PPMU_CNTENC);
cntenc |= (PPMU_CCNT_MASK | (PPMU_ENABLE << id));
__raw_writel(cntenc, info->ppmu.base + PPMU_CNTENC);
dev_dbg(&edev->dev, "%s (event: %lu/%lu)\n", edev->desc->name,
edata->load_count, edata->total_count);
return 0;
}
static struct devfreq_event_ops exynos_ppmu_ops = {
.disable = exynos_ppmu_disable,
.set_event = exynos_ppmu_set_event,
.get_event = exynos_ppmu_get_event,
};
static int of_get_devfreq_events(struct device_node *np,
struct exynos_ppmu *info)
{
struct devfreq_event_desc *desc;
struct device *dev = info->dev;
struct device_node *events_np, *node;
int i, j, count;
events_np = of_get_child_by_name(np, "events");
if (!events_np) {
dev_err(dev,
"failed to get child node of devfreq-event devices\n");
return -EINVAL;
}
count = of_get_child_count(events_np);
desc = devm_kzalloc(dev, sizeof(*desc) * count, GFP_KERNEL);
if (!desc)
return -ENOMEM;
info->num_events = count;
j = 0;
for_each_child_of_node(events_np, node) {
for (i = 0; i < ARRAY_SIZE(ppmu_events); i++) {
if (!ppmu_events[i].name)
continue;
if (!of_node_cmp(node->name, ppmu_events[i].name))
break;
}
if (i == ARRAY_SIZE(ppmu_events)) {
dev_warn(dev,
"don't know how to configure events : %s\n",
node->name);
continue;
}
desc[j].ops = &exynos_ppmu_ops;
desc[j].driver_data = info;
of_property_read_string(node, "event-name", &desc[j].name);
j++;
}
info->desc = desc;
of_node_put(events_np);
return 0;
}
static int exynos_ppmu_parse_dt(struct exynos_ppmu *info)
{
struct device *dev = info->dev;
struct device_node *np = dev->of_node;
int ret = 0;
if (!np) {
dev_err(dev, "failed to find devicetree node\n");
return -EINVAL;
}
/* Maps the memory mapped IO to control PPMU register */
info->ppmu.base = of_iomap(np, 0);
if (IS_ERR_OR_NULL(info->ppmu.base)) {
dev_err(dev, "failed to map memory region\n");
return -ENOMEM;
}
info->ppmu.clk = devm_clk_get(dev, "ppmu");
if (IS_ERR(info->ppmu.clk)) {
info->ppmu.clk = NULL;
dev_warn(dev, "cannot get PPMU clock\n");
}
ret = of_get_devfreq_events(np, info);
if (ret < 0) {
dev_err(dev, "failed to parse exynos ppmu dt node\n");
goto err;
}
return 0;
err:
iounmap(info->ppmu.base);
return ret;
}
static int exynos_ppmu_probe(struct platform_device *pdev)
{
struct exynos_ppmu *info;
struct devfreq_event_dev **edev;
struct devfreq_event_desc *desc;
int i, ret = 0, size;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
mutex_init(&info->lock);
info->dev = &pdev->dev;
/* Parse dt data to get resource */
ret = exynos_ppmu_parse_dt(info);
if (ret < 0) {
dev_err(&pdev->dev,
"failed to parse devicetree for resource\n");
return ret;
}
desc = info->desc;
size = sizeof(struct devfreq_event_dev *) * info->num_events;
info->edev = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
if (!info->edev) {
dev_err(&pdev->dev,
"failed to allocate memory devfreq-event devices\n");
return -ENOMEM;
}
edev = info->edev;
platform_set_drvdata(pdev, info);
for (i = 0; i < info->num_events; i++) {
edev[i] = devm_devfreq_event_add_edev(&pdev->dev, &desc[i]);
if (IS_ERR(edev[i])) {
ret = PTR_ERR(edev[i]);
dev_err(&pdev->dev,
"failed to add devfreq-event device\n");
goto err;
}
}
clk_prepare_enable(info->ppmu.clk);
return 0;
err:
iounmap(info->ppmu.base);
return ret;
}
static int exynos_ppmu_remove(struct platform_device *pdev)
{
struct exynos_ppmu *info = platform_get_drvdata(pdev);
clk_disable_unprepare(info->ppmu.clk);
iounmap(info->ppmu.base);
return 0;
}
static struct of_device_id exynos_ppmu_id_match[] = {
{ .compatible = "samsung,exynos-ppmu", },
{ /* sentinel */ },
};
static struct platform_driver exynos_ppmu_driver = {
.probe = exynos_ppmu_probe,
.remove = exynos_ppmu_remove,
.driver = {
.name = "exynos-ppmu",
.of_match_table = exynos_ppmu_id_match,
},
};
module_platform_driver(exynos_ppmu_driver);
MODULE_DESCRIPTION("Exynos PPMU(Platform Performance Monitoring Unit) driver");
MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
MODULE_LICENSE("GPL");
/*
* exynos_ppmu.h - EXYNOS PPMU header file
*
* Copyright (c) 2015 Samsung Electronics Co., Ltd.
* Author : Chanwoo Choi <cw00.choi@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __EXYNOS_PPMU_H__
#define __EXYNOS_PPMU_H__
enum ppmu_state {
PPMU_DISABLE = 0,
PPMU_ENABLE,
};
enum ppmu_counter {
PPMU_PMNCNT0 = 0,
PPMU_PMNCNT1,
PPMU_PMNCNT2,
PPMU_PMNCNT3,
PPMU_PMNCNT_MAX,
};
enum ppmu_event_type {
PPMU_RO_BUSY_CYCLE_CNT = 0x0,
PPMU_WO_BUSY_CYCLE_CNT = 0x1,
PPMU_RW_BUSY_CYCLE_CNT = 0x2,
PPMU_RO_REQUEST_CNT = 0x3,
PPMU_WO_REQUEST_CNT = 0x4,
PPMU_RO_DATA_CNT = 0x5,
PPMU_WO_DATA_CNT = 0x6,
PPMU_RO_LATENCY = 0x12,
PPMU_WO_LATENCY = 0x16,
};
enum ppmu_reg {
/* PPC control register */
PPMU_PMNC = 0x00,
PPMU_CNTENS = 0x10,
PPMU_CNTENC = 0x20,
PPMU_INTENS = 0x30,
PPMU_INTENC = 0x40,
PPMU_FLAG = 0x50,
/* Cycle Counter and Performance Event Counter Register */
PPMU_CCNT = 0x100,
PPMU_PMCNT0 = 0x110,
PPMU_PMCNT1 = 0x120,
PPMU_PMCNT2 = 0x130,
PPMU_PMCNT3_HIGH = 0x140,
PPMU_PMCNT3_LOW = 0x150,
/* Bus Event Generator */
PPMU_BEVT0SEL = 0x1000,
PPMU_BEVT1SEL = 0x1100,
PPMU_BEVT2SEL = 0x1200,
PPMU_BEVT3SEL = 0x1300,
PPMU_COUNTER_RESET = 0x1810,
PPMU_READ_OVERFLOW_CNT = 0x1810,
PPMU_READ_UNDERFLOW_CNT = 0x1814,
PPMU_WRITE_OVERFLOW_CNT = 0x1850,
PPMU_WRITE_UNDERFLOW_CNT = 0x1854,
PPMU_READ_PENDING_CNT = 0x1880,
PPMU_WRITE_PENDING_CNT = 0x1884
};
/* PMNC register */
#define PPMU_PMNC_CC_RESET_SHIFT 2
#define PPMU_PMNC_COUNTER_RESET_SHIFT 1
#define PPMU_PMNC_ENABLE_SHIFT 0
#define PPMU_PMNC_START_MODE_MASK BIT(16)
#define PPMU_PMNC_CC_DIVIDER_MASK BIT(3)
#define PPMU_PMNC_CC_RESET_MASK BIT(2)
#define PPMU_PMNC_COUNTER_RESET_MASK BIT(1)
#define PPMU_PMNC_ENABLE_MASK BIT(0)
/* CNTENS/CNTENC/INTENS/INTENC/FLAG register */
#define PPMU_CCNT_MASK BIT(31)
#define PPMU_PMCNT3_MASK BIT(3)
#define PPMU_PMCNT2_MASK BIT(2)
#define PPMU_PMCNT1_MASK BIT(1)
#define PPMU_PMCNT0_MASK BIT(0)
/* PPMU_PMNCTx/PPMU_BETxSEL registers */
#define PPMU_PMNCT(x) (PPMU_PMCNT0 + (0x10 * x))
#define PPMU_BEVTxSEL(x) (PPMU_BEVT0SEL + (0x100 * x))
#endif /* __EXYNOS_PPMU_H__ */
/*
* A devfreq driver for NVIDIA Tegra SoCs
*
* Copyright (c) 2014 NVIDIA CORPORATION. All rights reserved.
* Copyright (C) 2014 Google, Inc
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/devfreq.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/reset.h>
#include "governor.h"
#define ACTMON_GLB_STATUS 0x0
#define ACTMON_GLB_PERIOD_CTRL 0x4
#define ACTMON_DEV_CTRL 0x0
#define ACTMON_DEV_CTRL_K_VAL_SHIFT 10
#define ACTMON_DEV_CTRL_ENB_PERIODIC BIT(18)
#define ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN BIT(20)
#define ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN BIT(21)
#define ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_NUM_SHIFT 23
#define ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT 26
#define ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN BIT(29)
#define ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN BIT(30)
#define ACTMON_DEV_CTRL_ENB BIT(31)
#define ACTMON_DEV_UPPER_WMARK 0x4
#define ACTMON_DEV_LOWER_WMARK 0x8
#define ACTMON_DEV_INIT_AVG 0xc
#define ACTMON_DEV_AVG_UPPER_WMARK 0x10
#define ACTMON_DEV_AVG_LOWER_WMARK 0x14
#define ACTMON_DEV_COUNT_WEIGHT 0x18
#define ACTMON_DEV_AVG_COUNT 0x20
#define ACTMON_DEV_INTR_STATUS 0x24
#define ACTMON_INTR_STATUS_CLEAR 0xffffffff
#define ACTMON_DEV_INTR_CONSECUTIVE_UPPER BIT(31)
#define ACTMON_DEV_INTR_CONSECUTIVE_LOWER BIT(30)
#define ACTMON_ABOVE_WMARK_WINDOW 1
#define ACTMON_BELOW_WMARK_WINDOW 3
#define ACTMON_BOOST_FREQ_STEP 16000
/*
* The activity counter is incremented every 256 memory transactions, and each
* transaction takes 4 EMC clocks for Tegra124, so the COUNT_WEIGHT is
* 4 * 256 = 1024.
*/
#define ACTMON_COUNT_WEIGHT 0x400
/*
* ACTMON_AVERAGE_WINDOW_LOG2: default value for @DEV_CTRL_K_VAL, which
* translates to 2 ^ (K_VAL + 1). ex: 2 ^ (6 + 1) = 128
*/
#define ACTMON_AVERAGE_WINDOW_LOG2 6
#define ACTMON_SAMPLING_PERIOD 12 /* ms */
#define ACTMON_DEFAULT_AVG_BAND 6 /* 1/10 of % */
#define KHZ 1000
/* Assume that the bus is saturated if the utilization is 25% */
#define BUS_SATURATION_RATIO 25
/**
* struct tegra_devfreq_device_config - configuration specific to an ACTMON
* device
*
* Coefficients and thresholds are in %
*/
struct tegra_devfreq_device_config {
u32 offset;
u32 irq_mask;
unsigned int boost_up_coeff;
unsigned int boost_down_coeff;
unsigned int boost_up_threshold;
unsigned int boost_down_threshold;
u32 avg_dependency_threshold;
};
enum tegra_actmon_device {
MCALL = 0,
MCCPU,
};
static struct tegra_devfreq_device_config actmon_device_configs[] = {
{
/* MCALL */
.offset = 0x1c0,
.irq_mask = 1 << 26,
.boost_up_coeff = 200,
.boost_down_coeff = 50,
.boost_up_threshold = 60,
.boost_down_threshold = 40,
},
{
/* MCCPU */
.offset = 0x200,
.irq_mask = 1 << 25,
.boost_up_coeff = 800,
.boost_down_coeff = 90,
.boost_up_threshold = 27,
.boost_down_threshold = 10,
.avg_dependency_threshold = 50000,
},
};
/**
* struct tegra_devfreq_device - state specific to an ACTMON device
*
* Frequencies are in kHz.
*/
struct tegra_devfreq_device {
const struct tegra_devfreq_device_config *config;
void __iomem *regs;
u32 avg_band_freq;
u32 avg_count;
unsigned long target_freq;
unsigned long boost_freq;
};
struct tegra_devfreq {
struct devfreq *devfreq;
struct platform_device *pdev;
struct reset_control *reset;
struct clk *clock;
void __iomem *regs;
spinlock_t lock;
struct clk *emc_clock;
unsigned long max_freq;
unsigned long cur_freq;
struct notifier_block rate_change_nb;
struct tegra_devfreq_device devices[ARRAY_SIZE(actmon_device_configs)];
};
struct tegra_actmon_emc_ratio {
unsigned long cpu_freq;
unsigned long emc_freq;
};
static struct tegra_actmon_emc_ratio actmon_emc_ratios[] = {
{ 1400000, ULONG_MAX },
{ 1200000, 750000 },
{ 1100000, 600000 },
{ 1000000, 500000 },
{ 800000, 375000 },
{ 500000, 200000 },
{ 250000, 100000 },
};
static unsigned long do_percent(unsigned long val, unsigned int pct)
{
return val * pct / 100;
}
static void tegra_devfreq_update_avg_wmark(struct tegra_devfreq_device *dev)
{
u32 avg = dev->avg_count;
u32 band = dev->avg_band_freq * ACTMON_SAMPLING_PERIOD;
writel(avg + band, dev->regs + ACTMON_DEV_AVG_UPPER_WMARK);
avg = max(avg, band);
writel(avg - band, dev->regs + ACTMON_DEV_AVG_LOWER_WMARK);
}
static void tegra_devfreq_update_wmark(struct tegra_devfreq *tegra,
struct tegra_devfreq_device *dev)
{
u32 val = tegra->cur_freq * ACTMON_SAMPLING_PERIOD;
writel(do_percent(val, dev->config->boost_up_threshold),
dev->regs + ACTMON_DEV_UPPER_WMARK);
writel(do_percent(val, dev->config->boost_down_threshold),
dev->regs + ACTMON_DEV_LOWER_WMARK);
}
static void actmon_write_barrier(struct tegra_devfreq *tegra)
{
/* ensure the update has reached the ACTMON */
wmb();
readl(tegra->regs + ACTMON_GLB_STATUS);
}
static irqreturn_t actmon_isr(int irq, void *data)
{
struct tegra_devfreq *tegra = data;
struct tegra_devfreq_device *dev = NULL;
unsigned long flags;
u32 val;
unsigned int i;
val = readl(tegra->regs + ACTMON_GLB_STATUS);
for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
if (val & tegra->devices[i].config->irq_mask) {
dev = tegra->devices + i;
break;
}
}
if (!dev)
return IRQ_NONE;
spin_lock_irqsave(&tegra->lock, flags);
dev->avg_count = readl(dev->regs + ACTMON_DEV_AVG_COUNT);
tegra_devfreq_update_avg_wmark(dev);
val = readl(dev->regs + ACTMON_DEV_INTR_STATUS);
if (val & ACTMON_DEV_INTR_CONSECUTIVE_UPPER) {
val = readl(dev->regs + ACTMON_DEV_CTRL) |
ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN |
ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
/*
* new_boost = min(old_boost * up_coef + step, max_freq)
*/
dev->boost_freq = do_percent(dev->boost_freq,
dev->config->boost_up_coeff);
dev->boost_freq += ACTMON_BOOST_FREQ_STEP;
if (dev->boost_freq >= tegra->max_freq) {
dev->boost_freq = tegra->max_freq;
val &= ~ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
}
writel(val, dev->regs + ACTMON_DEV_CTRL);
} else if (val & ACTMON_DEV_INTR_CONSECUTIVE_LOWER) {
val = readl(dev->regs + ACTMON_DEV_CTRL) |
ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN |
ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
/*
* new_boost = old_boost * down_coef
* or 0 if (old_boost * down_coef < step / 2)
*/
dev->boost_freq = do_percent(dev->boost_freq,
dev->config->boost_down_coeff);
if (dev->boost_freq < (ACTMON_BOOST_FREQ_STEP >> 1)) {
dev->boost_freq = 0;
val &= ~ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
}
writel(val, dev->regs + ACTMON_DEV_CTRL);
}
if (dev->config->avg_dependency_threshold) {
val = readl(dev->regs + ACTMON_DEV_CTRL);
if (dev->avg_count >= dev->config->avg_dependency_threshold)
val |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
else if (dev->boost_freq == 0)
val &= ~ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
writel(val, dev->regs + ACTMON_DEV_CTRL);
}
writel(ACTMON_INTR_STATUS_CLEAR, dev->regs + ACTMON_DEV_INTR_STATUS);
actmon_write_barrier(tegra);
spin_unlock_irqrestore(&tegra->lock, flags);
return IRQ_WAKE_THREAD;
}
static unsigned long actmon_cpu_to_emc_rate(struct tegra_devfreq *tegra,
unsigned long cpu_freq)
{
unsigned int i;
struct tegra_actmon_emc_ratio *ratio = actmon_emc_ratios;
for (i = 0; i < ARRAY_SIZE(actmon_emc_ratios); i++, ratio++) {
if (cpu_freq >= ratio->cpu_freq) {
if (ratio->emc_freq >= tegra->max_freq)
return tegra->max_freq;
else
return ratio->emc_freq;
}
}
return 0;
}
static void actmon_update_target(struct tegra_devfreq *tegra,
struct tegra_devfreq_device *dev)
{
unsigned long cpu_freq = 0;
unsigned long static_cpu_emc_freq = 0;
unsigned int avg_sustain_coef;
unsigned long flags;
if (dev->config->avg_dependency_threshold) {
cpu_freq = cpufreq_get(0);
static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);
}
spin_lock_irqsave(&tegra->lock, flags);
dev->target_freq = dev->avg_count / ACTMON_SAMPLING_PERIOD;
avg_sustain_coef = 100 * 100 / dev->config->boost_up_threshold;
dev->target_freq = do_percent(dev->target_freq, avg_sustain_coef);
dev->target_freq += dev->boost_freq;
if (dev->avg_count >= dev->config->avg_dependency_threshold)
dev->target_freq = max(dev->target_freq, static_cpu_emc_freq);
spin_unlock_irqrestore(&tegra->lock, flags);
}
static irqreturn_t actmon_thread_isr(int irq, void *data)
{
struct tegra_devfreq *tegra = data;
mutex_lock(&tegra->devfreq->lock);
update_devfreq(tegra->devfreq);
mutex_unlock(&tegra->devfreq->lock);
return IRQ_HANDLED;
}
static int tegra_actmon_rate_notify_cb(struct notifier_block *nb,
unsigned long action, void *ptr)
{
struct clk_notifier_data *data = ptr;
struct tegra_devfreq *tegra = container_of(nb, struct tegra_devfreq,
rate_change_nb);
unsigned int i;
unsigned long flags;
spin_lock_irqsave(&tegra->lock, flags);
switch (action) {
case POST_RATE_CHANGE:
tegra->cur_freq = data->new_rate / KHZ;
for (i = 0; i < ARRAY_SIZE(tegra->devices); i++)
tegra_devfreq_update_wmark(tegra, tegra->devices + i);
actmon_write_barrier(tegra);
break;
case PRE_RATE_CHANGE:
/* fall through */
case ABORT_RATE_CHANGE:
break;
}
spin_unlock_irqrestore(&tegra->lock, flags);
return NOTIFY_OK;
}
static void tegra_actmon_configure_device(struct tegra_devfreq *tegra,
struct tegra_devfreq_device *dev)
{
u32 val;
dev->avg_band_freq = tegra->max_freq * ACTMON_DEFAULT_AVG_BAND / KHZ;
dev->target_freq = tegra->cur_freq;
dev->avg_count = tegra->cur_freq * ACTMON_SAMPLING_PERIOD;
writel(dev->avg_count, dev->regs + ACTMON_DEV_INIT_AVG);
tegra_devfreq_update_avg_wmark(dev);
tegra_devfreq_update_wmark(tegra, dev);
writel(ACTMON_COUNT_WEIGHT, dev->regs + ACTMON_DEV_COUNT_WEIGHT);
writel(ACTMON_INTR_STATUS_CLEAR, dev->regs + ACTMON_DEV_INTR_STATUS);
val = 0;
val |= ACTMON_DEV_CTRL_ENB_PERIODIC |
ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN |
ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN;
val |= (ACTMON_AVERAGE_WINDOW_LOG2 - 1)
<< ACTMON_DEV_CTRL_K_VAL_SHIFT;
val |= (ACTMON_BELOW_WMARK_WINDOW - 1)
<< ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_NUM_SHIFT;
val |= (ACTMON_ABOVE_WMARK_WINDOW - 1)
<< ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT;
val |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN |
ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
writel(val, dev->regs + ACTMON_DEV_CTRL);
actmon_write_barrier(tegra);
val = readl(dev->regs + ACTMON_DEV_CTRL);
val |= ACTMON_DEV_CTRL_ENB;
writel(val, dev->regs + ACTMON_DEV_CTRL);
actmon_write_barrier(tegra);
}
static int tegra_devfreq_suspend(struct device *dev)
{
struct platform_device *pdev;
struct tegra_devfreq *tegra;
struct tegra_devfreq_device *actmon_dev;
unsigned int i;
u32 val;
pdev = container_of(dev, struct platform_device, dev);
tegra = platform_get_drvdata(pdev);
for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
actmon_dev = &tegra->devices[i];
val = readl(actmon_dev->regs + ACTMON_DEV_CTRL);
val &= ~ACTMON_DEV_CTRL_ENB;
writel(val, actmon_dev->regs + ACTMON_DEV_CTRL);
writel(ACTMON_INTR_STATUS_CLEAR,
actmon_dev->regs + ACTMON_DEV_INTR_STATUS);
actmon_write_barrier(tegra);
}
return 0;
}
static int tegra_devfreq_resume(struct device *dev)
{
struct platform_device *pdev;
struct tegra_devfreq *tegra;
struct tegra_devfreq_device *actmon_dev;
unsigned int i;
pdev = container_of(dev, struct platform_device, dev);
tegra = platform_get_drvdata(pdev);
for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
actmon_dev = &tegra->devices[i];
tegra_actmon_configure_device(tegra, actmon_dev);
}
return 0;
}
static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
u32 flags)
{
struct platform_device *pdev;
struct tegra_devfreq *tegra;
struct dev_pm_opp *opp;
unsigned long rate = *freq * KHZ;
pdev = container_of(dev, struct platform_device, dev);
tegra = platform_get_drvdata(pdev);
rcu_read_lock();
opp = devfreq_recommended_opp(dev, &rate, flags);
if (IS_ERR(opp)) {
rcu_read_unlock();
dev_err(dev, "Failed to find opp for %lu KHz\n", *freq);
return PTR_ERR(opp);
}
rate = dev_pm_opp_get_freq(opp);
rcu_read_unlock();
/* TODO: Once we have per-user clk constraints, set a floor */
clk_set_rate(tegra->emc_clock, rate);
/* TODO: Set voltage as well */
return 0;
}
static int tegra_devfreq_get_dev_status(struct device *dev,
struct devfreq_dev_status *stat)
{
struct platform_device *pdev;
struct tegra_devfreq *tegra;
struct tegra_devfreq_device *actmon_dev;
pdev = container_of(dev, struct platform_device, dev);
tegra = platform_get_drvdata(pdev);
stat->current_frequency = tegra->cur_freq;
/* To be used by the tegra governor */
stat->private_data = tegra;
/* The below are to be used by the other governors */
actmon_dev = &tegra->devices[MCALL];
/* Number of cycles spent on memory access */
stat->busy_time = actmon_dev->avg_count;
/* The bus can be considered to be saturated way before 100% */
stat->busy_time *= 100 / BUS_SATURATION_RATIO;
/* Number of cycles in a sampling period */
stat->total_time = ACTMON_SAMPLING_PERIOD * tegra->cur_freq;
return 0;
}
static int tegra_devfreq_get_target(struct devfreq *devfreq,
unsigned long *freq)
{
struct devfreq_dev_status stat;
struct tegra_devfreq *tegra;
struct tegra_devfreq_device *dev;
unsigned long target_freq = 0;
unsigned int i;
int err;
err = devfreq->profile->get_dev_status(devfreq->dev.parent, &stat);
if (err)
return err;
tegra = stat.private_data;
for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
dev = &tegra->devices[i];
actmon_update_target(tegra, dev);
target_freq = max(target_freq, dev->target_freq);
}
*freq = target_freq;
return 0;
}
static int tegra_devfreq_event_handler(struct devfreq *devfreq,
unsigned int event, void *data)
{
return 0;
}
static struct devfreq_governor tegra_devfreq_governor = {
.name = "tegra",
.get_target_freq = tegra_devfreq_get_target,
.event_handler = tegra_devfreq_event_handler,
};
static struct devfreq_dev_profile tegra_devfreq_profile = {
.polling_ms = 0,
.target = tegra_devfreq_target,
.get_dev_status = tegra_devfreq_get_dev_status,
};
static int tegra_devfreq_probe(struct platform_device *pdev)
{
struct tegra_devfreq *tegra;
struct tegra_devfreq_device *dev;
struct resource *res;
unsigned long max_freq;
unsigned int i;
int irq;
int err;
tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
if (!tegra)
return -ENOMEM;
spin_lock_init(&tegra->lock);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "Failed to get regs resource\n");
return -ENODEV;
}
tegra->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(tegra->regs)) {
dev_err(&pdev->dev, "Failed to get IO memory\n");
return PTR_ERR(tegra->regs);
}
tegra->reset = devm_reset_control_get(&pdev->dev, "actmon");
if (IS_ERR(tegra->reset)) {
dev_err(&pdev->dev, "Failed to get reset\n");
return PTR_ERR(tegra->reset);
}
tegra->clock = devm_clk_get(&pdev->dev, "actmon");
if (IS_ERR(tegra->clock)) {
dev_err(&pdev->dev, "Failed to get actmon clock\n");
return PTR_ERR(tegra->clock);
}
tegra->emc_clock = devm_clk_get(&pdev->dev, "emc");
if (IS_ERR(tegra->emc_clock)) {
dev_err(&pdev->dev, "Failed to get emc clock\n");
return PTR_ERR(tegra->emc_clock);
}
err = of_init_opp_table(&pdev->dev);
if (err) {
dev_err(&pdev->dev, "Failed to init operating point table\n");
return err;
}
tegra->rate_change_nb.notifier_call = tegra_actmon_rate_notify_cb;
err = clk_notifier_register(tegra->emc_clock, &tegra->rate_change_nb);
if (err) {
dev_err(&pdev->dev,
"Failed to register rate change notifier\n");
return err;
}
reset_control_assert(tegra->reset);
err = clk_prepare_enable(tegra->clock);
if (err) {
reset_control_deassert(tegra->reset);
return err;
}
reset_control_deassert(tegra->reset);
max_freq = clk_round_rate(tegra->emc_clock, ULONG_MAX);
tegra->max_freq = max_freq / KHZ;
clk_set_rate(tegra->emc_clock, max_freq);
tegra->cur_freq = clk_get_rate(tegra->emc_clock) / KHZ;
writel(ACTMON_SAMPLING_PERIOD - 1,
tegra->regs + ACTMON_GLB_PERIOD_CTRL);
for (i = 0; i < ARRAY_SIZE(actmon_device_configs); i++) {
dev = tegra->devices + i;
dev->config = actmon_device_configs + i;
dev->regs = tegra->regs + dev->config->offset;
tegra_actmon_configure_device(tegra, tegra->devices + i);
}
err = devfreq_add_governor(&tegra_devfreq_governor);
if (err) {
dev_err(&pdev->dev, "Failed to add governor\n");
return err;
}
tegra_devfreq_profile.initial_freq = clk_get_rate(tegra->emc_clock);
tegra->devfreq = devm_devfreq_add_device(&pdev->dev,
&tegra_devfreq_profile,
"tegra",
NULL);
irq = platform_get_irq(pdev, 0);
err = devm_request_threaded_irq(&pdev->dev, irq, actmon_isr,
actmon_thread_isr, IRQF_SHARED,
"tegra-devfreq", tegra);
if (err) {
dev_err(&pdev->dev, "Interrupt request failed\n");
return err;
}
platform_set_drvdata(pdev, tegra);
return 0;
}
static int tegra_devfreq_remove(struct platform_device *pdev)
{
struct tegra_devfreq *tegra = platform_get_drvdata(pdev);
clk_notifier_unregister(tegra->emc_clock, &tegra->rate_change_nb);
clk_disable_unprepare(tegra->clock);
return 0;
}
static SIMPLE_DEV_PM_OPS(tegra_devfreq_pm_ops,
tegra_devfreq_suspend,
tegra_devfreq_resume);
static struct of_device_id tegra_devfreq_of_match[] = {
{ .compatible = "nvidia,tegra124-actmon" },
{ },
};
static struct platform_driver tegra_devfreq_driver = {
.probe = tegra_devfreq_probe,
.remove = tegra_devfreq_remove,
.driver = {
.name = "tegra-devfreq",
.owner = THIS_MODULE,
.of_match_table = tegra_devfreq_of_match,
.pm = &tegra_devfreq_pm_ops,
},
};
module_platform_driver(tegra_devfreq_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Tegra devfreq driver");
MODULE_AUTHOR("Tomeu Vizoso <tomeu.vizoso@collabora.com>");
MODULE_DEVICE_TABLE(of, tegra_devfreq_of_match);
/*
* devfreq-event: a framework to provide raw data and events of devfreq devices
*
* Copyright (C) 2014 Samsung Electronics
* Author: Chanwoo Choi <cw00.choi@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __LINUX_DEVFREQ_EVENT_H__
#define __LINUX_DEVFREQ_EVENT_H__
#include <linux/device.h>
/**
* struct devfreq_event_dev - the devfreq-event device
*
* @node : the list node used to register the devfreq-event device.
* @dev : the device registered by devfreq-event class. dev.parent is
* the device using devfreq-event.
* @lock : a mutex to protect accessing devfreq-event.
* @enable_count: the number of times the enable function has been called.
* @desc : the description for devfreq-event device.
*
* This structure contains devfreq-event device information.
*/
struct devfreq_event_dev {
struct list_head node;
struct device dev;
struct mutex lock;
u32 enable_count;
const struct devfreq_event_desc *desc;
};
/**
* struct devfreq_event_data - the devfreq-event data
*
* @load_count : load count of devfreq-event device for the given period.
* @total_count : total count of devfreq-event device for the given period.
* each count may represent a clock cycle, a time unit
* (ns/us/...), or anything the device driver wants.
* Generally, utilization is load_count / total_count.
*
* This structure contains the data of devfreq-event device for polling period.
*/
struct devfreq_event_data {
unsigned long load_count;
unsigned long total_count;
};
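/*
 * Illustration only (not part of this header): the kernel-doc above says that
 * utilization is generally load_count / total_count. A hedged sketch of how a
 * devfreq driver might map one devfreq-event sample onto devfreq's
 * busy_time/total_time accounting; the example_* helper is hypothetical and
 * would live in a driver, not in this header.
 */
#include <linux/devfreq.h>

static int example_fill_status(struct devfreq_event_dev *edev,
			       struct devfreq_dev_status *stat)
{
	struct devfreq_event_data edata;
	int ret;

	/* Stop the counters and read back the sample for this window. */
	ret = devfreq_event_get_event(edev, &edata);
	if (ret < 0)
		return ret;

	/* devfreq governors treat utilization as busy_time / total_time. */
	stat->busy_time = edata.load_count;
	stat->total_time = edata.total_count;

	return 0;
}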
/**
* struct devfreq_event_ops - the operations of devfreq-event device
*
* @enable : Enable the devfreq-event device.
* @disable : Disable the devfreq-event device.
* @reset : Reset all setting of the devfreq-event device.
* @set_event : Set the specific event type for the devfreq-event device.
* @get_event : Get the result of the devfreq-event device with the specific
* event type.
*
* This structure contains devfreq-event device operations which can be
* implemented by devfreq-event device drivers.
*/
struct devfreq_event_ops {
/* Optional functions */
int (*enable)(struct devfreq_event_dev *edev);
int (*disable)(struct devfreq_event_dev *edev);
int (*reset)(struct devfreq_event_dev *edev);
/* Mandatory functions */
int (*set_event)(struct devfreq_event_dev *edev);
int (*get_event)(struct devfreq_event_dev *edev,
struct devfreq_event_data *edata);
};
/**
* struct devfreq_event_desc - the descriptor of devfreq-event device
*
* @name : the name of devfreq-event device.
* @driver_data : the private data for devfreq-event driver.
* @ops : the operation to control devfreq-event device.
*
* Each devfreq-event device is described by this structure.
* This structure contains the various data for the devfreq-event device.
*/
struct devfreq_event_desc {
const char *name;
void *driver_data;
struct devfreq_event_ops *ops;
};
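/*
 * Illustration only (not part of this header): a minimal sketch of a
 * hypothetical devfreq-event provider built from the two structures above.
 * Only set_event/get_event are mandatory; enable, disable and reset are
 * optional. Registration uses devm_devfreq_event_add_edev() from this series.
 * All example_* names are made up, and the code would live in a driver.
 */
#include <linux/devfreq-event.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_set_event(struct devfreq_event_dev *edev)
{
	/* Program and start the hardware counters here. */
	return 0;
}

static int example_get_event(struct devfreq_event_dev *edev,
			     struct devfreq_event_data *edata)
{
	/* Stop the counters and report the counts for this window. */
	edata->load_count = 0;
	edata->total_count = 1;
	return 0;
}

static struct devfreq_event_ops example_ops = {
	.set_event = example_set_event,	/* mandatory */
	.get_event = example_get_event,	/* mandatory */
};

static struct devfreq_event_desc example_desc = {
	.name = "example-event",
	.ops = &example_ops,
};

static int example_probe(struct platform_device *pdev)
{
	struct devfreq_event_dev *edev;

	edev = devm_devfreq_event_add_edev(&pdev->dev, &example_desc);
	if (IS_ERR(edev))
		return PTR_ERR(edev);

	return 0;
}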
#if defined(CONFIG_PM_DEVFREQ_EVENT)
extern int devfreq_event_enable_edev(struct devfreq_event_dev *edev);
extern int devfreq_event_disable_edev(struct devfreq_event_dev *edev);
extern bool devfreq_event_is_enabled(struct devfreq_event_dev *edev);
extern int devfreq_event_set_event(struct devfreq_event_dev *edev);
extern int devfreq_event_get_event(struct devfreq_event_dev *edev,
struct devfreq_event_data *edata);
extern int devfreq_event_reset_event(struct devfreq_event_dev *edev);
extern struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(
struct device *dev, int index);
extern int devfreq_event_get_edev_count(struct device *dev);
extern struct devfreq_event_dev *devfreq_event_add_edev(struct device *dev,
struct devfreq_event_desc *desc);
extern int devfreq_event_remove_edev(struct devfreq_event_dev *edev);
extern struct devfreq_event_dev *devm_devfreq_event_add_edev(struct device *dev,
struct devfreq_event_desc *desc);
extern void devm_devfreq_event_remove_edev(struct device *dev,
struct devfreq_event_dev *edev);
static inline void *devfreq_event_get_drvdata(struct devfreq_event_dev *edev)
{
return edev->desc->driver_data;
}
#else
static inline int devfreq_event_enable_edev(struct devfreq_event_dev *edev)
{
return -EINVAL;
}
static inline int devfreq_event_disable_edev(struct devfreq_event_dev *edev)
{
return -EINVAL;
}
static inline bool devfreq_event_is_enabled(struct devfreq_event_dev *edev)
{
return false;
}
static inline int devfreq_event_set_event(struct devfreq_event_dev *edev)
{
return -EINVAL;
}
static inline int devfreq_event_get_event(struct devfreq_event_dev *edev,
struct devfreq_event_data *edata)
{
return -EINVAL;
}
static inline int devfreq_event_reset_event(struct devfreq_event_dev *edev)
{
return -EINVAL;
}
static inline void *devfreq_event_get_drvdata(struct devfreq_event_dev *edev)
{
return ERR_PTR(-EINVAL);
}
static inline struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(
struct device *dev, int index)
{
return ERR_PTR(-EINVAL);
}
static inline int devfreq_event_get_edev_count(struct device *dev)
{
return -EINVAL;
}
static inline struct devfreq_event_dev *devfreq_event_add_edev(struct device *dev,
struct devfreq_event_desc *desc)
{
return ERR_PTR(-EINVAL);
}
static inline int devfreq_event_remove_edev(struct devfreq_event_dev *edev)
{
return -EINVAL;
}
static inline struct devfreq_event_dev *devm_devfreq_event_add_edev(
struct device *dev,
struct devfreq_event_desc *desc)
{
return ERR_PTR(-EINVAL);
}
static inline void devm_devfreq_event_remove_edev(struct device *dev,
struct devfreq_event_dev *edev)
{
}
static inline void *devfreq_event_get_drvdata(struct devfreq_event_dev *edev)
{
return NULL;
}
#endif /* CONFIG_PM_DEVFREQ_EVENT */
#endif /* __LINUX_DEVFREQ_EVENT_H__ */