Commit fa9a67ef authored by Linus Torvalds

Merge tag 'pm+acpi-4.3-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull more power management and ACPI updates from Rafael Wysocki:
 "These are mostly fixes and cleanups on top of the previous PM+ACPI
  pull request (cpufreq core and drivers, cpuidle, generic power domains
  framework).  Some of them didn't make to that pull request and some
  fix issues introduced by it.

  The only really new thing is the support for suspend frequency in the
  cpufreq-dt driver, but it is needed to fix an issue with Exynos
  platforms.

  Specifics:

   - build fix for the new Mediatek MT8173 cpufreq driver (Guenter
     Roeck).

   - generic power domains framework fixes (power on error code path,
     subdomain removal) and cleanup of a deprecated API user (Geert
     Uytterhoeven, Jon Hunter, Ulf Hansson).

   - cpufreq-dt driver fixes including two fixes for bugs related to the
     new Operating Performance Points Device Tree bindings introduced
     recently (Viresh Kumar).

   - suspend frequency support for the cpufreq-dt driver (Bartlomiej
     Zolnierkiewicz, Viresh Kumar).

   - cpufreq core cleanups (Viresh Kumar).

   - intel_pstate driver fixes (Chen Yu, Kristen Carlson Accardi).

   - additional sanity check in the cpuidle core (Xunlei Pang).

   - fix for a comment related to CPU power management (Lina Iyer)"

* tag 'pm+acpi-4.3-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  intel_pstate: fix PCT_TO_HWP macro
  intel_pstate: Fix user input of min/max to legal policy region
  PM / OPP: Return suspend_opp only if it is enabled
  cpufreq-dt: add suspend frequency support
  cpufreq: allow cpufreq_generic_suspend() to work without suspend frequency
  PM / OPP: add dev_pm_opp_get_suspend_opp() helper
  staging: board: Migrate away from __pm_genpd_name_add_device()
  cpufreq: Use __func__ to print function's name
  cpufreq: staticize cpufreq_cpu_get_raw()
  PM / Domains: Ensure subdomain is not in use before removing
  cpufreq: Add ARM_MT8173_CPUFREQ dependency on THERMAL
  cpuidle/coupled: Add sanity check for safe_state_index
  PM / Domains: Try power off masters in error path of __pm_genpd_poweron()
  cpufreq: dt: Tolerance applies on both sides of target voltage
  cpufreq: dt: Print error on failing to mark OPPs as shared
  cpufreq: dt: Check OPP count before marking them shared
  kernel/cpu_pm: fix cpu_cluster_pm_exit comment
parents 05c78081 4614e0cc
@@ -212,6 +212,18 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
 	return ret;
 }
 
+/**
+ * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
+ * @genpd: PM domain to power off.
+ *
+ * Queue up the execution of pm_genpd_poweroff() unless it's already been done
+ * before.
+ */
+static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
+{
+	queue_work(pm_wq, &genpd->power_off_work);
+}
+
 /**
  * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
  * @genpd: PM domain to power up.
@@ -259,8 +271,12 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
 	return 0;
 
  err:
-	list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
+	list_for_each_entry_continue_reverse(link,
+					     &genpd->slave_links,
+					     slave_node) {
 		genpd_sd_counter_dec(link->master);
+		genpd_queue_power_off_work(link->master);
+	}
 
 	return ret;
 }
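What the extra call buys on the error path, assuming some masters were already powered up before the failure (commentary on the hunk above, not patch text):

	/*
	 * For each master powered up earlier in this call, in reverse order:
	 *
	 *	genpd_sd_counter_dec(link->master);	   // drop our subdomain count
	 *	genpd_queue_power_off_work(link->master);  // schedule pm_genpd_poweroff()
	 *
	 * Masters that picked up no other users meanwhile are powered back off
	 * asynchronously via pm_wq instead of being left on indefinitely. This
	 * new caller is also why genpd_queue_power_off_work() moves above
	 * __pm_genpd_poweron() in the surrounding hunks.
	 */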
@@ -348,18 +364,6 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
 	return NOTIFY_DONE;
 }
 
-/**
- * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
- * @genpd: PM domain to power off.
- *
- * Queue up the execution of pm_genpd_poweroff() unless it's already been done
- * before.
- */
-static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
-{
-	queue_work(pm_wq, &genpd->power_off_work);
-}
-
 /**
  * pm_genpd_poweroff - Remove power from a given PM domain.
  * @genpd: PM domain to power down.
@@ -1469,6 +1473,13 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
 	mutex_lock(&genpd->lock);
 
+	if (!list_empty(&subdomain->slave_links) || subdomain->device_count) {
+		pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
+			subdomain->name);
+		ret = -EBUSY;
+		goto out;
+	}
+
 	list_for_each_entry(link, &genpd->master_links, master_node) {
 		if (link->slave != subdomain)
 			continue;
@@ -1487,6 +1498,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
 		break;
 	}
 
+out:
 	mutex_unlock(&genpd->lock);
 
 	return ret;
......
@@ -340,6 +340,34 @@ unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
 
+/**
+ * dev_pm_opp_get_suspend_opp() - Get suspend opp
+ * @dev: device for which we do this operation
+ *
+ * Return: This function returns a pointer to the suspend opp if it is
+ * defined and available, otherwise it returns NULL.
+ *
+ * Locking: This function must be called under rcu_read_lock(). opp is an
+ * RCU-protected pointer: the pointer returned remains valid for use with
+ * dev_pm_opp_get_{voltage,freq}() only inside the locked region, so it must
+ * be used before unlocking with rcu_read_unlock() to maintain its integrity.
+ */
+struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
+{
+	struct device_opp *dev_opp;
+
+	opp_rcu_lockdep_assert();
+
+	dev_opp = _find_device_opp(dev);
+	if (IS_ERR(dev_opp) || !dev_opp->suspend_opp ||
+	    !dev_opp->suspend_opp->available)
+		return NULL;
+
+	return dev_opp->suspend_opp;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);
+
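The locking rule above is easiest to see from the caller's side. A minimal usage sketch (the helper name example_get_suspend_freq() is illustrative only; the cpufreq-dt hunk later in this diff follows the same pattern):

	static unsigned long example_get_suspend_freq(struct device *dev)
	{
		struct dev_pm_opp *opp;
		unsigned long freq = 0;

		rcu_read_lock();
		opp = dev_pm_opp_get_suspend_opp(dev);
		if (opp)
			freq = dev_pm_opp_get_freq(opp);	/* valid only under the lock */
		rcu_read_unlock();

		return freq;	/* 0: no suspend OPP defined, or not available */
	}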
 /**
  * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
  * @dev: device for which we do this operation
......
@@ -84,6 +84,7 @@ config ARM_KIRKWOOD_CPUFREQ
 config ARM_MT8173_CPUFREQ
 	bool "Mediatek MT8173 CPUFreq support"
 	depends on ARCH_MEDIATEK && REGULATOR
+	depends on !CPU_THERMAL || THERMAL=y
 	select PM_OPP
 	help
 	  This adds the CPUFreq driver support for Mediatek MT8173 SoC.
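The added dependency rules out exactly one configuration: this driver is a bool (always built-in), so when CPU-frequency cooling is enabled it must not link against a modular thermal core. Spelled out (my reading of the expression, not text from the patch):

	/*
	 * CPU_THERMAL  THERMAL  ARM_MT8173_CPUFREQ allowed?
	 *     n          any    yes  (!CPU_THERMAL holds)
	 *     y           y     yes  (THERMAL=y holds)
	 *     y           m     no   (built-in driver, modular thermal core)
	 */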
......
@@ -196,6 +196,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
 	struct device *cpu_dev;
 	struct regulator *cpu_reg;
 	struct clk *cpu_clk;
+	struct dev_pm_opp *suspend_opp;
 	unsigned long min_uV = ~0, max_uV = 0;
 	unsigned int transition_latency;
 	bool need_update = false;
@@ -239,6 +240,17 @@ static int cpufreq_init(struct cpufreq_policy *policy)
 	 */
 	of_cpumask_init_opp_table(policy->cpus);
 
+	/*
+	 * We need the OPP table to function, so if it is not there yet,
+	 * give the platform code a chance to provide it for us.
+	 */
+	ret = dev_pm_opp_get_opp_count(cpu_dev);
+	if (ret <= 0) {
+		pr_debug("OPP table is not ready, deferring probe\n");
+		ret = -EPROBE_DEFER;
+		goto out_free_opp;
+	}
+
 	if (need_update) {
 		struct cpufreq_dt_platform_data *pd = cpufreq_get_driver_data();
@@ -249,24 +261,16 @@ static int cpufreq_init(struct cpufreq_policy *policy)
 		 * OPP tables are initialized only for policy->cpu, do it for
 		 * others as well.
 		 */
-		set_cpus_sharing_opps(cpu_dev, policy->cpus);
+		ret = set_cpus_sharing_opps(cpu_dev, policy->cpus);
+		if (ret)
+			dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
+				__func__, ret);
 
 		of_property_read_u32(np, "clock-latency", &transition_latency);
 	} else {
 		transition_latency = dev_pm_opp_get_max_clock_latency(cpu_dev);
 	}
 
-	/*
-	 * We need the OPP table to function, so if it is not there yet,
-	 * give the platform code a chance to provide it for us.
-	 */
-	ret = dev_pm_opp_get_opp_count(cpu_dev);
-	if (ret <= 0) {
-		pr_debug("OPP table is not ready, deferring probe\n");
-		ret = -EPROBE_DEFER;
-		goto out_free_opp;
-	}
-
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv) {
 		ret = -ENOMEM;
@@ -300,7 +304,8 @@ static int cpufreq_init(struct cpufreq_policy *policy)
 	rcu_read_unlock();
 
 	tol_uV = opp_uV * priv->voltage_tolerance / 100;
-	if (regulator_is_supported_voltage(cpu_reg, opp_uV,
+	if (regulator_is_supported_voltage(cpu_reg,
+					   opp_uV - tol_uV,
 					   opp_uV + tol_uV)) {
 		if (opp_uV < min_uV)
 			min_uV = opp_uV;
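What the one-line change buys, with assumed numbers (a 5% tolerance and a 1,000,000 µV OPP):

	/*
	 * tol_uV = 1000000 * 5 / 100 = 50000
	 *
	 * before: regulator_is_supported_voltage(cpu_reg, 1000000, 1050000)
	 * after:  regulator_is_supported_voltage(cpu_reg,  950000, 1050000)
	 *
	 * A regulator that can only deliver, say, 980000 uV for this OPP no
	 * longer disqualifies it: the tolerance now applies below the target
	 * voltage as well as above it.
	 */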
@@ -329,6 +334,13 @@ static int cpufreq_init(struct cpufreq_policy *policy)
 	policy->driver_data = priv;
 
 	policy->clk = cpu_clk;
+
+	rcu_read_lock();
+	suspend_opp = dev_pm_opp_get_suspend_opp(cpu_dev);
+	if (suspend_opp)
+		policy->suspend_freq = dev_pm_opp_get_freq(suspend_opp) / 1000;
+	rcu_read_unlock();
+
 	ret = cpufreq_table_validate_and_show(policy, freq_table);
 	if (ret) {
 		dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__,
@@ -419,6 +431,7 @@ static struct cpufreq_driver dt_cpufreq_driver = {
 	.ready = cpufreq_ready,
 	.name = "cpufreq-dt",
 	.attr = cpufreq_dt_attr,
+	.suspend = cpufreq_generic_suspend,
 };
 
 static int dt_cpufreq_probe(struct platform_device *pdev)
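Taken together, the cpufreq-dt hunks wire up system suspend roughly as follows (a sketch of the call path, not patch text):

	/*
	 * cpufreq_suspend()                        cpufreq core, system suspend
	 *   -> dt_cpufreq_driver.suspend(policy)   == cpufreq_generic_suspend()
	 *        -> __cpufreq_driver_target(policy, policy->suspend_freq, ...)
	 *
	 * policy->suspend_freq was filled in cpufreq_init() from the suspend
	 * OPP (in kHz, hence the "/ 1000"). If no suspend OPP exists it stays
	 * 0, and with the cpufreq.c hunk below cpufreq_generic_suspend() now
	 * simply returns 0 instead of failing.
	 */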
......
@@ -239,7 +239,7 @@ int cpufreq_generic_init(struct cpufreq_policy *policy,
 EXPORT_SYMBOL_GPL(cpufreq_generic_init);
 
 /* Only for cpufreq core internal use */
-struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
+static struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
 {
 	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
@@ -1626,8 +1626,8 @@ int cpufreq_generic_suspend(struct cpufreq_policy *policy)
 	int ret;
 
 	if (!policy->suspend_freq) {
-		pr_err("%s: suspend_freq can't be zero\n", __func__);
-		return -EINVAL;
+		pr_debug("%s: suspend_freq not defined\n", __func__);
+		return 0;
 	}
 
 	pr_debug("%s: Setting suspend-freq: %u\n", __func__,
@@ -2031,8 +2031,7 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
 	if (!try_module_get(policy->governor->owner))
 		return -EINVAL;
 
-	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
-		 policy->cpu, event);
+	pr_debug("%s: for CPU %u, event %u\n", __func__, policy->cpu, event);
 
 	mutex_lock(&cpufreq_governor_lock);
 	if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
......
@@ -260,24 +260,31 @@ static inline void update_turbo_state(void)
 		 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
 }
 
-#define PCT_TO_HWP(x) (x * 255 / 100)
 static void intel_pstate_hwp_set(void)
 {
-	int min, max, cpu;
-	u64 value, freq;
+	int min, hw_min, max, hw_max, cpu, range, adj_range;
+	u64 value, cap;
+
+	rdmsrl(MSR_HWP_CAPABILITIES, cap);
+	hw_min = HWP_LOWEST_PERF(cap);
+	hw_max = HWP_HIGHEST_PERF(cap);
+	range = hw_max - hw_min;
 
 	get_online_cpus();
 
 	for_each_online_cpu(cpu) {
 		rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
-		min = PCT_TO_HWP(limits.min_perf_pct);
+		adj_range = limits.min_perf_pct * range / 100;
+		min = hw_min + adj_range;
 		value &= ~HWP_MIN_PERF(~0L);
 		value |= HWP_MIN_PERF(min);
 
-		max = PCT_TO_HWP(limits.max_perf_pct);
+		adj_range = limits.max_perf_pct * range / 100;
+		max = hw_min + adj_range;
 		if (limits.no_turbo) {
-			rdmsrl( MSR_HWP_CAPABILITIES, freq);
-			max = HWP_GUARANTEED_PERF(freq);
+			hw_max = HWP_GUARANTEED_PERF(cap);
+			if (hw_max < max)
+				max = hw_max;
 		}
 
 		value &= ~HWP_MAX_PERF(~0L);
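The bug is clearest with concrete numbers. Assume (for illustration) that MSR_HWP_CAPABILITIES reports lowest perf 1 and highest perf 32:

	/*
	 * range = 32 - 1 = 31
	 *
	 * limits.min_perf_pct = 40:
	 *   old: min = PCT_TO_HWP(40) = 40 * 255 / 100 = 102   (> 32, invalid)
	 *   new: min = 1 + 40 * 31 / 100 = 1 + 12 = 13         (inside [1, 32])
	 *
	 * The old macro scaled percentages onto a fixed 0..255 scale; the HWP
	 * request fields must instead be scaled onto the CPU's advertised
	 * hw_min..hw_max performance range.
	 */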
@@ -423,6 +430,8 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
 	limits.max_sysfs_pct = clamp_t(int, input, 0 , 100);
 	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
+	limits.max_perf_pct = max(limits.min_policy_pct, limits.max_perf_pct);
+	limits.max_perf_pct = max(limits.min_perf_pct, limits.max_perf_pct);
 	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
 
 	if (hwp_active)
@@ -442,6 +451,8 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
 	limits.min_sysfs_pct = clamp_t(int, input, 0 , 100);
 	limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
+	limits.min_perf_pct = min(limits.max_policy_pct, limits.min_perf_pct);
+	limits.min_perf_pct = min(limits.max_perf_pct, limits.min_perf_pct);
 	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
 
 	if (hwp_active)
@@ -989,12 +1000,19 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 	limits.min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
 	limits.min_policy_pct = clamp_t(int, limits.min_policy_pct, 0 , 100);
-	limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
-	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
 	limits.max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq;
 	limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0 , 100);
+
+	/* Normalize user input to [min_policy_pct, max_policy_pct] */
+	limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
+	limits.min_perf_pct = min(limits.max_policy_pct, limits.min_perf_pct);
 	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
+	limits.max_perf_pct = max(limits.min_policy_pct, limits.max_perf_pct);
+
+	/* Make sure min_perf_pct <= max_perf_pct */
+	limits.min_perf_pct = min(limits.max_perf_pct, limits.min_perf_pct);
+
+	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
 	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
 
 	if (hwp_active)
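A short trace with assumed inputs shows why the ordering matters. Say the policy allows 20..80% and a user previously wrote 90 into min_perf_pct via sysfs:

	/*
	 * min_perf_pct = max(20, 90)  = 90
	 * min_perf_pct = min(80, 90)  = 80   <- clamped into the policy range
	 * max_perf_pct = min(80, 100) = 80
	 * max_perf_pct = max(20, 80)  = 80
	 * min_perf_pct = min(80, 80)  = 80   <- min <= max guaranteed
	 *
	 * Before the patch, min_perf_pct would have stayed at 90, above both
	 * the policy maximum and max_perf_pct.
	 */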
......
@@ -186,6 +186,28 @@ bool cpuidle_state_is_coupled(struct cpuidle_driver *drv, int state)
 	return drv->states[state].flags & CPUIDLE_FLAG_COUPLED;
 }
 
+/**
+ * cpuidle_coupled_state_verify - check if the coupled states are correctly set.
+ * @drv: struct cpuidle_driver for the platform
+ *
+ * Returns 0 for valid state values, a negative error code otherwise:
+ *  * -EINVAL if safe_state_index is wrongly set for any coupled state.
+ */
+int cpuidle_coupled_state_verify(struct cpuidle_driver *drv)
+{
+	int i;
+
+	for (i = drv->state_count - 1; i >= 0; i--) {
+		if (cpuidle_state_is_coupled(drv, i) &&
+		    (drv->safe_state_index == i ||
+		     drv->safe_state_index < 0 ||
+		     drv->safe_state_index >= drv->state_count))
+			return -EINVAL;
+	}
+
+	return 0;
+}
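In other words: if any state is coupled, safe_state_index must point at a different, in-range state that a cpu can enter on its own. A hypothetical driver layout that passes the check (names are illustrative, not from this patch):

	static struct cpuidle_driver example_coupled_driver = {
		.name		= "example-coupled",
		.states = {
			[0] = { .name = "wfi" },		/* per-cpu, not coupled */
			[1] = {
				.name	= "cluster-off",
				.flags	= CPUIDLE_FLAG_COUPLED,	/* needs all cpus */
			},
		},
		.state_count		= 2,
		.safe_state_index	= 0,	/* valid: != 1 and within [0, 2) */
	};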
 /**
  * cpuidle_coupled_set_ready - mark a cpu as ready
  * @coupled: the struct coupled that contains the current cpu
......
@@ -35,6 +35,7 @@ extern void cpuidle_remove_sysfs(struct cpuidle_device *dev);
 #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
 bool cpuidle_state_is_coupled(struct cpuidle_driver *drv, int state);
+int cpuidle_coupled_state_verify(struct cpuidle_driver *drv);
 int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
 		struct cpuidle_driver *drv, int next_state);
 int cpuidle_coupled_register_device(struct cpuidle_device *dev);
@@ -46,6 +47,11 @@ bool cpuidle_state_is_coupled(struct cpuidle_driver *drv, int state)
 	return false;
 }
 
+static inline int cpuidle_coupled_state_verify(struct cpuidle_driver *drv)
+{
+	return 0;
+}
+
 static inline int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
 			struct cpuidle_driver *drv, int next_state)
 {
......
@@ -227,6 +227,10 @@ static int __cpuidle_register_driver(struct cpuidle_driver *drv)
 	if (!drv || !drv->state_count)
 		return -EINVAL;
 
+	ret = cpuidle_coupled_state_verify(drv);
+	if (ret)
+		return ret;
+
 	if (cpuidle_disabled())
 		return -ENODEV;
......
@@ -91,7 +91,7 @@ static const struct board_staging_dev armadillo800eva_devices[] __initconst = {
 		.pdev		= &lcdc0_device,
 		.clocks		= lcdc0_clocks,
 		.nclocks	= ARRAY_SIZE(lcdc0_clocks),
-		.domain		= "a4lc",
+		.domain		= "/system-controller@e6180000/pm-domains/c5/a4lc@1",
 	},
 };
......
@@ -135,6 +135,40 @@ int __init board_staging_register_clock(const struct board_staging_clk *bsc)
 	return error;
 }
 
+#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
+static int board_staging_add_dev_domain(struct platform_device *pdev,
+					const char *domain)
+{
+	struct of_phandle_args pd_args;
+	struct generic_pm_domain *pd;
+	struct device_node *np;
+
+	np = of_find_node_by_path(domain);
+	if (!np) {
+		pr_err("Cannot find domain node %s\n", domain);
+		return -ENOENT;
+	}
+
+	pd_args.np = np;
+	pd_args.args_count = 0;
+	pd = of_genpd_get_from_provider(&pd_args);
+	if (IS_ERR(pd)) {
+		pr_err("Cannot find genpd %s (%ld)\n", domain, PTR_ERR(pd));
+		return PTR_ERR(pd);
+	}
+	pr_debug("Found genpd %s for device %s\n", pd->name, pdev->name);
+
+	return pm_genpd_add_device(pd, &pdev->dev);
+}
+#else
+static inline int board_staging_add_dev_domain(struct platform_device *pdev,
+					       const char *domain)
+{
+	return 0;
+}
+#endif
+
 int __init board_staging_register_device(const struct board_staging_dev *dev)
 {
 	struct platform_device *pdev = dev->pdev;
@@ -161,7 +195,7 @@ int __init board_staging_register_device(const struct board_staging_dev *dev)
 	}
 
 	if (dev->domain)
-		__pm_genpd_name_add_device(dev->domain, &pdev->dev, NULL);
+		board_staging_add_dev_domain(pdev, dev->domain);
 
 	return error;
 }
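The migration in a nutshell: .domain used to be a bare genpd name resolved through the deprecated global name lookup; it is now a full device-tree path resolved through the domain's OF provider (the path below is the one from the armadillo800eva hunk above):

	/*
	 * old: __pm_genpd_name_add_device("a4lc", &pdev->dev, NULL);
	 *
	 * new: .domain = "/system-controller@e6180000/pm-domains/c5/a4lc@1"
	 *      -> of_find_node_by_path(domain)          resolve the DT node
	 *      -> of_genpd_get_from_provider(&pd_args)  map node to genpd
	 *      -> pm_genpd_add_device(pd, &pdev->dev)   attach the device
	 */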
......
@@ -34,6 +34,7 @@ bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp);
 int dev_pm_opp_get_opp_count(struct device *dev);
 unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev);
+struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev);
 
 struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
 					      unsigned long freq,
@@ -80,6 +81,11 @@ static inline unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
 	return 0;
 }
 
+static inline struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
+{
+	return NULL;
+}
+
 static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
 					unsigned long freq, bool available)
 {
......
@@ -180,7 +180,7 @@ EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter);
  * low power state that may have caused some blocks in the same power domain
  * to reset.
  *
- * Must be called after cpu_pm_exit has been called on all cpus in the power
+ * Must be called after cpu_cluster_pm_enter has been called for the power
  * domain, and before cpu_pm_exit has been called on any cpu in the power
  * domain. Notified drivers can include VFP co-processor, interrupt controller
  * and its PM extensions, local CPU timers context save/restore which
......