Commit 3e66c4b8 authored by Rafael J. Wysocki

Merge branch 'pm-cpufreq'

* pm-cpufreq:
  intel_pstate: fix PCT_TO_HWP macro
  intel_pstate: Fix user input of min/max to legal policy region
  cpufreq-dt: add suspend frequency support
  cpufreq: allow cpufreq_generic_suspend() to work without suspend frequency
  cpufreq: Use __func__ to print function's name
  cpufreq: staticize cpufreq_cpu_get_raw()
  cpufreq: Add ARM_MT8173_CPUFREQ dependency on THERMAL
  cpufreq: dt: Tolerance applies on both sides of target voltage
  cpufreq: dt: Print error on failing to mark OPPs as shared
  cpufreq: dt: Check OPP count before marking them shared
parents 7c976664 74da56ce
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -133,6 +133,7 @@ config ARM_KIRKWOOD_CPUFREQ
 config ARM_MT8173_CPUFREQ
 	bool "Mediatek MT8173 CPUFreq support"
 	depends on ARCH_MEDIATEK && REGULATOR
+	depends on !CPU_THERMAL || THERMAL=y
 	select PM_OPP
 	help
 	  This adds the CPUFreq driver support for Mediatek MT8173 SoC.

--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -196,6 +196,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
 	struct device *cpu_dev;
 	struct regulator *cpu_reg;
 	struct clk *cpu_clk;
+	struct dev_pm_opp *suspend_opp;
 	unsigned long min_uV = ~0, max_uV = 0;
 	unsigned int transition_latency;
 	bool need_update = false;
@@ -239,6 +240,17 @@ static int cpufreq_init(struct cpufreq_policy *policy)
 	 */
 	of_cpumask_init_opp_table(policy->cpus);
 
+	/*
+	 * But we need OPP table to function so if it is not there let's
+	 * give platform code chance to provide it for us.
+	 */
+	ret = dev_pm_opp_get_opp_count(cpu_dev);
+	if (ret <= 0) {
+		pr_debug("OPP table is not ready, deferring probe\n");
+		ret = -EPROBE_DEFER;
+		goto out_free_opp;
+	}
+
 	if (need_update) {
 		struct cpufreq_dt_platform_data *pd = cpufreq_get_driver_data();
@@ -249,24 +261,16 @@ static int cpufreq_init(struct cpufreq_policy *policy)
 		 * OPP tables are initialized only for policy->cpu, do it for
 		 * others as well.
 		 */
-		set_cpus_sharing_opps(cpu_dev, policy->cpus);
+		ret = set_cpus_sharing_opps(cpu_dev, policy->cpus);
+		if (ret)
+			dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
+				__func__, ret);
 
 		of_property_read_u32(np, "clock-latency", &transition_latency);
 	} else {
 		transition_latency = dev_pm_opp_get_max_clock_latency(cpu_dev);
 	}
 
-	/*
-	 * But we need OPP table to function so if it is not there let's
-	 * give platform code chance to provide it for us.
-	 */
-	ret = dev_pm_opp_get_opp_count(cpu_dev);
-	if (ret <= 0) {
-		pr_debug("OPP table is not ready, deferring probe\n");
-		ret = -EPROBE_DEFER;
-		goto out_free_opp;
-	}
-
 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv) {
 		ret = -ENOMEM;
@@ -300,7 +304,8 @@ static int cpufreq_init(struct cpufreq_policy *policy)
 			rcu_read_unlock();
 
 			tol_uV = opp_uV * priv->voltage_tolerance / 100;
-			if (regulator_is_supported_voltage(cpu_reg, opp_uV,
+			if (regulator_is_supported_voltage(cpu_reg,
+							   opp_uV - tol_uV,
 							   opp_uV + tol_uV)) {
 				if (opp_uV < min_uV)
 					min_uV = opp_uV;
@@ -329,6 +334,13 @@ static int cpufreq_init(struct cpufreq_policy *policy)
 	policy->driver_data = priv;
 
 	policy->clk = cpu_clk;
+
+	rcu_read_lock();
+	suspend_opp = dev_pm_opp_get_suspend_opp(cpu_dev);
+	if (suspend_opp)
+		policy->suspend_freq = dev_pm_opp_get_freq(suspend_opp) / 1000;
+	rcu_read_unlock();
+
 	ret = cpufreq_table_validate_and_show(policy, freq_table);
 	if (ret) {
 		dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__,
@@ -419,6 +431,7 @@ static struct cpufreq_driver dt_cpufreq_driver = {
 	.ready = cpufreq_ready,
 	.name = "cpufreq-dt",
 	.attr = cpufreq_dt_attr,
+	.suspend = cpufreq_generic_suspend,
 };
 
 static int dt_cpufreq_probe(struct platform_device *pdev)

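Taken together, the cpufreq-dt changes make system suspend pin each policy to the frequency of the OPP marked as the suspend OPP. A minimal sketch of the driver-side contract, with hypothetical names (my_init, my_driver); only the .suspend wiring and the policy->suspend_freq assignment are taken from the patch:

	/* Hypothetical skeleton: a driver opting into generic suspend handling. */
	static int my_init(struct cpufreq_policy *policy)
	{
		/* ... clocks, regulators, freq_table setup ... */

		/* Normally read from the suspend OPP, as cpufreq-dt does
		 * above; 988000 kHz is an illustrative value. */
		policy->suspend_freq = 988000;
		return 0;
	}

	static struct cpufreq_driver my_driver = {
		.init		= my_init,
		.suspend	= cpufreq_generic_suspend,
		/* .verify, .target_index, ... omitted */
	};
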
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -239,7 +239,7 @@ int cpufreq_generic_init(struct cpufreq_policy *policy,
 EXPORT_SYMBOL_GPL(cpufreq_generic_init);
 
 /* Only for cpufreq core internal use */
-struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
+static struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
 {
 	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
@@ -1626,8 +1626,8 @@ int cpufreq_generic_suspend(struct cpufreq_policy *policy)
 	int ret;
 
 	if (!policy->suspend_freq) {
-		pr_err("%s: suspend_freq can't be zero\n", __func__);
-		return -EINVAL;
+		pr_debug("%s: suspend_freq not defined\n", __func__);
+		return 0;
 	}
 
 	pr_debug("%s: Setting suspend-freq: %u\n", __func__,
@@ -2031,8 +2031,7 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
 	if (!try_module_get(policy->governor->owner))
 		return -EINVAL;
 
-	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
-		 policy->cpu, event);
+	pr_debug("%s: for CPU %u, event %u\n", __func__, policy->cpu, event);
 
 	mutex_lock(&cpufreq_governor_lock);

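After this change a driver can install cpufreq_generic_suspend() unconditionally: policies with no suspend frequency are simply skipped instead of failing suspend with -EINVAL. Condensed view of the resulting function (the __cpufreq_driver_target() tail is the function's pre-existing body, not part of this hunk):

	int cpufreq_generic_suspend(struct cpufreq_policy *policy)
	{
		if (!policy->suspend_freq) {
			pr_debug("%s: suspend_freq not defined\n", __func__);
			return 0;	/* was: pr_err() and -EINVAL */
		}

		pr_debug("%s: Setting suspend-freq: %u\n", __func__,
			 policy->suspend_freq);

		/* pre-existing behavior: switch to the suspend frequency */
		return __cpufreq_driver_target(policy, policy->suspend_freq,
					       CPUFREQ_RELATION_H);
	}
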
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -260,24 +260,31 @@ static inline void update_turbo_state(void)
 		 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
 }
 
-#define PCT_TO_HWP(x) (x * 255 / 100)
 static void intel_pstate_hwp_set(void)
 {
-	int min, max, cpu;
-	u64 value, freq;
+	int min, hw_min, max, hw_max, cpu, range, adj_range;
+	u64 value, cap;
+
+	rdmsrl(MSR_HWP_CAPABILITIES, cap);
+	hw_min = HWP_LOWEST_PERF(cap);
+	hw_max = HWP_HIGHEST_PERF(cap);
+	range = hw_max - hw_min;
 
 	get_online_cpus();
 
 	for_each_online_cpu(cpu) {
 		rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
-		min = PCT_TO_HWP(limits.min_perf_pct);
+		adj_range = limits.min_perf_pct * range / 100;
+		min = hw_min + adj_range;
 		value &= ~HWP_MIN_PERF(~0L);
 		value |= HWP_MIN_PERF(min);
 
-		max = PCT_TO_HWP(limits.max_perf_pct);
+		adj_range = limits.max_perf_pct * range / 100;
+		max = hw_min + adj_range;
 		if (limits.no_turbo) {
-			rdmsrl( MSR_HWP_CAPABILITIES, freq);
-			max = HWP_GUARANTEED_PERF(freq);
+			hw_max = HWP_GUARANTEED_PERF(cap);
+			if (hw_max < max)
+				max = hw_max;
 		}
 
 		value &= ~HWP_MAX_PERF(~0L);
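The underlying bug: PCT_TO_HWP() mapped a percentage onto a fixed 0-255 scale, but valid HWP performance levels span [HWP_LOWEST_PERF, HWP_HIGHEST_PERF] as reported by MSR_HWP_CAPABILITIES. Worked example with illustrative register values (not from the patch):

	int hw_min = 8, hw_max = 40;	/* pretend MSR_HWP_CAPABILITIES fields */
	int range = hw_max - hw_min;	/* 32 */
	int pct = 50;			/* user asked for 50% */

	int old_level = pct * 255 / 100;		/* 127: outside 8..40 */
	int new_level = hw_min + pct * range / 100;	/* 8 + 16 = 24: valid */
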
@@ -423,6 +430,8 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
 	limits.max_sysfs_pct = clamp_t(int, input, 0 , 100);
 	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
+	limits.max_perf_pct = max(limits.min_policy_pct, limits.max_perf_pct);
+	limits.max_perf_pct = max(limits.min_perf_pct, limits.max_perf_pct);
 	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
 
 	if (hwp_active)
@@ -442,6 +451,8 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
 	limits.min_sysfs_pct = clamp_t(int, input, 0 , 100);
 	limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
+	limits.min_perf_pct = min(limits.max_policy_pct, limits.min_perf_pct);
+	limits.min_perf_pct = min(limits.max_perf_pct, limits.min_perf_pct);
 	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
 
 	if (hwp_active)
@@ -989,12 +1000,19 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 	limits.min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
 	limits.min_policy_pct = clamp_t(int, limits.min_policy_pct, 0 , 100);
-	limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
-	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
 	limits.max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq;
 	limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0 , 100);
+
+	/* Normalize user input to [min_policy_pct, max_policy_pct] */
+	limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
+	limits.min_perf_pct = min(limits.max_policy_pct, limits.min_perf_pct);
 	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
+	limits.max_perf_pct = max(limits.min_policy_pct, limits.max_perf_pct);
+
+	/* Make sure min_perf_pct <= max_perf_pct */
+	limits.min_perf_pct = min(limits.max_perf_pct, limits.min_perf_pct);
+
+	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
 	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
 
 	if (hwp_active)
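Effect of the added clamps in intel_pstate_set_policy(), traced with illustrative inputs (not from the patch); a sysfs minimum above the policy maximum can no longer invert the limits (min()/max() are the kernel macros):

	/* policy allows 30..70%; user wrote 80 to min_perf_pct via sysfs */
	int min_policy_pct = 30, max_policy_pct = 70;
	int min_sysfs_pct = 80, max_sysfs_pct = 100;

	int min_perf_pct = max(min_policy_pct, min_sysfs_pct);	/* 80 */
	min_perf_pct = min(max_policy_pct, min_perf_pct);	/* clamped to 70 */

	int max_perf_pct = min(max_policy_pct, max_sysfs_pct);	/* 70 */
	max_perf_pct = max(min_policy_pct, max_perf_pct);	/* 70 */

	/* final guarantee: min_perf_pct <= max_perf_pct */
	min_perf_pct = min(max_perf_pct, min_perf_pct);		/* 70 */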