Commit f41b8312 authored by Rafael J. Wysocki

Merge branch 'pm-cpufreq'

* pm-cpufreq:
  cpufreq: Don't use smp_processor_id() in preemptible context
  cpufreq: governor: Fix typos in comments
  cpufreq: governors: Remove duplicate check of target freq in supported range
  cpufreq: Fix timer/workqueue corruption due to double queueing
  cpufreq: imx6q: Fix clock enable balance
  cpufreq: tegra: fix the wrong clock name
parents 499aa70a 69320783
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -67,8 +67,6 @@ static void cs_check_cpu(int cpu, unsigned int load)
 			return;
 
 		dbs_info->requested_freq += get_freq_target(cs_tuners, policy);
-		if (dbs_info->requested_freq > policy->max)
-			dbs_info->requested_freq = policy->max;
 
 		__cpufreq_driver_target(policy, dbs_info->requested_freq,
 			CPUFREQ_RELATION_H);
@@ -89,8 +87,6 @@ static void cs_check_cpu(int cpu, unsigned int load)
 			return;
 
 		dbs_info->requested_freq -= get_freq_target(cs_tuners, policy);
-		if (dbs_info->requested_freq < policy->min)
-			dbs_info->requested_freq = policy->min;
 
 		__cpufreq_driver_target(policy, dbs_info->requested_freq,
 			CPUFREQ_RELATION_L);
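Both hunks above (and the matching od_check_cpu hunk further down) drop a clamp that the cpufreq core already applies: __cpufreq_driver_target() limits the requested frequency to the [policy->min, policy->max] range before calling into the driver. A minimal stand-alone C sketch of that invariant; clamp_target and the sample numbers are illustrative, not kernel code:

#include <stdio.h>

struct policy { unsigned int min, max; };	/* kHz, mirroring struct cpufreq_policy */

/* Clamp as the cpufreq core does before invoking the driver (sketch) */
static unsigned int clamp_target(const struct policy *p, unsigned int freq)
{
	if (freq > p->max)
		freq = p->max;
	if (freq < p->min)
		freq = p->min;
	return freq;
}

int main(void)
{
	struct policy p = { .min = 396000, .max = 996000 };

	/* A governor may overshoot in either direction; the core clamps,
	 * which is why the duplicate checks above could be removed. */
	printf("%u\n", clamp_target(&p, 1200000));	/* -> 996000 */
	printf("%u\n", clamp_target(&p, 200000));	/* -> 396000 */
	return 0;
}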
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -119,8 +119,18 @@ void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
 {
 	int i;
 
+	if (!policy->governor_enabled)
+		return;
+
 	if (!all_cpus) {
-		__gov_queue_work(smp_processor_id(), dbs_data, delay);
+		/*
+		 * Use raw_smp_processor_id() to avoid preemptible warnings.
+		 * We know that this is only called with all_cpus == false from
+		 * works that have been queued with *_work_on() functions and
+		 * those works are canceled during CPU_DOWN_PREPARE so they
+		 * can't possibly run on any other CPU.
+		 */
+		__gov_queue_work(raw_smp_processor_id(), dbs_data, delay);
 	} else {
 		for_each_cpu(i, policy->cpus)
 			__gov_queue_work(i, dbs_data, delay);
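Two fixes meet in this hunk: the new governor_enabled early return stops work from being queued for a policy whose governor is being torn down (the timer/workqueue double-queueing fix), and raw_smp_processor_id() silences the warning that plain smp_processor_id() emits in preemptible context under CONFIG_DEBUG_PREEMPT. A hypothetical kernel-module sketch of the processor-id trade-off (module name and messages invented for illustration):

#include <linux/module.h>
#include <linux/smp.h>

static int __init cpuid_sketch_init(void)
{
	int cpu;

	/*
	 * With CONFIG_DEBUG_PREEMPT, calling smp_processor_id() here would
	 * warn: this context is preemptible, so the task could migrate and
	 * the returned id could be stale by the time it is used.
	 */
	pr_info("unchecked cpu: %d\n", raw_smp_processor_id());

	/* The safe general pattern pins the task while the id is in use */
	cpu = get_cpu();	/* disables preemption */
	pr_info("pinned on cpu: %d\n", cpu);
	put_cpu();		/* re-enables preemption */

	return 0;
}

static void __exit cpuid_sketch_exit(void)
{
}

module_init(cpuid_sketch_init);
module_exit(cpuid_sketch_exit);
MODULE_LICENSE("GPL");

gov_queue_work() can use the raw variant without pinning because, as its comment argues, the works in question are queued with *_work_on() and canceled during CPU_DOWN_PREPARE, so they cannot run on any other CPU.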
@@ -230,7 +240,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		policy->governor_data = dbs_data;
 
-		/* policy latency is in nS. Convert it to uS first */
+		/* policy latency is in ns. Convert it to us first */
 		latency = policy->cpuinfo.transition_latency / 1000;
 		if (latency == 0)
 			latency = 1;
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -25,11 +25,11 @@
 /*
  * The polling frequency depends on the capability of the processor. Default
  * polling frequency is 1000 times the transition latency of the processor. The
- * governor will work on any processor with transition latency <= 10mS, using
+ * governor will work on any processor with transition latency <= 10ms, using
  * appropriate sampling rate.
  *
- * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL)
- * this governor will not work. All times here are in uS.
+ * For CPUs with transition latency > 10ms (mostly drivers with CPUFREQ_ETERNAL)
+ * this governor will not work. All times here are in us (micro seconds).
  */
 #define MIN_SAMPLING_RATE_RATIO	(2)
 #define LATENCY_MULTIPLIER	(1000)
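Taken together with the conversion in cpufreq_governor_dbs() above, the units now read consistently: hardware transition latency arrives in ns, governor tunables are kept in us. A stand-alone worked example of the default-sampling-rate arithmetic, assuming only the LATENCY_MULTIPLIER value shown (the kernel additionally enforces its own minimum-rate floor):

#include <stdio.h>

#define LATENCY_MULTIPLIER	(1000)

int main(void)
{
	/* cpuinfo.transition_latency is in ns; say the hardware needs 10 us */
	unsigned int transition_latency_ns = 10000;

	/* policy latency is in ns. Convert it to us first */
	unsigned int latency = transition_latency_ns / 1000;
	if (latency == 0)
		latency = 1;

	/* Default polling period: 1000x the transition latency, in us */
	unsigned int sampling_rate = latency * LATENCY_MULTIPLIER;

	printf("sampling rate: %u us (= %u ms)\n",
	       sampling_rate, sampling_rate / 1000);	/* 10000 us = 10 ms */
	return 0;
}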
@@ -162,7 +162,7 @@ struct cs_cpu_dbs_info_s {
 	unsigned int enable:1;
 };
 
-/* Per policy Governers sysfs tunables */
+/* Per policy Governors sysfs tunables */
 struct od_dbs_tuners {
 	unsigned int ignore_nice_load;
 	unsigned int sampling_rate;
@@ -181,7 +181,7 @@ struct cs_dbs_tuners {
 	unsigned int freq_step;
 };
 
-/* Common Governer data across policies */
+/* Common Governor data across policies */
 struct dbs_data;
 struct common_dbs_data {
 	/* Common across governors */
@@ -205,7 +205,7 @@ struct common_dbs_data {
 	void *gov_ops;
 };
 
-/* Governer Per policy data */
+/* Governor Per policy data */
 struct dbs_data {
 	struct common_dbs_data *cdata;
 	unsigned int min_sampling_rate;
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -177,9 +177,6 @@ static void od_check_cpu(int cpu, unsigned int load)
 		/* No longer fully busy, reset rate_mult */
 		dbs_info->rate_mult = 1;
 
-		if (freq_next < policy->min)
-			freq_next = policy->min;
-
 		if (!od_tuners->powersave_bias) {
 			__cpufreq_driver_target(policy, freq_next,
 					CPUFREQ_RELATION_L);
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -117,28 +117,11 @@ static int imx6q_set_target(struct cpufreq_policy *policy,
 	 *  - Reprogram pll1_sys_clk and reparent pll1_sw_clk back to it
	 *  - Disable pll2_pfd2_396m_clk
 	 */
-	clk_prepare_enable(pll2_pfd2_396m_clk);
 	clk_set_parent(step_clk, pll2_pfd2_396m_clk);
 	clk_set_parent(pll1_sw_clk, step_clk);
 	if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk)) {
 		clk_set_rate(pll1_sys_clk, freqs.new * 1000);
-		/*
-		 * If we are leaving 396 MHz set-point, we need to enable
-		 * pll1_sys_clk and disable pll2_pfd2_396m_clk to keep
-		 * their use count correct.
-		 */
-		if (freqs.old * 1000 <= clk_get_rate(pll2_pfd2_396m_clk)) {
-			clk_prepare_enable(pll1_sys_clk);
-			clk_disable_unprepare(pll2_pfd2_396m_clk);
-		}
 		clk_set_parent(pll1_sw_clk, pll1_sys_clk);
-		clk_disable_unprepare(pll2_pfd2_396m_clk);
-	} else {
-		/*
-		 * Disable pll1_sys_clk if pll2_pfd2_396m_clk is sufficient
-		 * to provide the frequency.
-		 */
-		clk_disable_unprepare(pll1_sys_clk);
 	}
 
 	/* Ensure the arm clock divider is what we expect */
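The balance bug is visible in the removed lines: pll2_pfd2_396m_clk is enabled once at the top, but on the path that leaves the 396 MHz set-point it was disabled both in the inner if and again after the reparent, i.e. twice per enable. The fix drops the manual juggling and lets the clk framework manage use counts across reparenting. A stand-alone sketch of the invariant, with a hypothetical refcount standing in for the framework's enable count:

#include <assert.h>
#include <stdio.h>

/* Hypothetical stand-in for the clk framework's prepare/enable count */
struct fake_clk { const char *name; int enable_count; };

static void fake_enable(struct fake_clk *c)
{
	c->enable_count++;
}

static void fake_disable(struct fake_clk *c)
{
	assert(c->enable_count > 0);	/* a double disable trips this */
	c->enable_count--;
}

int main(void)
{
	struct fake_clk pfd = { "pll2_pfd2_396m", 0 };

	/* Balanced: every enable is paired with exactly one disable */
	fake_enable(&pfd);
	fake_disable(&pfd);
	printf("%s enable_count = %d\n", pfd.name, pfd.enable_count);	/* 0 */

	/* The removed code could reach the disable twice on one enable,
	 * leaving other users of the PFD with a gated clock. */
	return 0;
}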
--- a/drivers/cpufreq/tegra-cpufreq.c
+++ b/drivers/cpufreq/tegra-cpufreq.c
@@ -255,7 +255,7 @@ static struct cpufreq_driver tegra_cpufreq_driver = {
 static int __init tegra_cpufreq_init(void)
 {
-	cpu_clk = clk_get_sys(NULL, "cpu");
+	cpu_clk = clk_get_sys(NULL, "cclk");
 	if (IS_ERR(cpu_clk))
 		return PTR_ERR(cpu_clk);
 
@@ -263,7 +263,7 @@ static int __init tegra_cpufreq_init(void)
 	if (IS_ERR(pll_x_clk))
 		return PTR_ERR(pll_x_clk);
 
-	pll_p_clk = clk_get_sys(NULL, "pll_p_cclk");
+	pll_p_clk = clk_get_sys(NULL, "pll_p");
 	if (IS_ERR(pll_p_clk))
 		return PTR_ERR(pll_p_clk);
 