Commit 7855e102 authored by Rafael J. Wysocki

Merge back earlier cpufreq material for v4.4.

parents 8e601a9f 4ef45148
@@ -1546,6 +1546,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 		hwp_only
 			Only load intel_pstate on systems which support
 			hardware P state control (HWP) if available.
+		no_acpi
+			Don't use ACPI processor performance control objects
+			_PSS and _PPC specified limits.
 
 	intremap=	[X86-64, Intel-IOMMU]
 		on	enable Interrupt Remapping (default)
......
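Note (annotation, not part of the commit): like the hwp_only sub-option shown above, the new no_acpi flag is passed as part of the intel_pstate= kernel command-line parameter, for example:

	intel_pstate=no_acpi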
@@ -206,6 +206,13 @@
 #define MSR_GFX_PERF_LIMIT_REASONS 0x000006B0
 #define MSR_RING_PERF_LIMIT_REASONS 0x000006B1
 
+/* Config TDP MSRs */
+#define MSR_CONFIG_TDP_NOMINAL 0x00000648
+#define MSR_CONFIG_TDP_LEVEL1 0x00000649
+#define MSR_CONFIG_TDP_LEVEL2 0x0000064A
+#define MSR_CONFIG_TDP_CONTROL 0x0000064B
+#define MSR_TURBO_ACTIVATION_RATIO 0x0000064C
+
 /* Hardware P state interface */
 #define MSR_PPERF 0x0000064e
 #define MSR_PERF_LIMIT_REASONS 0x0000064f
......
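Note (annotation, not part of the commit): a minimal sketch of how the new config-TDP MSRs could be consulted, assuming an x86 kernel context where rdmsrl_safe() from <asm/msr.h> is available. The helper name below is hypothetical; this is illustrative only and is not the driver code from this merge.

	#include <linux/errno.h>
	#include <linux/types.h>
	#include <asm/msr.h>

	/* Hypothetical helper: read the TDP-ratio MSR for the active config-TDP level. */
	static int example_read_active_config_tdp(u64 *ratio)
	{
		u64 tdp_ctrl;
		unsigned int level;
		int err;

		err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
		if (err)
			return err;

		/* Bits [1:0] select the active config-TDP level (0..2). */
		level = tdp_ctrl & 0x3;
		if (level > 2)
			return -EINVAL;

		/* The per-level MSRs are consecutive, starting at the nominal one. */
		return rdmsrl_safe(MSR_CONFIG_TDP_NOMINAL + level, ratio);
	}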
@@ -5,6 +5,7 @@
 config X86_INTEL_PSTATE
 	bool "Intel P state control"
 	depends on X86
+	select ACPI_PROCESSOR if ACPI
 	help
 	  This driver provides a P state for Intel core processors.
 	  The driver implements an internal governor and will become
......
@@ -843,18 +843,11 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
 	down_write(&policy->rwsem);
 
-	/* Updating inactive policies is invalid, so avoid doing that. */
-	if (unlikely(policy_is_inactive(policy))) {
-		ret = -EBUSY;
-		goto unlock_policy_rwsem;
-	}
-
 	if (fattr->store)
 		ret = fattr->store(policy, buf, count);
 	else
 		ret = -EIO;
 
-unlock_policy_rwsem:
 	up_write(&policy->rwsem);
 unlock:
 	put_online_cpus();
......
@@ -23,6 +23,19 @@
 static DEFINE_PER_CPU(struct cs_cpu_dbs_info_s, cs_cpu_dbs_info);
 
+static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy,
+				   unsigned int event);
+
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
+static
+#endif
+struct cpufreq_governor cpufreq_gov_conservative = {
+	.name = "conservative",
+	.governor = cs_cpufreq_governor_dbs,
+	.max_transition_latency = TRANSITION_LATENCY_LIMIT,
+	.owner = THIS_MODULE,
+};
+
 static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners,
 					   struct cpufreq_policy *policy)
 {
@@ -119,12 +132,14 @@ static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 	struct cpufreq_freqs *freq = data;
 	struct cs_cpu_dbs_info_s *dbs_info =
 			&per_cpu(cs_cpu_dbs_info, freq->cpu);
-	struct cpufreq_policy *policy;
+	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(freq->cpu);
 
-	if (!dbs_info->enable)
+	if (!policy)
 		return 0;
 
-	policy = dbs_info->cdbs.shared->policy;
+	/* policy isn't governed by conservative governor */
+	if (policy->governor != &cpufreq_gov_conservative)
+		return 0;
 
 	/*
 	 * we only care if our internally tracked freq moves outside the 'valid'
@@ -367,16 +382,6 @@ static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy,
 	return cpufreq_governor_dbs(policy, &cs_dbs_cdata, event);
 }
 
-#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
-static
-#endif
-struct cpufreq_governor cpufreq_gov_conservative = {
-	.name = "conservative",
-	.governor = cs_cpufreq_governor_dbs,
-	.max_transition_latency = TRANSITION_LATENCY_LIMIT,
-	.owner = THIS_MODULE,
-};
-
 static int __init cpufreq_gov_dbs_init(void)
 {
 	return cpufreq_register_governor(&cpufreq_gov_conservative);
......
@@ -463,7 +463,6 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy,
 			cdata->get_cpu_dbs_info_s(cpu);
 
 		cs_dbs_info->down_skip = 0;
-		cs_dbs_info->enable = 1;
 		cs_dbs_info->requested_freq = policy->cur;
 	} else {
 		struct od_ops *od_ops = cdata->gov_ops;
@@ -482,9 +481,7 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy,
 static int cpufreq_governor_stop(struct cpufreq_policy *policy,
 				 struct dbs_data *dbs_data)
 {
-	struct common_dbs_data *cdata = dbs_data->cdata;
-	unsigned int cpu = policy->cpu;
-	struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);
+	struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(policy->cpu);
 	struct cpu_common_dbs_info *shared = cdbs->shared;
 
 	/* State should be equivalent to START */
@@ -493,13 +490,6 @@ static int cpufreq_governor_stop(struct cpufreq_policy *policy,
 	gov_cancel_work(dbs_data, policy);
 
-	if (cdata->governor == GOV_CONSERVATIVE) {
-		struct cs_cpu_dbs_info_s *cs_dbs_info =
-			cdata->get_cpu_dbs_info_s(cpu);
-
-		cs_dbs_info->enable = 0;
-	}
-
 	shared->policy = NULL;
 	mutex_destroy(&shared->timer_mutex);
 	return 0;
......
@@ -170,7 +170,6 @@ struct cs_cpu_dbs_info_s {
 	struct cpu_dbs_info cdbs;
 	unsigned int down_skip;
 	unsigned int requested_freq;
-	unsigned int enable:1;
 };
 
 /* Per policy Governors sysfs tunables */
......
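Note (annotation, not part of the commit): taken together, the conservative-governor and common-governor hunks above replace the per-CPU enable flag with a direct ownership check. dbs_cpufreq_notifier() now looks the policy up with cpufreq_cpu_get_raw() and returns early unless policy->governor is cpufreq_gov_conservative; that is why the governor definition (plus a forward declaration of cs_cpufreq_governor_dbs) moves to the top of the file, and why the GOV_CONSERVATIVE special cases disappear from cpufreq_governor_start()/stop() and the enable bit from struct cs_cpu_dbs_info_s.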
@@ -30,6 +30,10 @@ static struct clk *pll1_sw_clk;
 static struct clk *step_clk;
 static struct clk *pll2_pfd2_396m_clk;
 
+/* clk used by i.MX6UL */
+static struct clk *pll2_bus_clk;
+static struct clk *secondary_sel_clk;
+
 static struct device *cpu_dev;
 static bool free_opp;
 static struct cpufreq_frequency_table *freq_table;
@@ -91,16 +95,36 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
 	 * The setpoints are selected per PLL/PDF frequencies, so we need to
 	 * reprogram PLL for frequency scaling. The procedure of reprogramming
 	 * PLL1 is as below.
-	 *
+	 * For i.MX6UL, it has a secondary clk mux, so the cpu frequency change
+	 * flow is slightly different from other i.MX6 SoCs.
+	 * The cpu frequency change flow for i.MX6 (except i.MX6UL) is as below:
 	 *  - Enable pll2_pfd2_396m_clk and reparent pll1_sw_clk to it
 	 *  - Reprogram pll1_sys_clk and reparent pll1_sw_clk back to it
 	 *  - Disable pll2_pfd2_396m_clk
 	 */
-	clk_set_parent(step_clk, pll2_pfd2_396m_clk);
-	clk_set_parent(pll1_sw_clk, step_clk);
-	if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk)) {
-		clk_set_rate(pll1_sys_clk, new_freq * 1000);
-		clk_set_parent(pll1_sw_clk, pll1_sys_clk);
+	if (of_machine_is_compatible("fsl,imx6ul")) {
+		/*
+		 * When changing pll1_sw_clk's parent to pll1_sys_clk,
+		 * CPU may run at higher than 528MHz, this will lead to
+		 * the system unstable if the voltage is lower than the
+		 * voltage of 528MHz, so lower the CPU frequency to one
+		 * half before changing CPU frequency.
+		 */
+		clk_set_rate(arm_clk, (old_freq >> 1) * 1000);
+		clk_set_parent(pll1_sw_clk, pll1_sys_clk);
+		if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk))
+			clk_set_parent(secondary_sel_clk, pll2_bus_clk);
+		else
+			clk_set_parent(secondary_sel_clk, pll2_pfd2_396m_clk);
+		clk_set_parent(step_clk, secondary_sel_clk);
+		clk_set_parent(pll1_sw_clk, step_clk);
+	} else {
+		clk_set_parent(step_clk, pll2_pfd2_396m_clk);
+		clk_set_parent(pll1_sw_clk, step_clk);
+		if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk)) {
+			clk_set_rate(pll1_sys_clk, new_freq * 1000);
+			clk_set_parent(pll1_sw_clk, pll1_sys_clk);
+		}
 	}
 
 	/* Ensure the arm clock divider is what we expect */
@@ -186,6 +210,16 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
 		goto put_clk;
 	}
 
+	if (of_machine_is_compatible("fsl,imx6ul")) {
+		pll2_bus_clk = clk_get(cpu_dev, "pll2_bus");
+		secondary_sel_clk = clk_get(cpu_dev, "secondary_sel");
+		if (IS_ERR(pll2_bus_clk) || IS_ERR(secondary_sel_clk)) {
+			dev_err(cpu_dev, "failed to get clocks specific to imx6ul\n");
+			ret = -ENOENT;
+			goto put_clk;
+		}
+	}
+
 	arm_reg = regulator_get(cpu_dev, "arm");
 	pu_reg = regulator_get_optional(cpu_dev, "pu");
 	soc_reg = regulator_get(cpu_dev, "soc");
@@ -331,6 +365,10 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
 	clk_put(step_clk);
 	if (!IS_ERR(pll2_pfd2_396m_clk))
 		clk_put(pll2_pfd2_396m_clk);
+	if (!IS_ERR(pll2_bus_clk))
+		clk_put(pll2_bus_clk);
+	if (!IS_ERR(secondary_sel_clk))
+		clk_put(secondary_sel_clk);
 	of_node_put(np);
 	return ret;
 }
@@ -350,6 +388,8 @@ static int imx6q_cpufreq_remove(struct platform_device *pdev)
 	clk_put(pll1_sw_clk);
 	clk_put(step_clk);
 	clk_put(pll2_pfd2_396m_clk);
+	clk_put(pll2_bus_clk);
+	clk_put(secondary_sel_clk);
 
 	return 0;
 }
......
@@ -221,6 +221,8 @@ static const struct of_device_id integrator_cpufreq_match[] = {
 	{ },
 };
 
+MODULE_DEVICE_TABLE(of, integrator_cpufreq_match);
+
 static struct platform_driver integrator_cpufreq_driver = {
 	.driver = {
 		.name = "integrator-cpufreq",
......
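Note (annotation, not part of the commit): MODULE_DEVICE_TABLE(of, integrator_cpufreq_match) exports the OF match table in the module's alias information, so the integrator-cpufreq driver can be autoloaded from the device tree when it is built as a module.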
(One file's diff is collapsed and is not shown here.)
@@ -327,8 +327,14 @@ static void powernv_cpufreq_throttle_check(void *data)
 		if (chips[i].throttled)
 			goto next;
 		chips[i].throttled = true;
-		pr_info("CPU %d on Chip %u has Pmax reduced to %d\n", cpu,
-			chips[i].id, pmsr_pmax);
+		if (pmsr_pmax < powernv_pstate_info.nominal)
+			pr_crit("CPU %d on Chip %u has Pmax reduced below nominal frequency (%d < %d)\n",
+				cpu, chips[i].id, pmsr_pmax,
+				powernv_pstate_info.nominal);
+		else
+			pr_info("CPU %d on Chip %u has Pmax reduced below turbo frequency (%d < %d)\n",
+				cpu, chips[i].id, pmsr_pmax,
+				powernv_pstate_info.max);
 	} else if (chips[i].throttled) {
 		chips[i].throttled = false;
 		pr_info("CPU %d on Chip %u has Pmax restored to %d\n", cpu,
......
@@ -175,9 +175,7 @@ static struct cpufreq_driver tegra_cpufreq_driver = {
 	.exit = tegra_cpu_exit,
 	.name = "tegra",
 	.attr = cpufreq_generic_attr,
-#ifdef CONFIG_PM
 	.suspend = cpufreq_generic_suspend,
-#endif
 };
 
 static int __init tegra_cpufreq_init(void)
......