Commit 6ddee424 authored by Rafael J. Wysocki

Merge back earlier 'pm-cpufreq' material.

Conflicts:
	drivers/cpufreq/omap-cpufreq.c
parents e4db1c74 a814613b
......@@ -40,7 +40,6 @@ config ARCH_DAVINCI_DA850
bool "DA850/OMAP-L138/AM18x based system"
select ARCH_DAVINCI_DA8XX
select ARCH_HAS_CPUFREQ
select CPU_FREQ_TABLE
select CP_INTC
config ARCH_DAVINCI_DA8XX
......
......@@ -615,14 +615,12 @@ endmenu
config PXA25x
bool
select CPU_XSCALE
select CPU_FREQ_TABLE if CPU_FREQ
help
Select code specific to PXA21x/25x/26x variants
config PXA27x
bool
select CPU_XSCALE
select CPU_FREQ_TABLE if CPU_FREQ
help
Select code specific to PXA27x variants
......@@ -635,7 +633,6 @@ config CPU_PXA26x
config PXA3xx
bool
select CPU_XSC3
select CPU_FREQ_TABLE if CPU_FREQ
help
Select code specific to PXA3xx variants
......
......@@ -42,23 +42,24 @@ EXPORT_SYMBOL(reset_status);
/*
* This table is setup for a 3.6864MHz Crystal.
*/
static const unsigned short cclk_frequency_100khz[NR_FREQS] = {
590, /* 59.0 MHz */
737, /* 73.7 MHz */
885, /* 88.5 MHz */
1032, /* 103.2 MHz */
1180, /* 118.0 MHz */
1327, /* 132.7 MHz */
1475, /* 147.5 MHz */
1622, /* 162.2 MHz */
1769, /* 176.9 MHz */
1917, /* 191.7 MHz */
2064, /* 206.4 MHz */
2212, /* 221.2 MHz */
2359, /* 235.9 MHz */
2507, /* 250.7 MHz */
2654, /* 265.4 MHz */
2802 /* 280.2 MHz */
struct cpufreq_frequency_table sa11x0_freq_table[NR_FREQS+1] = {
{ .frequency = 59000, /* 59.0 MHz */},
{ .frequency = 73700, /* 73.7 MHz */},
{ .frequency = 88500, /* 88.5 MHz */},
{ .frequency = 103200, /* 103.2 MHz */},
{ .frequency = 118000, /* 118.0 MHz */},
{ .frequency = 132700, /* 132.7 MHz */},
{ .frequency = 147500, /* 147.5 MHz */},
{ .frequency = 162200, /* 162.2 MHz */},
{ .frequency = 176900, /* 176.9 MHz */},
{ .frequency = 191700, /* 191.7 MHz */},
{ .frequency = 206400, /* 206.4 MHz */},
{ .frequency = 221200, /* 221.2 MHz */},
{ .frequency = 235900, /* 235.9 MHz */},
{ .frequency = 250700, /* 250.7 MHz */},
{ .frequency = 265400, /* 265.4 MHz */},
{ .frequency = 280200, /* 280.2 MHz */},
{ .frequency = CPUFREQ_TABLE_END, },
};
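For reference, the replacement table is consistent with the 3.6864 MHz crystal mentioned in the comment above: each entry works out to 3.6864 MHz * (16 + 4 * PPCR), rounded to the nearest 100 kHz. A standalone sketch (illustrative only, not part of the patch) that reproduces all sixteen entries:

#include <stdio.h>

int main(void)
{
	unsigned int ppcr;

	for (ppcr = 0; ppcr < 16; ppcr++) {
		/* 3.6864 MHz crystal * (16 + 4 * PPCR), in units of 0.1 kHz */
		unsigned int tenth_khz = 36864 * (16 + 4 * ppcr);
		/* round to the nearest 100 kHz, matching the table above */
		unsigned int khz = (tenth_khz + 500) / 1000 * 100;

		printf("{ .frequency = %6u, },\t/* %u.%u MHz */\n",
		       khz, khz / 1000, khz % 1000 / 100);
	}
	return 0;
}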
/* rounds up(!) */
......@@ -66,10 +67,8 @@ unsigned int sa11x0_freq_to_ppcr(unsigned int khz)
{
int i;
khz /= 100;
for (i = 0; i < NR_FREQS; i++)
if (cclk_frequency_100khz[i] >= khz)
if (sa11x0_freq_table[i].frequency >= khz)
break;
return i;
......@@ -79,37 +78,15 @@ unsigned int sa11x0_ppcr_to_freq(unsigned int idx)
{
unsigned int freq = 0;
if (idx < NR_FREQS)
freq = cclk_frequency_100khz[idx] * 100;
freq = sa11x0_freq_table[idx].frequency;
return freq;
}
/* make sure that only the "userspace" governor is run -- anything else wouldn't make sense on
* this platform, anyway.
*/
int sa11x0_verify_speed(struct cpufreq_policy *policy)
{
unsigned int tmp;
if (policy->cpu)
return -EINVAL;
cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, policy->cpuinfo.max_freq);
/* make sure that at least one frequency is within the policy */
tmp = cclk_frequency_100khz[sa11x0_freq_to_ppcr(policy->min)] * 100;
if (tmp > policy->max)
policy->max = tmp;
cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, policy->cpuinfo.max_freq);
return 0;
}
unsigned int sa11x0_getspeed(unsigned int cpu)
{
if (cpu)
return 0;
return cclk_frequency_100khz[PPCR & 0xf] * 100;
return sa11x0_freq_table[PPCR & 0xf].frequency;
}
/*
......
......@@ -3,6 +3,7 @@
*
* Author: Nicolas Pitre
*/
#include <linux/cpufreq.h>
#include <linux/reboot.h>
extern void sa1100_timer_init(void);
......@@ -19,10 +20,8 @@ extern void sa11x0_init_late(void);
extern void sa1110_mb_enable(void);
extern void sa1110_mb_disable(void);
struct cpufreq_policy;
extern struct cpufreq_frequency_table sa11x0_freq_table[];
extern unsigned int sa11x0_freq_to_ppcr(unsigned int khz);
extern int sa11x0_verify_speed(struct cpufreq_policy *policy);
extern unsigned int sa11x0_getspeed(unsigned int cpu);
extern unsigned int sa11x0_ppcr_to_freq(unsigned int idx);
......
......@@ -34,7 +34,6 @@ config UX500_SOC_COMMON
config UX500_SOC_DB8500
bool
select CPU_FREQ_TABLE if CPU_FREQ
select MFD_DB8500_PRCMU
select PINCTRL_DB8500
select PINCTRL_DB8540
......
......@@ -1429,7 +1429,6 @@ source "drivers/cpufreq/Kconfig"
config BFIN_CPU_FREQ
bool
depends on CPU_FREQ
select CPU_FREQ_TABLE
default y
config CPU_VOLTAGE
......
......@@ -130,13 +130,11 @@ config SVINTO_SIM
config ETRAXFS
bool "ETRAX-FS-V32"
select CPU_FREQ_TABLE if CPU_FREQ
help
Support CRIS V32.
config CRIS_MACH_ARTPEC3
bool "ARTPEC-3"
select CPU_FREQ_TABLE if CPU_FREQ
help
Support Axis ARTPEC-3.
......
......@@ -17,15 +17,11 @@ config CPU_FREQ
if CPU_FREQ
config CPU_FREQ_TABLE
tristate
config CPU_FREQ_GOV_COMMON
bool
config CPU_FREQ_STAT
tristate "CPU frequency translation statistics"
select CPU_FREQ_TABLE
default y
help
This driver exports CPU frequency statistics information through sysfs
......@@ -143,7 +139,6 @@ config CPU_FREQ_GOV_USERSPACE
config CPU_FREQ_GOV_ONDEMAND
tristate "'ondemand' cpufreq policy governor"
select CPU_FREQ_TABLE
select CPU_FREQ_GOV_COMMON
help
'ondemand' - This driver adds a dynamic cpufreq policy governor.
......@@ -187,7 +182,6 @@ config CPU_FREQ_GOV_CONSERVATIVE
config GENERIC_CPUFREQ_CPU0
tristate "Generic CPU0 cpufreq driver"
depends on HAVE_CLK && REGULATOR && PM_OPP && OF
select CPU_FREQ_TABLE
help
This adds a generic cpufreq driver for CPU0 frequency management.
It supports both uniprocessor (UP) and symmetric multiprocessor (SMP)
......@@ -223,7 +217,6 @@ depends on IA64
config IA64_ACPI_CPUFREQ
tristate "ACPI Processor P-States driver"
select CPU_FREQ_TABLE
depends on ACPI_PROCESSOR
help
This driver adds a CPUFreq driver which utilizes the ACPI
......@@ -240,7 +233,6 @@ depends on MIPS
config LOONGSON2_CPUFREQ
tristate "Loongson2 CPUFreq Driver"
select CPU_FREQ_TABLE
help
This option adds a CPUFreq driver for loongson processors which
support software configurable cpu frequency.
......@@ -262,7 +254,6 @@ menu "SPARC CPU frequency scaling drivers"
depends on SPARC64
config SPARC_US3_CPUFREQ
tristate "UltraSPARC-III CPU Frequency driver"
select CPU_FREQ_TABLE
help
This adds the CPUFreq driver for UltraSPARC-III processors.
......@@ -272,7 +263,6 @@ config SPARC_US3_CPUFREQ
config SPARC_US2E_CPUFREQ
tristate "UltraSPARC-IIe CPU Frequency driver"
select CPU_FREQ_TABLE
help
This adds the CPUFreq driver for UltraSPARC-IIe processors.
......@@ -285,7 +275,6 @@ menu "SH CPU Frequency scaling"
depends on SUPERH
config SH_CPU_FREQ
tristate "SuperH CPU Frequency driver"
select CPU_FREQ_TABLE
help
This adds the cpufreq driver for SuperH. Any CPU that supports
clock rate rounding through the clock framework can use this
......
......@@ -5,7 +5,6 @@
config ARM_BIG_LITTLE_CPUFREQ
tristate "Generic ARM big LITTLE CPUfreq driver"
depends on ARM_CPU_TOPOLOGY && PM_OPP && HAVE_CLK
select CPU_FREQ_TABLE
help
This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
......@@ -18,7 +17,6 @@ config ARM_DT_BL_CPUFREQ
config ARM_EXYNOS_CPUFREQ
bool
select CPU_FREQ_TABLE
config ARM_EXYNOS4210_CPUFREQ
bool "SAMSUNG EXYNOS4210"
......@@ -58,7 +56,6 @@ config ARM_EXYNOS5440_CPUFREQ
depends on SOC_EXYNOS5440
depends on HAVE_CLK && PM_OPP && OF
default y
select CPU_FREQ_TABLE
help
This adds the CPUFreq driver for Samsung EXYNOS5440
SoC. The nature of exynos5440 clock controller is
......@@ -85,7 +82,6 @@ config ARM_IMX6Q_CPUFREQ
tristate "Freescale i.MX6Q cpufreq support"
depends on SOC_IMX6Q
depends on REGULATOR_ANATOP
select CPU_FREQ_TABLE
help
This adds cpufreq driver support for Freescale i.MX6Q SOC.
......@@ -101,7 +97,6 @@ config ARM_INTEGRATOR
config ARM_KIRKWOOD_CPUFREQ
def_bool ARCH_KIRKWOOD && OF
select CPU_FREQ_TABLE
help
This adds the CPUFreq driver for Marvell Kirkwood
SoCs.
......@@ -110,7 +105,6 @@ config ARM_OMAP2PLUS_CPUFREQ
bool "TI OMAP2+"
depends on ARCH_OMAP2PLUS
default ARCH_OMAP2PLUS
select CPU_FREQ_TABLE
config ARM_S3C_CPUFREQ
bool
......@@ -165,7 +159,6 @@ config ARM_S3C2412_CPUFREQ
config ARM_S3C2416_CPUFREQ
bool "S3C2416 CPU Frequency scaling support"
depends on CPU_S3C2416
select CPU_FREQ_TABLE
help
This adds the CPUFreq driver for the Samsung S3C2416 and
S3C2450 SoC. The S3C2416 supports changing the rate of the
......@@ -196,7 +189,6 @@ config ARM_S3C2440_CPUFREQ
config ARM_S3C64XX_CPUFREQ
bool "Samsung S3C64XX"
depends on CPU_S3C6410
select CPU_FREQ_TABLE
default y
help
This adds the CPUFreq driver for Samsung S3C6410 SoC.
......@@ -206,7 +198,6 @@ config ARM_S3C64XX_CPUFREQ
config ARM_S5PV210_CPUFREQ
bool "Samsung S5PV210 and S5PC110"
depends on CPU_S5PV210
select CPU_FREQ_TABLE
default y
help
This adds the CPUFreq driver for Samsung S5PV210 and
......@@ -223,7 +214,6 @@ config ARM_SA1110_CPUFREQ
config ARM_SPEAR_CPUFREQ
bool "SPEAr CPUFreq support"
depends on PLAT_SPEAR
select CPU_FREQ_TABLE
default y
help
This adds the CPUFreq driver support for SPEAr SOCs.
......@@ -231,7 +221,6 @@ config ARM_SPEAR_CPUFREQ
config ARM_TEGRA_CPUFREQ
bool "TEGRA CPUFreq support"
depends on ARCH_TEGRA
select CPU_FREQ_TABLE
default y
help
This adds the CPUFreq driver support for TEGRA SOCs.
config CPU_FREQ_CBE
tristate "CBE frequency scaling"
depends on CBE_RAS && PPC_CELL
select CPU_FREQ_TABLE
default m
help
This adds the cpufreq driver for Cell BE processors.
......@@ -20,7 +19,6 @@ config CPU_FREQ_CBE_PMI
config CPU_FREQ_MAPLE
bool "Support for Maple 970FX Evaluation Board"
depends on PPC_MAPLE
select CPU_FREQ_TABLE
help
This adds support for frequency switching on Maple 970FX
Evaluation Board and compatible boards (IBM JS2x blades).
......@@ -28,7 +26,6 @@ config CPU_FREQ_MAPLE
config PPC_CORENET_CPUFREQ
tristate "CPU frequency scaling driver for Freescale E500MC SoCs"
depends on PPC_E500MC && OF && COMMON_CLK
select CPU_FREQ_TABLE
select CLK_PPC_CORENET
help
This adds the CPUFreq driver support for Freescale e500mc,
......@@ -38,7 +35,6 @@ config PPC_CORENET_CPUFREQ
config CPU_FREQ_PMAC
bool "Support for Apple PowerBooks"
depends on ADB_PMU && PPC32
select CPU_FREQ_TABLE
help
This adds support for frequency switching on Apple PowerBooks,
this currently includes some models of iBook & Titanium
......@@ -47,7 +43,6 @@ config CPU_FREQ_PMAC
config CPU_FREQ_PMAC64
bool "Support for some Apple G5s"
depends on PPC_PMAC && PPC64
select CPU_FREQ_TABLE
help
This adds support for frequency switching on Apple iMac G5,
and some of the more recent desktop G5 machines as well.
......@@ -55,7 +50,6 @@ config CPU_FREQ_PMAC64
config PPC_PASEMI_CPUFREQ
bool "Support for PA Semi PWRficient"
depends on PPC_PASEMI
select CPU_FREQ_TABLE
default y
help
This adds the support for frequency switching on PA Semi
......
......@@ -31,7 +31,6 @@ config X86_PCC_CPUFREQ
config X86_ACPI_CPUFREQ
tristate "ACPI Processor P-States driver"
select CPU_FREQ_TABLE
depends on ACPI_PROCESSOR
help
This driver adds a CPUFreq driver which utilizes the ACPI
......@@ -60,7 +59,6 @@ config X86_ACPI_CPUFREQ_CPB
config ELAN_CPUFREQ
tristate "AMD Elan SC400 and SC410"
select CPU_FREQ_TABLE
depends on MELAN
---help---
This adds the CPUFreq driver for AMD Elan SC400 and SC410
......@@ -76,7 +74,6 @@ config ELAN_CPUFREQ
config SC520_CPUFREQ
tristate "AMD Elan SC520"
select CPU_FREQ_TABLE
depends on MELAN
---help---
This adds the CPUFreq driver for AMD Elan SC520 processor.
......@@ -88,7 +85,6 @@ config SC520_CPUFREQ
config X86_POWERNOW_K6
tristate "AMD Mobile K6-2/K6-3 PowerNow!"
select CPU_FREQ_TABLE
depends on X86_32
help
This adds the CPUFreq driver for mobile AMD K6-2+ and mobile
......@@ -100,7 +96,6 @@ config X86_POWERNOW_K6
config X86_POWERNOW_K7
tristate "AMD Mobile Athlon/Duron PowerNow!"
select CPU_FREQ_TABLE
depends on X86_32
help
This adds the CPUFreq driver for mobile AMD K7 mobile processors.
......@@ -118,7 +113,6 @@ config X86_POWERNOW_K7_ACPI
config X86_POWERNOW_K8
tristate "AMD Opteron/Athlon64 PowerNow!"
select CPU_FREQ_TABLE
depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ
help
This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors.
......@@ -132,7 +126,6 @@ config X86_POWERNOW_K8
config X86_AMD_FREQ_SENSITIVITY
tristate "AMD frequency sensitivity feedback powersave bias"
depends on CPU_FREQ_GOV_ONDEMAND && X86_ACPI_CPUFREQ && CPU_SUP_AMD
select CPU_FREQ_TABLE
help
This adds AMD-specific powersave bias function to the ondemand
governor, which allows it to make more power-conscious frequency
......@@ -160,7 +153,6 @@ config X86_GX_SUSPMOD
config X86_SPEEDSTEP_CENTRINO
tristate "Intel Enhanced SpeedStep (deprecated)"
select CPU_FREQ_TABLE
select X86_SPEEDSTEP_CENTRINO_TABLE if X86_32
depends on X86_32 || (X86_64 && ACPI_PROCESSOR)
help
......@@ -190,7 +182,6 @@ config X86_SPEEDSTEP_CENTRINO_TABLE
config X86_SPEEDSTEP_ICH
tristate "Intel Speedstep on ICH-M chipsets (ioport interface)"
select CPU_FREQ_TABLE
depends on X86_32
help
This adds the CPUFreq driver for certain mobile Intel Pentium III
......@@ -204,7 +195,6 @@ config X86_SPEEDSTEP_ICH
config X86_SPEEDSTEP_SMI
tristate "Intel SpeedStep on 440BX/ZX/MX chipsets (SMI interface)"
select CPU_FREQ_TABLE
depends on X86_32
help
This adds the CPUFreq driver for certain mobile Intel Pentium III
......@@ -217,7 +207,6 @@ config X86_SPEEDSTEP_SMI
config X86_P4_CLOCKMOD
tristate "Intel Pentium 4 clock modulation"
select CPU_FREQ_TABLE
help
This adds the CPUFreq driver for Intel Pentium 4 / XEON
processors. When enabled it will lower CPU temperature by skipping
......@@ -259,7 +248,6 @@ config X86_LONGRUN
config X86_LONGHAUL
tristate "VIA Cyrix III Longhaul"
select CPU_FREQ_TABLE
depends on X86_32 && ACPI_PROCESSOR
help
This adds the CPUFreq driver for VIA Samuel/CyrixIII,
......@@ -272,7 +260,6 @@ config X86_LONGHAUL
config X86_E_POWERSAVER
tristate "VIA C7 Enhanced PowerSaver (DANGEROUS)"
select CPU_FREQ_TABLE
depends on X86_32 && ACPI_PROCESSOR
help
This adds the CPUFreq driver for VIA C7 processors. However, this driver
......
# CPUfreq core
obj-$(CONFIG_CPU_FREQ) += cpufreq.o
obj-$(CONFIG_CPU_FREQ) += cpufreq.o freq_table.o
# CPUfreq stats
obj-$(CONFIG_CPU_FREQ_STAT) += cpufreq_stats.o
......@@ -11,9 +11,6 @@ obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o
obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o
obj-$(CONFIG_CPU_FREQ_GOV_COMMON) += cpufreq_governor.o
# CPUfreq cross-arch helpers
obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o
obj-$(CONFIG_GENERIC_CPUFREQ_CPU0) += cpufreq-cpu0.o
##################################################################################
......
......@@ -516,15 +516,6 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
return result;
}
static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
{
struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
pr_debug("acpi_cpufreq_verify\n");
return cpufreq_frequency_table_verify(policy, data->freq_table);
}
static unsigned long
acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
{
......@@ -837,7 +828,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
perf->state = 0;
result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
result = cpufreq_table_validate_and_show(policy, data->freq_table);
if (result)
goto err_freqfree;
......@@ -846,12 +837,16 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
switch (perf->control_register.space_id) {
case ACPI_ADR_SPACE_SYSTEM_IO:
/* Current speed is unknown and not detectable by IO port */
/*
* The core will not set policy->cur, because
* cpufreq_driver->get is NULL, so we need to set it here.
* However, we have to guess it, because the current speed is
* unknown and not detectable via IO ports.
*/
policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
break;
case ACPI_ADR_SPACE_FIXED_HARDWARE:
acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
policy->cur = get_cur_freq_on_cpu(cpu);
break;
default:
break;
......@@ -868,8 +863,6 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
(u32) perf->states[i].power,
(u32) perf->states[i].transition_latency);
cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
/*
* the first call to ->target() should result in us actually
* writing something to the appropriate registers.
......@@ -929,7 +922,7 @@ static struct freq_attr *acpi_cpufreq_attr[] = {
};
static struct cpufreq_driver acpi_cpufreq_driver = {
.verify = acpi_cpufreq_verify,
.verify = cpufreq_generic_frequency_table_verify,
.target = acpi_cpufreq_target,
.bios_limit = acpi_processor_get_bios_limit,
.init = acpi_cpufreq_cpu_init,
......
......@@ -47,14 +47,6 @@ static unsigned int bL_cpufreq_get(unsigned int cpu)
return clk_get_rate(clk[cur_cluster]) / 1000;
}
/* Validate policy frequency range */
static int bL_cpufreq_verify_policy(struct cpufreq_policy *policy)
{
u32 cur_cluster = cpu_to_cluster(policy->cpu);
return cpufreq_frequency_table_verify(policy, freq_table[cur_cluster]);
}
/* Set clock frequency */
static int bL_cpufreq_set_target(struct cpufreq_policy *policy,
unsigned int target_freq, unsigned int relation)
......@@ -127,7 +119,7 @@ static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
}
name[12] = cluster + '0';
clk[cluster] = clk_get_sys(name, NULL);
clk[cluster] = clk_get(cpu_dev, name);
if (!IS_ERR(clk[cluster])) {
dev_dbg(cpu_dev, "%s: clk: %p & freq table: %p, cluster: %d\n",
__func__, clk[cluster], freq_table[cluster],
......@@ -165,7 +157,7 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy)
if (ret)
return ret;
ret = cpufreq_frequency_table_cpuinfo(policy, freq_table[cur_cluster]);
ret = cpufreq_table_validate_and_show(policy, freq_table[cur_cluster]);
if (ret) {
dev_err(cpu_dev, "CPU %d, cluster: %d invalid freq table\n",
policy->cpu, cur_cluster);
......@@ -173,16 +165,12 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy)
return ret;
}
cpufreq_frequency_table_get_attr(freq_table[cur_cluster], policy->cpu);
if (arm_bL_ops->get_transition_latency)
policy->cpuinfo.transition_latency =
arm_bL_ops->get_transition_latency(cpu_dev);
else
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
policy->cur = bL_cpufreq_get(policy->cpu);
cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
......@@ -200,28 +188,23 @@ static int bL_cpufreq_exit(struct cpufreq_policy *policy)
return -ENODEV;
}
cpufreq_frequency_table_put_attr(policy->cpu);
put_cluster_clk_and_freq_table(cpu_dev);
dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu);
return 0;
}
/* Export freq_table to sysfs */
static struct freq_attr *bL_cpufreq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
static struct cpufreq_driver bL_cpufreq_driver = {
.name = "arm-big-little",
.flags = CPUFREQ_STICKY,
.verify = bL_cpufreq_verify_policy,
.flags = CPUFREQ_STICKY |
CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
.verify = cpufreq_generic_frequency_table_verify,
.target = bL_cpufreq_set_target,
.get = bL_cpufreq_get,
.init = bL_cpufreq_init,
.exit = bL_cpufreq_exit,
.have_governor_per_policy = true,
.attr = bL_cpufreq_attr,
.attr = cpufreq_generic_attr,
};
int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
......
......@@ -19,18 +19,10 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
static struct clk *cpuclk;
static int at32_verify_speed(struct cpufreq_policy *policy)
{
if (policy->cpu != 0)
return -EINVAL;
cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
policy->cpuinfo.max_freq);
return 0;
}
static struct cpufreq_frequency_table *freq_table;
static unsigned int at32_get_speed(unsigned int cpu)
{
......@@ -85,31 +77,68 @@ static int at32_set_target(struct cpufreq_policy *policy,
static int __init at32_cpufreq_driver_init(struct cpufreq_policy *policy)
{
unsigned int frequency, rate, min_freq;
int retval, steps, i;
if (policy->cpu != 0)
return -EINVAL;
cpuclk = clk_get(NULL, "cpu");
if (IS_ERR(cpuclk)) {
pr_debug("cpufreq: could not get CPU clk\n");
return PTR_ERR(cpuclk);
retval = PTR_ERR(cpuclk);
goto out_err;
}
policy->cpuinfo.min_freq = (clk_round_rate(cpuclk, 1) + 500) / 1000;
policy->cpuinfo.max_freq = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
min_freq = (clk_round_rate(cpuclk, 1) + 500) / 1000;
frequency = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
policy->cpuinfo.transition_latency = 0;
policy->cur = at32_get_speed(0);
policy->min = policy->cpuinfo.min_freq;
policy->max = policy->cpuinfo.max_freq;
printk("cpufreq: AT32AP CPU frequency driver\n");
/*
* The AVR32 CPU frequency scales in powers of two between the maximum and
* the minimum, so size the table accordingly, plus one slot for the end marker.
*
* Further validate that the frequency is usable, and append it to the
* frequency table.
*/
steps = fls(frequency / min_freq) + 1;
freq_table = kzalloc(steps * sizeof(struct cpufreq_frequency_table),
GFP_KERNEL);
if (!freq_table) {
retval = -ENOMEM;
goto out_err_put_clk;
}
for (i = 0; i < (steps - 1); i++) {
rate = clk_round_rate(cpuclk, frequency * 1000) / 1000;
return 0;
if (rate != frequency)
freq_table[i].frequency = CPUFREQ_ENTRY_INVALID;
else
freq_table[i].frequency = frequency;
frequency /= 2;
}
freq_table[steps - 1].frequency = CPUFREQ_TABLE_END;
retval = cpufreq_table_validate_and_show(policy, freq_table);
if (!retval) {
printk("cpufreq: AT32AP CPU frequency driver\n");
return 0;
}
kfree(freq_table);
out_err_put_clk:
clk_put(cpuclk);
out_err:
return retval;
}
static struct cpufreq_driver at32_driver = {
.name = "at32ap",
.init = at32_cpufreq_driver_init,
.verify = at32_verify_speed,
.verify = cpufreq_generic_frequency_table_verify,
.target = at32_set_target,
.get = at32_get_speed,
.flags = CPUFREQ_STICKY,
......
......@@ -191,11 +191,6 @@ static int bfin_target(struct cpufreq_policy *policy,
return ret;
}
static int bfin_verify_speed(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy, bfin_freq_table);
}
static int __bfin_cpu_init(struct cpufreq_policy *policy)
{
......@@ -209,23 +204,17 @@ static int __bfin_cpu_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency = 50000; /* 50us assumed */
policy->cur = cclk;
cpufreq_frequency_table_get_attr(bfin_freq_table, policy->cpu);
return cpufreq_frequency_table_cpuinfo(policy, bfin_freq_table);
return cpufreq_table_validate_and_show(policy, bfin_freq_table);
}
static struct freq_attr *bfin_freq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
static struct cpufreq_driver bfin_driver = {
.verify = bfin_verify_speed,
.verify = cpufreq_generic_frequency_table_verify,
.target = bfin_target,
.get = bfin_getfreq_khz,
.init = __bfin_cpu_init,
.exit = cpufreq_generic_exit,
.name = "bfin cpufreq",
.attr = bfin_freq_attr,
.attr = cpufreq_generic_attr,
};
static int __init bfin_cpu_init(void)
......
......@@ -30,11 +30,6 @@ static struct clk *cpu_clk;
static struct regulator *cpu_reg;
static struct cpufreq_frequency_table *freq_table;
static int cpu0_verify_speed(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy, freq_table);
}
static unsigned int cpu0_get_speed(unsigned int cpu)
{
return clk_get_rate(cpu_clk) / 1000;
......@@ -127,50 +122,18 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
static int cpu0_cpufreq_init(struct cpufreq_policy *policy)
{
int ret;
ret = cpufreq_frequency_table_cpuinfo(policy, freq_table);
if (ret) {
pr_err("invalid frequency table: %d\n", ret);
return ret;
}
policy->cpuinfo.transition_latency = transition_latency;
policy->cur = clk_get_rate(cpu_clk) / 1000;
/*
* The driver only supports the SMP configuartion where all processors
* share the clock and voltage and clock. Use cpufreq affected_cpus
* interface to have all CPUs scaled together.
*/
cpumask_setall(policy->cpus);
cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
return 0;
}
static int cpu0_cpufreq_exit(struct cpufreq_policy *policy)
{
cpufreq_frequency_table_put_attr(policy->cpu);
return 0;
return cpufreq_generic_init(policy, freq_table, transition_latency);
}
static struct freq_attr *cpu0_cpufreq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
static struct cpufreq_driver cpu0_cpufreq_driver = {
.flags = CPUFREQ_STICKY,
.verify = cpu0_verify_speed,
.verify = cpufreq_generic_frequency_table_verify,
.target = cpu0_set_target,
.get = cpu0_get_speed,
.init = cpu0_cpufreq_init,
.exit = cpu0_cpufreq_exit,
.exit = cpufreq_generic_exit,
.name = "generic_cpu0",
.attr = cpu0_cpufreq_attr,
.attr = cpufreq_generic_attr,
};
static int cpu0_cpufreq_probe(struct platform_device *pdev)
......
......@@ -303,9 +303,7 @@ static int nforce2_verify(struct cpufreq_policy *policy)
if (policy->min < (fsb_pol_max * fid * 100))
policy->max = (fsb_pol_max + 1) * fid * 100;
cpufreq_verify_within_limits(policy,
policy->cpuinfo.min_freq,
policy->cpuinfo.max_freq);
cpufreq_verify_within_cpu_limits(policy);
return 0;
}
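The open-coded limit check is replaced by cpufreq_verify_within_cpu_limits() here and in several places below; judging from the lines it replaces, the helper is presumably just a thin wrapper along these lines:

/* assumed equivalent of the open-coded call it replaces */
static inline void cpufreq_verify_within_cpu_limits(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);
}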
......@@ -362,7 +360,6 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy)
policy->min = policy->cpuinfo.min_freq = min_fsb * fid * 100;
policy->max = policy->cpuinfo.max_freq = max_fsb * fid * 100;
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
policy->cur = nforce2_get(policy->cpu);
return 0;
}
......
......@@ -67,13 +67,11 @@ static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
#define lock_policy_rwsem(mode, cpu) \
static int lock_policy_rwsem_##mode(int cpu) \
static void lock_policy_rwsem_##mode(int cpu) \
{ \
struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); \
BUG_ON(!policy); \
down_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu)); \
\
return 0; \
}
lock_policy_rwsem(read, cpu);
......@@ -135,7 +133,7 @@ static DEFINE_MUTEX(cpufreq_governor_mutex);
bool have_governor_per_policy(void)
{
return cpufreq_driver->have_governor_per_policy;
return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);
......@@ -183,6 +181,37 @@ u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
/*
* This is a generic cpufreq init() routine which can be used by cpufreq
* drivers of SMP systems. It will do the following:
* - validate & show the freq table passed in
* - set the policy's transition latency
* - fill policy->cpus with all possible CPUs
*/
int cpufreq_generic_init(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table,
unsigned int transition_latency)
{
int ret;
ret = cpufreq_table_validate_and_show(policy, table);
if (ret) {
pr_err("%s: invalid frequency table: %d\n", __func__, ret);
return ret;
}
policy->cpuinfo.transition_latency = transition_latency;
/*
* The driver only supports the SMP configuration where all processors
* share the clock and voltage.
*/
cpumask_setall(policy->cpus);
return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
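As the driver conversions later in this merge show (cris, davinci, dbx500, exynos), an ->init() routine that only needs these common steps collapses to a single call. A minimal sketch, where foo_cpufreq_init and foo_freq_table are placeholder names and the 1 ms latency is just an example value:

static int foo_cpufreq_init(struct cpufreq_policy *policy)
{
	/* validate & expose the table, set a 1 ms transition latency,
	 * and mark all CPUs as sharing this policy */
	return cpufreq_generic_init(policy, foo_freq_table, 1000000);
}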
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
struct cpufreq_policy *policy = NULL;
......@@ -414,7 +443,7 @@ show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
static int __cpufreq_set_policy(struct cpufreq_policy *policy,
static int cpufreq_set_policy(struct cpufreq_policy *policy,
struct cpufreq_policy *new_policy);
/**
......@@ -435,7 +464,7 @@ static ssize_t store_##file_name \
if (ret != 1) \
return -EINVAL; \
\
ret = __cpufreq_set_policy(policy, &new_policy); \
ret = cpufreq_set_policy(policy, &new_policy); \
policy->user_policy.object = policy->object; \
\
return ret ? ret : count; \
......@@ -493,11 +522,7 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
&new_policy.governor))
return -EINVAL;
/*
* Do not use cpufreq_set_policy here or the user_policy.max
* will be wrongly overridden
*/
ret = __cpufreq_set_policy(policy, &new_policy);
ret = cpufreq_set_policy(policy, &new_policy);
policy->user_policy.policy = policy->policy;
policy->user_policy.governor = policy->governor;
......@@ -653,13 +678,12 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
ssize_t ret = -EINVAL;
ssize_t ret;
if (!down_read_trylock(&cpufreq_rwsem))
goto exit;
return -EINVAL;
if (lock_policy_rwsem_read(policy->cpu) < 0)
goto up_read;
lock_policy_rwsem_read(policy->cpu);
if (fattr->show)
ret = fattr->show(policy, buf);
......@@ -667,10 +691,8 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
ret = -EIO;
unlock_policy_rwsem_read(policy->cpu);
up_read:
up_read(&cpufreq_rwsem);
exit:
return ret;
}
......@@ -689,8 +711,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
if (!down_read_trylock(&cpufreq_rwsem))
goto unlock;
if (lock_policy_rwsem_write(policy->cpu) < 0)
goto up_read;
lock_policy_rwsem_write(policy->cpu);
if (fattr->store)
ret = fattr->store(policy, buf, count);
......@@ -699,7 +720,6 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
unlock_policy_rwsem_write(policy->cpu);
up_read:
up_read(&cpufreq_rwsem);
unlock:
put_online_cpus();
......@@ -844,11 +864,11 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
int ret = 0;
memcpy(&new_policy, policy, sizeof(*policy));
/* assure that the starting sequence is run in __cpufreq_set_policy */
/* assure that the starting sequence is run in cpufreq_set_policy */
policy->governor = NULL;
/* set default policy */
ret = __cpufreq_set_policy(policy, &new_policy);
ret = cpufreq_set_policy(policy, &new_policy);
policy->user_policy.policy = policy->policy;
policy->user_policy.governor = policy->governor;
......@@ -949,7 +969,7 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
if (cpu == policy->cpu)
if (WARN_ON(cpu == policy->cpu))
return;
/*
......@@ -966,9 +986,7 @@ static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
up_write(&per_cpu(cpu_policy_rwsem, policy->last_cpu));
#ifdef CONFIG_CPU_FREQ_TABLE
cpufreq_frequency_table_update_policy_cpu(policy);
#endif
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_UPDATE_POLICY_CPU, policy);
}
......@@ -1053,6 +1071,14 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
goto err_set_policy_cpu;
}
if (cpufreq_driver->get) {
policy->cur = cpufreq_driver->get(policy->cpu);
if (!policy->cur) {
pr_err("%s: ->get() failed\n", __func__);
goto err_get_freq;
}
}
/* related cpus should atleast have policy->cpus */
cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
......@@ -1107,6 +1133,9 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
per_cpu(cpufreq_cpu_data, j) = NULL;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
err_get_freq:
if (cpufreq_driver->exit)
cpufreq_driver->exit(policy);
err_set_policy_cpu:
cpufreq_policy_free(policy);
nomem_out:
......@@ -1147,7 +1176,7 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
if (ret) {
pr_err("%s: Failed to move kobj: %d", __func__, ret);
WARN_ON(lock_policy_rwsem_write(old_cpu));
lock_policy_rwsem_write(old_cpu);
cpumask_set_cpu(old_cpu, policy->cpus);
unlock_policy_rwsem_write(old_cpu);
......@@ -1208,14 +1237,13 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
if (!frozen)
sysfs_remove_link(&dev->kobj, "cpufreq");
} else if (cpus > 1) {
new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen);
if (new_cpu >= 0) {
update_policy_cpu(policy, new_cpu);
if (!frozen) {
pr_debug("%s: policy Kobject moved to cpu: %d "
"from: %d\n",__func__, new_cpu, cpu);
pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
__func__, new_cpu, cpu);
}
}
}
......@@ -1243,7 +1271,7 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
return -EINVAL;
}
WARN_ON(lock_policy_rwsem_write(cpu));
lock_policy_rwsem_write(cpu);
cpus = cpumask_weight(policy->cpus);
if (cpus > 1)
......@@ -1310,36 +1338,24 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
}
/**
* __cpufreq_remove_dev - remove a CPU device
* cpufreq_remove_dev - remove a CPU device
*
* Removes the cpufreq interface for a CPU device.
* Caller should already have policy_rwsem in write mode for this CPU.
* This routine frees the rwsem before returning.
*/
static inline int __cpufreq_remove_dev(struct device *dev,
struct subsys_interface *sif,
bool frozen)
{
int ret;
ret = __cpufreq_remove_dev_prepare(dev, sif, frozen);
if (!ret)
ret = __cpufreq_remove_dev_finish(dev, sif, frozen);
return ret;
}
static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
unsigned int cpu = dev->id;
int retval;
int ret;
if (cpu_is_offline(cpu))
return 0;
retval = __cpufreq_remove_dev(dev, sif, false);
return retval;
ret = __cpufreq_remove_dev_prepare(dev, sif, false);
if (!ret)
ret = __cpufreq_remove_dev_finish(dev, sif, false);
return ret;
}
static void handle_update(struct work_struct *work)
......@@ -1466,14 +1482,11 @@ unsigned int cpufreq_get(unsigned int cpu)
if (!down_read_trylock(&cpufreq_rwsem))
return 0;
if (unlikely(lock_policy_rwsem_read(cpu)))
goto out_policy;
lock_policy_rwsem_read(cpu);
ret_freq = __cpufreq_get(cpu);
unlock_policy_rwsem_read(cpu);
out_policy:
up_read(&cpufreq_rwsem);
return ret_freq;
......@@ -1697,14 +1710,12 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
{
int ret = -EINVAL;
if (unlikely(lock_policy_rwsem_write(policy->cpu)))
goto fail;
lock_policy_rwsem_write(policy->cpu);
ret = __cpufreq_driver_target(policy, target_freq, relation);
unlock_policy_rwsem_write(policy->cpu);
fail:
return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
......@@ -1871,10 +1882,10 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
EXPORT_SYMBOL(cpufreq_get_policy);
/*
* data : current policy.
* policy : policy to be set.
* policy : current policy.
* new_policy: policy to be set.
*/
static int __cpufreq_set_policy(struct cpufreq_policy *policy,
static int cpufreq_set_policy(struct cpufreq_policy *policy,
struct cpufreq_policy *new_policy)
{
int ret = 0, failed = 1;
......@@ -1995,10 +2006,7 @@ int cpufreq_update_policy(unsigned int cpu)
goto no_policy;
}
if (unlikely(lock_policy_rwsem_write(cpu))) {
ret = -EINVAL;
goto fail;
}
lock_policy_rwsem_write(cpu);
pr_debug("updating policy for CPU %u\n", cpu);
memcpy(&new_policy, policy, sizeof(*policy));
......@@ -2023,11 +2031,10 @@ int cpufreq_update_policy(unsigned int cpu)
}
}
ret = __cpufreq_set_policy(policy, &new_policy);
ret = cpufreq_set_policy(policy, &new_policy);
unlock_policy_rwsem_write(cpu);
fail:
cpufreq_cpu_put(policy);
no_policy:
return ret;
......
......@@ -191,7 +191,10 @@ struct common_dbs_data {
struct attribute_group *attr_group_gov_sys; /* one governor - system */
struct attribute_group *attr_group_gov_pol; /* one governor - policy */
/* Common data for platforms that don't set have_governor_per_policy */
/*
* Common data for platforms that don't set
* CPUFREQ_HAVE_GOVERNOR_PER_POLICY
*/
struct dbs_data *gdbs_data;
struct cpu_dbs_common_info *(*get_cpu_cdbs)(int cpu);
......
......@@ -38,18 +38,7 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
if (!per_cpu(cpu_is_managed, policy->cpu))
goto err;
/*
* We're safe from concurrent calls to ->target() here
* as we hold the userspace_mutex lock. If we were calling
* cpufreq_driver_target, a deadlock situation might occur:
* A: cpufreq_set (lock userspace_mutex) ->
* cpufreq_driver_target(lock policy->lock)
* B: cpufreq_set_policy(lock policy->lock) ->
* __cpufreq_governor ->
* cpufreq_governor_userspace (lock userspace_mutex)
*/
ret = __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
err:
mutex_unlock(&userspace_mutex);
return ret;
......
......@@ -54,11 +54,6 @@ static void cris_freq_set_cpu_state(struct cpufreq_policy *policy,
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
};
static int cris_freq_verify(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy, &cris_freq_table[0]);
}
static int cris_freq_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
......@@ -76,42 +71,17 @@ static int cris_freq_target(struct cpufreq_policy *policy,
static int cris_freq_cpu_init(struct cpufreq_policy *policy)
{
int result;
/* cpuinfo and default policy values */
policy->cpuinfo.transition_latency = 1000000; /* 1ms */
policy->cur = cris_freq_get_cpu_frequency(0);
result = cpufreq_frequency_table_cpuinfo(policy, cris_freq_table);
if (result)
return (result);
cpufreq_frequency_table_get_attr(cris_freq_table, policy->cpu);
return 0;
}
static int cris_freq_cpu_exit(struct cpufreq_policy *policy)
{
cpufreq_frequency_table_put_attr(policy->cpu);
return 0;
return cpufreq_generic_init(policy, cris_freq_table, 1000000);
}
static struct freq_attr *cris_freq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
static struct cpufreq_driver cris_freq_driver = {
.get = cris_freq_get_cpu_frequency,
.verify = cris_freq_verify,
.verify = cpufreq_generic_frequency_table_verify,
.target = cris_freq_target,
.init = cris_freq_cpu_init,
.exit = cris_freq_cpu_exit,
.exit = cpufreq_generic_exit,
.name = "cris_freq",
.attr = cris_freq_attr,
.attr = cpufreq_generic_attr,
};
static int __init cris_freq_init(void)
......
......@@ -54,11 +54,6 @@ static void cris_freq_set_cpu_state(struct cpufreq_policy *policy,
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
};
static int cris_freq_verify(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy, &cris_freq_table[0]);
}
static int cris_freq_target(struct cpufreq_policy *policy,
unsigned int target_freq, unsigned int relation)
{
......@@ -75,40 +70,17 @@ static int cris_freq_target(struct cpufreq_policy *policy,
static int cris_freq_cpu_init(struct cpufreq_policy *policy)
{
int result;
/* cpuinfo and default policy values */
policy->cpuinfo.transition_latency = 1000000; /* 1ms */
policy->cur = cris_freq_get_cpu_frequency(0);
result = cpufreq_frequency_table_cpuinfo(policy, cris_freq_table);
if (result)
return (result);
cpufreq_frequency_table_get_attr(cris_freq_table, policy->cpu);
return 0;
}
static int cris_freq_cpu_exit(struct cpufreq_policy *policy)
{
cpufreq_frequency_table_put_attr(policy->cpu);
return 0;
return cpufreq_generic_init(policy, cris_freq_table, 1000000);
}
static struct freq_attr *cris_freq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
static struct cpufreq_driver cris_freq_driver = {
.get = cris_freq_get_cpu_frequency,
.verify = cris_freq_verify,
.verify = cpufreq_generic_frequency_table_verify,
.target = cris_freq_target,
.init = cris_freq_cpu_init,
.exit = cris_freq_cpu_exit,
.exit = cpufreq_generic_exit,
.name = "cris_freq",
.attr = cris_freq_attr,
.attr = cpufreq_generic_attr,
};
static int __init cris_freq_init(void)
......
......@@ -50,9 +50,7 @@ static int davinci_verify_speed(struct cpufreq_policy *policy)
if (policy->cpu)
return -EINVAL;
cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
policy->cpuinfo.max_freq);
cpufreq_verify_within_cpu_limits(policy);
policy->min = clk_round_rate(armclk, policy->min * 1000) / 1000;
policy->max = clk_round_rate(armclk, policy->max * 1000) / 1000;
cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
......@@ -138,47 +136,24 @@ static int davinci_cpu_init(struct cpufreq_policy *policy)
return result;
}
policy->cur = davinci_getspeed(0);
result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
if (result) {
pr_err("%s: cpufreq_frequency_table_cpuinfo() failed",
__func__);
return result;
}
cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
/*
* Time measurement across the target() function yields ~1500-1800us
* time taken with no drivers on notification list.
* Setting the latency to 2000 us to accommodate addition of drivers
* to pre/post change notification list.
*/
policy->cpuinfo.transition_latency = 2000 * 1000;
return 0;
return cpufreq_generic_init(policy, freq_table, 2000 * 1000);
}
static int davinci_cpu_exit(struct cpufreq_policy *policy)
{
cpufreq_frequency_table_put_attr(policy->cpu);
return 0;
}
static struct freq_attr *davinci_cpufreq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
static struct cpufreq_driver davinci_driver = {
.flags = CPUFREQ_STICKY,
.verify = davinci_verify_speed,
.target = davinci_target,
.get = davinci_getspeed,
.init = davinci_cpu_init,
.exit = davinci_cpu_exit,
.exit = cpufreq_generic_exit,
.name = "davinci",
.attr = davinci_cpufreq_attr,
.attr = cpufreq_generic_attr,
};
static int __init davinci_cpufreq_probe(struct platform_device *pdev)
......
......@@ -19,16 +19,6 @@
static struct cpufreq_frequency_table *freq_table;
static struct clk *armss_clk;
static struct freq_attr *dbx500_cpufreq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
static int dbx500_cpufreq_verify_speed(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy, freq_table);
}
static int dbx500_cpufreq_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
......@@ -84,43 +74,17 @@ static unsigned int dbx500_cpufreq_getspeed(unsigned int cpu)
static int dbx500_cpufreq_init(struct cpufreq_policy *policy)
{
int res;
/* get policy fields based on the table */
res = cpufreq_frequency_table_cpuinfo(policy, freq_table);
if (!res)
cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
else {
pr_err("dbx500-cpufreq: Failed to read policy table\n");
return res;
}
policy->min = policy->cpuinfo.min_freq;
policy->max = policy->cpuinfo.max_freq;
policy->cur = dbx500_cpufreq_getspeed(policy->cpu);
policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
/*
* FIXME : Need to take time measurement across the target()
* function with no/some/all drivers in the notification
* list.
*/
policy->cpuinfo.transition_latency = 20 * 1000; /* in ns */
/* policy sharing between dual CPUs */
cpumask_setall(policy->cpus);
return 0;
return cpufreq_generic_init(policy, freq_table, 20 * 1000);
}
static struct cpufreq_driver dbx500_cpufreq_driver = {
.flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS,
.verify = dbx500_cpufreq_verify_speed,
.verify = cpufreq_generic_frequency_table_verify,
.target = dbx500_cpufreq_target,
.get = dbx500_cpufreq_getspeed,
.init = dbx500_cpufreq_init,
.name = "DBX500",
.attr = dbx500_cpufreq_attr,
.attr = cpufreq_generic_attr,
};
static int dbx500_cpufreq_probe(struct platform_device *pdev)
......
......@@ -198,12 +198,6 @@ static int eps_target(struct cpufreq_policy *policy,
return ret;
}
static int eps_verify(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy,
&eps_cpu[policy->cpu]->freq_table[0]);
}
static int eps_cpu_init(struct cpufreq_policy *policy)
{
unsigned int i;
......@@ -401,15 +395,13 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
}
policy->cpuinfo.transition_latency = 140000; /* 844mV -> 700mV in ns */
policy->cur = fsb * current_multiplier;
ret = cpufreq_frequency_table_cpuinfo(policy, &centaur->freq_table[0]);
ret = cpufreq_table_validate_and_show(policy, &centaur->freq_table[0]);
if (ret) {
kfree(centaur);
return ret;
}
cpufreq_frequency_table_get_attr(&centaur->freq_table[0], policy->cpu);
return 0;
}
......@@ -424,19 +416,14 @@ static int eps_cpu_exit(struct cpufreq_policy *policy)
return 0;
}
static struct freq_attr *eps_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
static struct cpufreq_driver eps_driver = {
.verify = eps_verify,
.verify = cpufreq_generic_frequency_table_verify,
.target = eps_target,
.init = eps_cpu_init,
.exit = eps_cpu_exit,
.get = eps_get,
.name = "e_powersaver",
.attr = eps_attr,
.attr = cpufreq_generic_attr,
};
......
......@@ -165,19 +165,6 @@ static void elanfreq_set_cpu_state(struct cpufreq_policy *policy,
};
/**
* elanfreq_validatespeed: test if frequency range is valid
* @policy: the policy to validate
*
* This function checks if a given frequency range in kHz is valid
* for the hardware supported by the driver.
*/
static int elanfreq_verify(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy, &elanfreq_table[0]);
}
static int elanfreq_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
......@@ -202,7 +189,6 @@ static int elanfreq_cpu_init(struct cpufreq_policy *policy)
{
struct cpuinfo_x86 *c = &cpu_data(0);
unsigned int i;
int result;
/* capability check */
if ((c->x86_vendor != X86_VENDOR_AMD) ||
......@@ -221,21 +207,8 @@ static int elanfreq_cpu_init(struct cpufreq_policy *policy)
/* cpuinfo and default policy values */
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
policy->cur = elanfreq_get_cpu_frequency(0);
result = cpufreq_frequency_table_cpuinfo(policy, elanfreq_table);
if (result)
return result;
cpufreq_frequency_table_get_attr(elanfreq_table, policy->cpu);
return 0;
}
static int elanfreq_cpu_exit(struct cpufreq_policy *policy)
{
cpufreq_frequency_table_put_attr(policy->cpu);
return 0;
return cpufreq_table_validate_and_show(policy, elanfreq_table);
}
......@@ -261,20 +234,14 @@ __setup("elanfreq=", elanfreq_setup);
#endif
static struct freq_attr *elanfreq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
static struct cpufreq_driver elanfreq_driver = {
.get = elanfreq_get_cpu_frequency,
.verify = elanfreq_verify,
.verify = cpufreq_generic_frequency_table_verify,
.target = elanfreq_target,
.init = elanfreq_cpu_init,
.exit = elanfreq_cpu_exit,
.exit = cpufreq_generic_exit,
.name = "elanfreq",
.attr = elanfreq_attr,
.attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id elan_id[] = {
......
......@@ -31,12 +31,6 @@ static unsigned int locking_frequency;
static bool frequency_locked;
static DEFINE_MUTEX(cpufreq_lock);
static int exynos_verify_speed(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy,
exynos_info->freq_table);
}
static unsigned int exynos_getspeed(unsigned int cpu)
{
return clk_get_rate(exynos_info->cpu_clk) / 1000;
......@@ -141,7 +135,7 @@ static int exynos_cpufreq_scale(unsigned int target_freq)
if ((freqs.new < freqs.old) ||
((freqs.new > freqs.old) && safe_arm_volt)) {
/* down the voltage after frequency change */
regulator_set_voltage(arm_regulator, arm_volt,
ret = regulator_set_voltage(arm_regulator, arm_volt,
arm_volt);
if (ret) {
pr_err("%s: failed to set cpu voltage to %d\n",
......@@ -247,38 +241,18 @@ static struct notifier_block exynos_cpufreq_nb = {
static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
policy->cur = policy->min = policy->max = exynos_getspeed(policy->cpu);
cpufreq_frequency_table_get_attr(exynos_info->freq_table, policy->cpu);
/* set the transition latency value */
policy->cpuinfo.transition_latency = 100000;
cpumask_setall(policy->cpus);
return cpufreq_frequency_table_cpuinfo(policy, exynos_info->freq_table);
return cpufreq_generic_init(policy, exynos_info->freq_table, 100000);
}
static int exynos_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
cpufreq_frequency_table_put_attr(policy->cpu);
return 0;
}
static struct freq_attr *exynos_cpufreq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
static struct cpufreq_driver exynos_driver = {
.flags = CPUFREQ_STICKY,
.verify = exynos_verify_speed,
.verify = cpufreq_generic_frequency_table_verify,
.target = exynos_target,
.get = exynos_getspeed,
.init = exynos_cpufreq_cpu_init,
.exit = exynos_cpufreq_cpu_exit,
.exit = cpufreq_generic_exit,
.name = "exynos_cpufreq",
.attr = exynos_cpufreq_attr,
.attr = cpufreq_generic_attr,
#ifdef CONFIG_PM
.suspend = exynos_cpufreq_suspend,
.resume = exynos_cpufreq_resume,
......
......@@ -81,9 +81,9 @@ static void exynos4210_set_clkdiv(unsigned int div_index)
static void exynos4210_set_apll(unsigned int index)
{
unsigned int tmp;
unsigned int tmp, freq = apll_freq_4210[index].freq;
/* 1. MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
/* MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
clk_set_parent(moutcore, mout_mpll);
do {
......@@ -92,21 +92,9 @@ static void exynos4210_set_apll(unsigned int index)
tmp &= 0x7;
} while (tmp != 0x2);
/* 2. Set APLL Lock time */
__raw_writel(EXYNOS4_APLL_LOCKTIME, EXYNOS4_APLL_LOCK);
/* 3. Change PLL PMS values */
tmp = __raw_readl(EXYNOS4_APLL_CON0);
tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0));
tmp |= apll_freq_4210[index].mps;
__raw_writel(tmp, EXYNOS4_APLL_CON0);
clk_set_rate(mout_apll, freq * 1000);
/* 4. wait_lock_time */
do {
tmp = __raw_readl(EXYNOS4_APLL_CON0);
} while (!(tmp & (0x1 << EXYNOS4_APLLCON0_LOCKED_SHIFT)));
/* 5. MUX_CORE_SEL = APLL */
/* MUX_CORE_SEL = APLL */
clk_set_parent(moutcore, mout_apll);
do {
......@@ -115,53 +103,15 @@ static void exynos4210_set_apll(unsigned int index)
} while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT));
}
static bool exynos4210_pms_change(unsigned int old_index, unsigned int new_index)
{
unsigned int old_pm = apll_freq_4210[old_index].mps >> 8;
unsigned int new_pm = apll_freq_4210[new_index].mps >> 8;
return (old_pm == new_pm) ? 0 : 1;
}
static void exynos4210_set_frequency(unsigned int old_index,
unsigned int new_index)
{
unsigned int tmp;
if (old_index > new_index) {
if (!exynos4210_pms_change(old_index, new_index)) {
/* 1. Change the system clock divider values */
exynos4210_set_clkdiv(new_index);
/* 2. Change just s value in apll m,p,s value */
tmp = __raw_readl(EXYNOS4_APLL_CON0);
tmp &= ~(0x7 << 0);
tmp |= apll_freq_4210[new_index].mps & 0x7;
__raw_writel(tmp, EXYNOS4_APLL_CON0);
} else {
/* Clock Configuration Procedure */
/* 1. Change the system clock divider values */
exynos4210_set_clkdiv(new_index);
/* 2. Change the apll m,p,s value */
exynos4210_set_apll(new_index);
}
exynos4210_set_clkdiv(new_index);
exynos4210_set_apll(new_index);
} else if (old_index < new_index) {
if (!exynos4210_pms_change(old_index, new_index)) {
/* 1. Change just s value in apll m,p,s value */
tmp = __raw_readl(EXYNOS4_APLL_CON0);
tmp &= ~(0x7 << 0);
tmp |= apll_freq_4210[new_index].mps & 0x7;
__raw_writel(tmp, EXYNOS4_APLL_CON0);
/* 2. Change the system clock divider values */
exynos4210_set_clkdiv(new_index);
} else {
/* Clock Configuration Procedure */
/* 1. Change the apll m,p,s value */
exynos4210_set_apll(new_index);
/* 2. Change the system clock divider values */
exynos4210_set_clkdiv(new_index);
}
exynos4210_set_apll(new_index);
exynos4210_set_clkdiv(new_index);
}
}
......@@ -194,7 +144,6 @@ int exynos4210_cpufreq_init(struct exynos_dvfs_info *info)
info->volt_table = exynos4210_volt_table;
info->freq_table = exynos4210_freq_table;
info->set_freq = exynos4210_set_frequency;
info->need_apll_change = exynos4210_pms_change;
return 0;
......
......@@ -128,9 +128,9 @@ static void exynos4x12_set_clkdiv(unsigned int div_index)
static void exynos4x12_set_apll(unsigned int index)
{
unsigned int tmp, pdiv;
unsigned int tmp, freq = apll_freq_4x12[index].freq;
/* 1. MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
/* MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
clk_set_parent(moutcore, mout_mpll);
do {
......@@ -140,24 +140,9 @@ static void exynos4x12_set_apll(unsigned int index)
tmp &= 0x7;
} while (tmp != 0x2);
/* 2. Set APLL Lock time */
pdiv = ((apll_freq_4x12[index].mps >> 8) & 0x3f);
clk_set_rate(mout_apll, freq * 1000);
__raw_writel((pdiv * 250), EXYNOS4_APLL_LOCK);
/* 3. Change PLL PMS values */
tmp = __raw_readl(EXYNOS4_APLL_CON0);
tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0));
tmp |= apll_freq_4x12[index].mps;
__raw_writel(tmp, EXYNOS4_APLL_CON0);
/* 4. wait_lock_time */
do {
cpu_relax();
tmp = __raw_readl(EXYNOS4_APLL_CON0);
} while (!(tmp & (0x1 << EXYNOS4_APLLCON0_LOCKED_SHIFT)));
/* 5. MUX_CORE_SEL = APLL */
/* MUX_CORE_SEL = APLL */
clk_set_parent(moutcore, mout_apll);
do {
......@@ -167,52 +152,15 @@ static void exynos4x12_set_apll(unsigned int index)
} while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT));
}
static bool exynos4x12_pms_change(unsigned int old_index, unsigned int new_index)
{
unsigned int old_pm = apll_freq_4x12[old_index].mps >> 8;
unsigned int new_pm = apll_freq_4x12[new_index].mps >> 8;
return (old_pm == new_pm) ? 0 : 1;
}
static void exynos4x12_set_frequency(unsigned int old_index,
unsigned int new_index)
{
unsigned int tmp;
if (old_index > new_index) {
if (!exynos4x12_pms_change(old_index, new_index)) {
/* 1. Change the system clock divider values */
exynos4x12_set_clkdiv(new_index);
/* 2. Change just s value in apll m,p,s value */
tmp = __raw_readl(EXYNOS4_APLL_CON0);
tmp &= ~(0x7 << 0);
tmp |= apll_freq_4x12[new_index].mps & 0x7;
__raw_writel(tmp, EXYNOS4_APLL_CON0);
} else {
/* Clock Configuration Procedure */
/* 1. Change the system clock divider values */
exynos4x12_set_clkdiv(new_index);
/* 2. Change the apll m,p,s value */
exynos4x12_set_apll(new_index);
}
exynos4x12_set_clkdiv(new_index);
exynos4x12_set_apll(new_index);
} else if (old_index < new_index) {
if (!exynos4x12_pms_change(old_index, new_index)) {
/* 1. Change just s value in apll m,p,s value */
tmp = __raw_readl(EXYNOS4_APLL_CON0);
tmp &= ~(0x7 << 0);
tmp |= apll_freq_4x12[new_index].mps & 0x7;
__raw_writel(tmp, EXYNOS4_APLL_CON0);
/* 2. Change the system clock divider values */
exynos4x12_set_clkdiv(new_index);
} else {
/* Clock Configuration Procedure */
/* 1. Change the apll m,p,s value */
exynos4x12_set_apll(new_index);
/* 2. Change the system clock divider values */
exynos4x12_set_clkdiv(new_index);
}
exynos4x12_set_apll(new_index);
exynos4x12_set_clkdiv(new_index);
}
}
......@@ -250,7 +198,6 @@ int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info)
info->volt_table = exynos4x12_volt_table;
info->freq_table = exynos4x12_freq_table;
info->set_freq = exynos4x12_set_frequency;
info->need_apll_change = exynos4x12_pms_change;
return 0;
......
......@@ -209,12 +209,6 @@ static void exynos_enable_dvfs(void)
dvfs_info->base + XMU_DVFS_CTRL);
}
static int exynos_verify_speed(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy,
dvfs_info->freq_table);
}
static unsigned int exynos_getspeed(unsigned int cpu)
{
return dvfs_info->cur_frequency;
......@@ -324,30 +318,19 @@ static void exynos_sort_descend_freq_table(void)
static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
int ret;
ret = cpufreq_frequency_table_cpuinfo(policy, dvfs_info->freq_table);
if (ret) {
dev_err(dvfs_info->dev, "Invalid frequency table: %d\n", ret);
return ret;
}
policy->cur = dvfs_info->cur_frequency;
policy->cpuinfo.transition_latency = dvfs_info->latency;
cpumask_setall(policy->cpus);
cpufreq_frequency_table_get_attr(dvfs_info->freq_table, policy->cpu);
return 0;
return cpufreq_generic_init(policy, dvfs_info->freq_table,
dvfs_info->latency);
}
static struct cpufreq_driver exynos_driver = {
.flags = CPUFREQ_STICKY,
.verify = exynos_verify_speed,
.verify = cpufreq_generic_frequency_table_verify,
.target = exynos_target,
.get = exynos_getspeed,
.init = exynos_cpufreq_cpu_init,
.exit = cpufreq_generic_exit,
.name = CPUFREQ_NAME,
.attr = cpufreq_generic_attr,
};
static const struct of_device_id exynos_cpufreq_match[] = {
......
......@@ -54,31 +54,30 @@ EXPORT_SYMBOL_GPL(cpufreq_frequency_table_cpuinfo);
int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table)
{
unsigned int next_larger = ~0;
unsigned int i;
unsigned int count = 0;
unsigned int next_larger = ~0, freq, i = 0;
bool found = false;
pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n",
policy->min, policy->max, policy->cpu);
cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
policy->cpuinfo.max_freq);
cpufreq_verify_within_cpu_limits(policy);
for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
unsigned int freq = table[i].frequency;
for (; freq = table[i].frequency, freq != CPUFREQ_TABLE_END; i++) {
if (freq == CPUFREQ_ENTRY_INVALID)
continue;
if ((freq >= policy->min) && (freq <= policy->max))
count++;
else if ((next_larger > freq) && (freq > policy->max))
if ((freq >= policy->min) && (freq <= policy->max)) {
found = true;
break;
}
if ((next_larger > freq) && (freq > policy->max))
next_larger = freq;
}
if (!count)
if (!found) {
policy->max = next_larger;
cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
policy->cpuinfo.max_freq);
cpufreq_verify_within_cpu_limits(policy);
}
pr_debug("verification lead to (%u - %u kHz) for cpu %u\n",
policy->min, policy->max, policy->cpu);
......@@ -87,6 +86,20 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_verify);
/*
* Generic routine to verify policy & frequency table; the driver must have
* called cpufreq_frequency_table_get_attr() beforehand.
*/
int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy)
{
struct cpufreq_frequency_table *table =
cpufreq_frequency_get_table(policy->cpu);
if (!table)
return -ENODEV;
return cpufreq_frequency_table_verify(policy, table);
}
EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify);
int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table,
......@@ -200,6 +213,12 @@ struct freq_attr cpufreq_freq_attr_scaling_available_freqs = {
};
EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs);
struct freq_attr *cpufreq_generic_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
EXPORT_SYMBOL_GPL(cpufreq_generic_attr);
/*
* if you use these, you must ensure that the frequency table is valid
* all the time between get_attr and put_attr!
......@@ -219,6 +238,18 @@ void cpufreq_frequency_table_put_attr(unsigned int cpu)
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr);
int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table)
{
int ret = cpufreq_frequency_table_cpuinfo(policy, table);
if (!ret)
cpufreq_frequency_table_get_attr(table, policy->cpu);
return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_table_validate_and_show);
void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy)
{
pr_debug("Updating show_table for new_cpu %u from last_cpu %u\n",
......
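Taken together, the two additions above (cpufreq_generic_frequency_table_verify and cpufreq_table_validate_and_show) are what the driver conversions elsewhere in this diff rely on: ->init registers the table once, and the generic ->verify looks it up again via cpufreq_frequency_get_table(). A minimal sketch of that pairing — the foo_* names and table values are illustrative, only the helper calls come from the hunks above:

#include <linux/cpufreq.h>

/* Hypothetical two-entry table; the values are placeholders. */
static struct cpufreq_frequency_table foo_freq_table[] = {
	{ .frequency = 200000 },
	{ .frequency = 400000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

static int foo_cpufreq_init(struct cpufreq_policy *policy)
{
	policy->cpuinfo.transition_latency = 100 * 1000;	/* assumed, in ns */
	/* validates the table, fills cpuinfo.min/max_freq and registers the
	 * table so the generic verify helper and sysfs can find it */
	return cpufreq_table_validate_and_show(policy, foo_freq_table);
}

static struct cpufreq_driver foo_cpufreq_driver = {
	.verify	= cpufreq_generic_frequency_table_verify,
	.init	= foo_cpufreq_init,
	.attr	= cpufreq_generic_attr,
	/* .target, .get, .exit and .name as in the converted drivers */
};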
......@@ -401,7 +401,7 @@ static int cpufreq_gx_target(struct cpufreq_policy *policy,
static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy)
{
unsigned int maxfreq, curfreq;
unsigned int maxfreq;
if (!policy || policy->cpu != 0)
return -ENODEV;
......@@ -415,10 +415,8 @@ static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy)
maxfreq = 30000 * gx_freq_mult[getCx86(CX86_DIR1) & 0x0f];
stock_freq = maxfreq;
curfreq = gx_get_cpuspeed(0);
pr_debug("cpu max frequency is %d.\n", maxfreq);
pr_debug("cpu current frequency is %dkHz.\n", curfreq);
/* setup basic struct for cpufreq API */
policy->cpu = 0;
......@@ -428,7 +426,6 @@ static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy)
else
policy->min = maxfreq / POLICY_MIN_DIV;
policy->max = maxfreq;
policy->cur = curfreq;
policy->cpuinfo.min_freq = maxfreq / max_duration;
policy->cpuinfo.max_freq = maxfreq;
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
......
......@@ -66,7 +66,8 @@ static int hb_cpufreq_driver_init(void)
struct device_node *np;
int ret;
if (!of_machine_is_compatible("calxeda,highbank"))
if ((!of_machine_is_compatible("calxeda,highbank")) &&
(!of_machine_is_compatible("calxeda,ecx-2000")))
return -ENODEV;
cpu_dev = get_cpu_device(0);
......
......@@ -247,22 +247,6 @@ acpi_cpufreq_target (
}
static int
acpi_cpufreq_verify (
struct cpufreq_policy *policy)
{
unsigned int result = 0;
struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
pr_debug("acpi_cpufreq_verify\n");
result = cpufreq_frequency_table_verify(policy,
data->freq_table);
return (result);
}
static int
acpi_cpufreq_cpu_init (
struct cpufreq_policy *policy)
......@@ -321,7 +305,6 @@ acpi_cpufreq_cpu_init (
data->acpi_data.states[i].transition_latency * 1000;
}
}
policy->cur = processor_get_freq(data, policy->cpu);
/* table init */
for (i = 0; i <= data->acpi_data.state_count; i++)
......@@ -335,7 +318,7 @@ acpi_cpufreq_cpu_init (
}
}
result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
result = cpufreq_table_validate_and_show(policy, data->freq_table);
if (result) {
goto err_freqfree;
}
......@@ -356,8 +339,6 @@ acpi_cpufreq_cpu_init (
(u32) data->acpi_data.states[i].status,
(u32) data->acpi_data.states[i].control);
cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
/* the first call to ->target() should result in us actually
* writing something to the appropriate registers. */
data->resume = 1;
......@@ -396,20 +377,14 @@ acpi_cpufreq_cpu_exit (
}
static struct freq_attr* acpi_cpufreq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
static struct cpufreq_driver acpi_cpufreq_driver = {
.verify = acpi_cpufreq_verify,
.verify = cpufreq_generic_frequency_table_verify,
.target = acpi_cpufreq_target,
.get = acpi_cpufreq_get,
.init = acpi_cpufreq_cpu_init,
.exit = acpi_cpufreq_cpu_exit,
.name = "acpi-cpufreq",
.attr = acpi_cpufreq_attr,
.attr = cpufreq_generic_attr,
};
......
......@@ -35,11 +35,6 @@ static struct device *cpu_dev;
static struct cpufreq_frequency_table *freq_table;
static unsigned int transition_latency;
static int imx6q_verify_speed(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy, freq_table);
}
static unsigned int imx6q_get_speed(unsigned int cpu)
{
return clk_get_rate(arm_clk) / 1000;
......@@ -159,41 +154,17 @@ static int imx6q_set_target(struct cpufreq_policy *policy,
static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
{
int ret;
ret = cpufreq_frequency_table_cpuinfo(policy, freq_table);
if (ret) {
dev_err(cpu_dev, "invalid frequency table: %d\n", ret);
return ret;
}
policy->cpuinfo.transition_latency = transition_latency;
policy->cur = clk_get_rate(arm_clk) / 1000;
cpumask_setall(policy->cpus);
cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
return 0;
}
static int imx6q_cpufreq_exit(struct cpufreq_policy *policy)
{
cpufreq_frequency_table_put_attr(policy->cpu);
return 0;
return cpufreq_generic_init(policy, freq_table, transition_latency);
}
static struct freq_attr *imx6q_cpufreq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
static struct cpufreq_driver imx6q_cpufreq_driver = {
.verify = imx6q_verify_speed,
.verify = cpufreq_generic_frequency_table_verify,
.target = imx6q_set_target,
.get = imx6q_get_speed,
.init = imx6q_cpufreq_init,
.exit = imx6q_cpufreq_exit,
.exit = cpufreq_generic_exit,
.name = "imx6q-cpufreq",
.attr = imx6q_cpufreq_attr,
.attr = cpufreq_generic_attr,
};
static int imx6q_cpufreq_probe(struct platform_device *pdev)
......
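The imx6q ->init above, like the exynos5440 one earlier, collapses into a single cpufreq_generic_init() call, so that helper has to cover the table registration, the transition latency and the shared-clock cpumask that the old code set up by hand. A plausible reconstruction, inferred from the call sites only (the real implementation is not part of this diff):

/* Presumed shape of cpufreq_generic_init(), inferred from its callers in
 * this diff; the real body lives outside these hunks. */
int cpufreq_generic_init(struct cpufreq_policy *policy,
			 struct cpufreq_frequency_table *table,
			 unsigned int transition_latency)
{
	int ret = cpufreq_table_validate_and_show(policy, table);

	if (ret)
		return ret;

	policy->cpuinfo.transition_latency = transition_latency;

	/* callers of this helper share one clock across all CPUs */
	cpumask_setall(policy->cpus);

	return 0;
}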
......@@ -59,9 +59,7 @@ static int integrator_verify_policy(struct cpufreq_policy *policy)
{
struct icst_vco vco;
cpufreq_verify_within_limits(policy,
policy->cpuinfo.min_freq,
policy->cpuinfo.max_freq);
cpufreq_verify_within_cpu_limits(policy);
vco = icst_hz_to_vco(&cclk_params, policy->max * 1000);
policy->max = icst_hz(&cclk_params, vco) / 1000;
......@@ -69,10 +67,7 @@ static int integrator_verify_policy(struct cpufreq_policy *policy)
vco = icst_hz_to_vco(&cclk_params, policy->min * 1000);
policy->min = icst_hz(&cclk_params, vco) / 1000;
cpufreq_verify_within_limits(policy,
policy->cpuinfo.min_freq,
policy->cpuinfo.max_freq);
cpufreq_verify_within_cpu_limits(policy);
return 0;
}
......@@ -186,10 +181,9 @@ static int integrator_cpufreq_init(struct cpufreq_policy *policy)
{
/* set default policy and cpuinfo */
policy->cpuinfo.max_freq = 160000;
policy->cpuinfo.min_freq = 12000;
policy->max = policy->cpuinfo.max_freq = 160000;
policy->min = policy->cpuinfo.min_freq = 12000;
policy->cpuinfo.transition_latency = 1000000; /* 1 ms, assumed */
policy->cur = policy->min = policy->max = integrator_get(policy->cpu);
return 0;
}
......
......@@ -615,9 +615,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
cpufreq_verify_within_limits(policy,
policy->cpuinfo.min_freq,
policy->cpuinfo.max_freq);
cpufreq_verify_within_cpu_limits(policy);
if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
(policy->policy != CPUFREQ_POLICY_PERFORMANCE))
......
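Each hunk that drops the open-coded cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, policy->cpuinfo.max_freq) in favour of cpufreq_verify_within_cpu_limits(policy) suggests the new call is a thin wrapper around exactly that; a presumed sketch:

/* Presumed helper, inferred from the substitutions in this series. */
static inline void
cpufreq_verify_within_cpu_limits(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);
}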
......@@ -102,11 +102,6 @@ static void kirkwood_cpufreq_set_cpu_state(struct cpufreq_policy *policy,
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
};
static int kirkwood_cpufreq_verify(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy, kirkwood_freq_table);
}
static int kirkwood_cpufreq_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
......@@ -125,40 +120,17 @@ static int kirkwood_cpufreq_target(struct cpufreq_policy *policy,
/* Module init and exit code */
static int kirkwood_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
int result;
/* cpuinfo and default policy values */
policy->cpuinfo.transition_latency = 5000; /* 5uS */
policy->cur = kirkwood_cpufreq_get_cpu_frequency(0);
result = cpufreq_frequency_table_cpuinfo(policy, kirkwood_freq_table);
if (result)
return result;
cpufreq_frequency_table_get_attr(kirkwood_freq_table, policy->cpu);
return 0;
}
static int kirkwood_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
cpufreq_frequency_table_put_attr(policy->cpu);
return 0;
return cpufreq_generic_init(policy, kirkwood_freq_table, 5000);
}
static struct freq_attr *kirkwood_cpufreq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
static struct cpufreq_driver kirkwood_cpufreq_driver = {
.get = kirkwood_cpufreq_get_cpu_frequency,
.verify = kirkwood_cpufreq_verify,
.verify = cpufreq_generic_frequency_table_verify,
.target = kirkwood_cpufreq_target,
.init = kirkwood_cpufreq_cpu_init,
.exit = kirkwood_cpufreq_cpu_exit,
.exit = cpufreq_generic_exit,
.name = "kirkwood-cpufreq",
.attr = kirkwood_cpufreq_attr,
.attr = cpufreq_generic_attr,
};
static int kirkwood_cpufreq_probe(struct platform_device *pdev)
......
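kirkwood's old ->exit did nothing but drop the table attribute, and it is swapped wholesale for cpufreq_generic_exit here (the same swap recurs in imx6q, p4-clockmod, sc520, speedstep and others), so the generic helper presumably reduces to:

/* Presumed body of cpufreq_generic_exit(), matching the ->exit callbacks
 * it replaces; not taken from these hunks. */
int cpufreq_generic_exit(struct cpufreq_policy *policy)
{
	cpufreq_frequency_table_put_attr(policy->cpu);
	return 0;
}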
......@@ -625,12 +625,6 @@ static void longhaul_setup_voltagescaling(void)
}
static int longhaul_verify(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy, longhaul_table);
}
static int longhaul_target(struct cpufreq_policy *policy,
unsigned int target_freq, unsigned int relation)
{
......@@ -919,36 +913,18 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
longhaul_setup_voltagescaling();
policy->cpuinfo.transition_latency = 200000; /* nsec */
policy->cur = calc_speed(longhaul_get_cpu_mult());
ret = cpufreq_frequency_table_cpuinfo(policy, longhaul_table);
if (ret)
return ret;
cpufreq_frequency_table_get_attr(longhaul_table, policy->cpu);
return 0;
return cpufreq_table_validate_and_show(policy, longhaul_table);
}
static int longhaul_cpu_exit(struct cpufreq_policy *policy)
{
cpufreq_frequency_table_put_attr(policy->cpu);
return 0;
}
static struct freq_attr *longhaul_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
static struct cpufreq_driver longhaul_driver = {
.verify = longhaul_verify,
.verify = cpufreq_generic_frequency_table_verify,
.target = longhaul_target,
.get = longhaul_get,
.init = longhaul_cpu_init,
.exit = longhaul_cpu_exit,
.exit = cpufreq_generic_exit,
.name = "longhaul",
.attr = longhaul_attr,
.attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id longhaul_id[] = {
......
......@@ -129,9 +129,7 @@ static int longrun_verify_policy(struct cpufreq_policy *policy)
return -EINVAL;
policy->cpu = 0;
cpufreq_verify_within_limits(policy,
policy->cpuinfo.min_freq,
policy->cpuinfo.max_freq);
cpufreq_verify_within_cpu_limits(policy);
if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
(policy->policy != CPUFREQ_POLICY_PERFORMANCE))
......
......@@ -131,40 +131,24 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
return ret;
}
policy->cur = loongson2_cpufreq_get(policy->cpu);
cpufreq_frequency_table_get_attr(&loongson2_clockmod_table[0],
policy->cpu);
return cpufreq_frequency_table_cpuinfo(policy,
&loongson2_clockmod_table[0]);
}
static int loongson2_cpufreq_verify(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy,
&loongson2_clockmod_table[0]);
return cpufreq_generic_init(policy, &loongson2_clockmod_table[0], 0);
}
static int loongson2_cpufreq_exit(struct cpufreq_policy *policy)
{
cpufreq_frequency_table_put_attr(policy->cpu);
clk_put(cpuclk);
return 0;
}
static struct freq_attr *loongson2_table_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
static struct cpufreq_driver loongson2_cpufreq_driver = {
.name = "loongson2",
.init = loongson2_cpufreq_cpu_init,
.verify = loongson2_cpufreq_verify,
.verify = cpufreq_generic_frequency_table_verify,
.target = loongson2_cpufreq_target,
.get = loongson2_cpufreq_get,
.exit = loongson2_cpufreq_exit,
.attr = loongson2_table_attr,
.attr = cpufreq_generic_attr,
};
static struct platform_device_id platform_device_ids[] = {
......
......@@ -64,11 +64,6 @@ static struct cpufreq_frequency_table maple_cpu_freqs[] = {
{0, CPUFREQ_TABLE_END},
};
static struct freq_attr *maple_cpu_freqs_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
/* Power mode data is an array of the 32 bits PCR values to use for
* the various frequencies, retrieved from the device-tree
*/
......@@ -135,11 +130,6 @@ static int maple_scom_query_freq(void)
* Common interface to the cpufreq core
*/
static int maple_cpufreq_verify(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy, maple_cpu_freqs);
}
static int maple_cpufreq_target(struct cpufreq_policy *policy,
unsigned int target_freq, unsigned int relation)
{
......@@ -175,27 +165,17 @@ static unsigned int maple_cpufreq_get_speed(unsigned int cpu)
static int maple_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
policy->cpuinfo.transition_latency = 12000;
policy->cur = maple_cpu_freqs[maple_scom_query_freq()].frequency;
/* secondary CPUs are tied to the primary one by the
* cpufreq core if, in the secondary policy, we tell it that
* it actually must be one policy together with all others. */
cpumask_setall(policy->cpus);
cpufreq_frequency_table_get_attr(maple_cpu_freqs, policy->cpu);
return cpufreq_frequency_table_cpuinfo(policy,
maple_cpu_freqs);
return cpufreq_generic_init(policy, maple_cpu_freqs, 12000);
}
static struct cpufreq_driver maple_cpufreq_driver = {
.name = "maple",
.flags = CPUFREQ_CONST_LOOPS,
.init = maple_cpufreq_cpu_init,
.verify = maple_cpufreq_verify,
.verify = cpufreq_generic_frequency_table_verify,
.target = maple_cpufreq_target,
.get = maple_cpufreq_get_speed,
.attr = maple_cpu_freqs_attr,
.attr = cpufreq_generic_attr,
};
static int __init maple_cpufreq_init(void)
......
......@@ -40,13 +40,6 @@ static struct clk *mpu_clk;
static struct device *mpu_dev;
static struct regulator *mpu_reg;
static int omap_verify_speed(struct cpufreq_policy *policy)
{
if (!freq_table)
return -EINVAL;
return cpufreq_frequency_table_verify(policy, freq_table);
}
static unsigned int omap_getspeed(unsigned int cpu)
{
unsigned long rate;
......@@ -167,81 +160,52 @@ static inline void freq_table_free(void)
static int omap_cpu_init(struct cpufreq_policy *policy)
{
int result = 0;
int result;
mpu_clk = clk_get(NULL, "cpufreq_ck");
if (IS_ERR(mpu_clk))
return PTR_ERR(mpu_clk);
if (policy->cpu >= NR_CPUS) {
result = -EINVAL;
goto fail_ck;
}
policy->cur = omap_getspeed(policy->cpu);
if (!freq_table)
if (!freq_table) {
result = dev_pm_opp_init_cpufreq_table(mpu_dev, &freq_table);
if (result) {
dev_err(mpu_dev, "%s: cpu%d: failed creating freq table[%d]\n",
if (result) {
dev_err(mpu_dev,
"%s: cpu%d: failed creating freq table[%d]\n",
__func__, policy->cpu, result);
goto fail_ck;
goto fail;
}
}
atomic_inc_return(&freq_table_users);
result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
if (result)
goto fail_table;
cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
policy->cur = omap_getspeed(policy->cpu);
/*
* On an OMAP SMP configuration, both processors share the voltage
* and clock. So both CPUs need to be scaled together and hence
* need software co-ordination. Use the cpufreq affected_cpus
* interface to handle this scenario. Additional is_smp() check
* is to keep SMP_ON_UP build working.
*/
if (is_smp())
cpumask_setall(policy->cpus);
/* FIXME: what's the actual transition time? */
policy->cpuinfo.transition_latency = 300 * 1000;
return 0;
result = cpufreq_generic_init(policy, freq_table, 300 * 1000);
if (!result)
return 0;
fail_table:
freq_table_free();
fail_ck:
fail:
clk_put(mpu_clk);
return result;
}
static int omap_cpu_exit(struct cpufreq_policy *policy)
{
cpufreq_frequency_table_put_attr(policy->cpu);
freq_table_free();
clk_put(mpu_clk);
return 0;
}
static struct freq_attr *omap_cpufreq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
static struct cpufreq_driver omap_driver = {
.flags = CPUFREQ_STICKY,
.verify = omap_verify_speed,
.verify = cpufreq_generic_frequency_table_verify,
.target = omap_target,
.get = omap_getspeed,
.init = omap_cpu_init,
.exit = omap_cpu_exit,
.name = "omap",
.attr = omap_cpufreq_attr,
.attr = cpufreq_generic_attr,
};
static int omap_cpufreq_probe(struct platform_device *pdev)
......
......@@ -140,12 +140,6 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
}
static int cpufreq_p4_verify(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy, &p4clockmod_table[0]);
}
static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
{
if (c->x86 == 0x06) {
......@@ -230,25 +224,17 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
else
p4clockmod_table[i].frequency = (stock_freq * i)/8;
}
cpufreq_frequency_table_get_attr(p4clockmod_table, policy->cpu);
/* cpuinfo and default policy values */
/* the transition latency is set to be 1 higher than the maximum
* transition latency of the ondemand governor */
policy->cpuinfo.transition_latency = 10000001;
policy->cur = stock_freq;
return cpufreq_frequency_table_cpuinfo(policy, &p4clockmod_table[0]);
return cpufreq_table_validate_and_show(policy, &p4clockmod_table[0]);
}
static int cpufreq_p4_cpu_exit(struct cpufreq_policy *policy)
{
cpufreq_frequency_table_put_attr(policy->cpu);
return 0;
}
static unsigned int cpufreq_p4_get(unsigned int cpu)
{
u32 l, h;
......@@ -267,19 +253,14 @@ static unsigned int cpufreq_p4_get(unsigned int cpu)
return stock_freq;
}
static struct freq_attr *p4clockmod_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
static struct cpufreq_driver p4clockmod_driver = {
.verify = cpufreq_p4_verify,
.verify = cpufreq_generic_frequency_table_verify,
.target = cpufreq_p4_target,
.init = cpufreq_p4_cpu_init,
.exit = cpufreq_p4_cpu_exit,
.exit = cpufreq_generic_exit,
.get = cpufreq_p4_get,
.name = "p4-clockmod",
.attr = p4clockmod_attr,
.attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id cpufreq_p4_id[] = {
......
......@@ -69,11 +69,6 @@ static struct cpufreq_frequency_table pas_freqs[] = {
{0, CPUFREQ_TABLE_END},
};
static struct freq_attr *pas_cpu_freqs_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
/*
* hardware specific functions
*/
......@@ -209,22 +204,13 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
pr_debug("%d: %d\n", i, pas_freqs[i].frequency);
}
policy->cpuinfo.transition_latency = get_gizmo_latency();
cur_astate = get_cur_astate(policy->cpu);
pr_debug("current astate is at %d\n",cur_astate);
policy->cur = pas_freqs[cur_astate].frequency;
cpumask_copy(policy->cpus, cpu_online_mask);
ppc_proc_freq = policy->cur * 1000ul;
cpufreq_frequency_table_get_attr(pas_freqs, policy->cpu);
/* this ensures that policy->cpuinfo.min_freq and policy->cpuinfo.max_freq
* are set correctly
*/
return cpufreq_frequency_table_cpuinfo(policy, pas_freqs);
return cpufreq_generic_init(policy, pas_freqs, get_gizmo_latency());
out_unmap_sdcpwr:
iounmap(sdcpwr_mapbase);
......@@ -253,11 +239,6 @@ static int pas_cpufreq_cpu_exit(struct cpufreq_policy *policy)
return 0;
}
static int pas_cpufreq_verify(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy, pas_freqs);
}
static int pas_cpufreq_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
......@@ -300,9 +281,9 @@ static struct cpufreq_driver pas_cpufreq_driver = {
.flags = CPUFREQ_CONST_LOOPS,
.init = pas_cpufreq_cpu_init,
.exit = pas_cpufreq_cpu_exit,
.verify = pas_cpufreq_verify,
.verify = cpufreq_generic_frequency_table_verify,
.target = pas_cpufreq_target,
.attr = pas_cpu_freqs_attr,
.attr = cpufreq_generic_attr,
};
/*
......
......@@ -111,8 +111,7 @@ static struct pcc_cpu __percpu *pcc_cpu_info;
static int pcc_cpufreq_verify(struct cpufreq_policy *policy)
{
cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
policy->cpuinfo.max_freq);
cpufreq_verify_within_cpu_limits(policy);
return 0;
}
......@@ -560,13 +559,6 @@ static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy)
ioread32(&pcch_hdr->nominal) * 1000;
policy->min = policy->cpuinfo.min_freq =
ioread32(&pcch_hdr->minimum_frequency) * 1000;
policy->cur = pcc_get_freq(cpu);
if (!policy->cur) {
pr_debug("init: Unable to get current CPU frequency\n");
result = -EINVAL;
goto out;
}
pr_debug("init: policy->max is %d, policy->min is %d\n",
policy->max, policy->min);
......
......@@ -86,11 +86,6 @@ static struct cpufreq_frequency_table pmac_cpu_freqs[] = {
{0, CPUFREQ_TABLE_END},
};
static struct freq_attr* pmac_cpu_freqs_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
static inline void local_delay(unsigned long ms)
{
if (no_schedule)
......@@ -378,11 +373,6 @@ static unsigned int pmac_cpufreq_get_speed(unsigned int cpu)
return cur_freq;
}
static int pmac_cpufreq_verify(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy, pmac_cpu_freqs);
}
static int pmac_cpufreq_target( struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
......@@ -402,14 +392,7 @@ static int pmac_cpufreq_target( struct cpufreq_policy *policy,
static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
if (policy->cpu != 0)
return -ENODEV;
policy->cpuinfo.transition_latency = transition_latency;
policy->cur = cur_freq;
cpufreq_frequency_table_get_attr(pmac_cpu_freqs, policy->cpu);
return cpufreq_frequency_table_cpuinfo(policy, pmac_cpu_freqs);
return cpufreq_generic_init(policy, pmac_cpu_freqs, transition_latency);
}
static u32 read_gpio(struct device_node *np)
......@@ -469,14 +452,14 @@ static int pmac_cpufreq_resume(struct cpufreq_policy *policy)
}
static struct cpufreq_driver pmac_cpufreq_driver = {
.verify = pmac_cpufreq_verify,
.verify = cpufreq_generic_frequency_table_verify,
.target = pmac_cpufreq_target,
.get = pmac_cpufreq_get_speed,
.init = pmac_cpufreq_cpu_init,
.suspend = pmac_cpufreq_suspend,
.resume = pmac_cpufreq_resume,
.flags = CPUFREQ_PM_NO_WARN,
.attr = pmac_cpu_freqs_attr,
.attr = cpufreq_generic_attr,
.name = "powermac",
};
......
......@@ -70,11 +70,6 @@ static struct cpufreq_frequency_table g5_cpu_freqs[] = {
{0, CPUFREQ_TABLE_END},
};
static struct freq_attr* g5_cpu_freqs_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
/* Power mode data is an array of the 32 bits PCR values to use for
* the various frequencies, retrieved from the device-tree
*/
......@@ -142,7 +137,7 @@ static void g5_vdnap_switch_volt(int speed_mode)
pmf_call_one(pfunc_vdnap0_complete, &args);
if (done)
break;
msleep(1);
usleep_range(1000, 1000);
}
if (done == 0)
printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n");
......@@ -241,7 +236,7 @@ static void g5_pfunc_switch_volt(int speed_mode)
if (pfunc_cpu1_volt_low)
pmf_call_one(pfunc_cpu1_volt_low, NULL);
}
msleep(10); /* should be faster, to fix */
usleep_range(10000, 10000); /* should be faster, to fix */
}
/*
......@@ -286,7 +281,7 @@ static int g5_pfunc_switch_freq(int speed_mode)
pmf_call_one(pfunc_slewing_done, &args);
if (done)
break;
msleep(1);
usleep_range(500, 500);
}
if (done == 0)
printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n");
......@@ -317,11 +312,6 @@ static int g5_pfunc_query_freq(void)
* Common interface to the cpufreq core
*/
static int g5_cpufreq_verify(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy, g5_cpu_freqs);
}
static int g5_cpufreq_target(struct cpufreq_policy *policy,
unsigned int target_freq, unsigned int relation)
{
......@@ -357,27 +347,17 @@ static unsigned int g5_cpufreq_get_speed(unsigned int cpu)
static int g5_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
policy->cpuinfo.transition_latency = transition_latency;
policy->cur = g5_cpu_freqs[g5_query_freq()].frequency;
/* secondary CPUs are tied to the primary one by the
* cpufreq core if, in the secondary policy, we tell it that
* it actually must be one policy together with all others. */
cpumask_copy(policy->cpus, cpu_online_mask);
cpufreq_frequency_table_get_attr(g5_cpu_freqs, policy->cpu);
return cpufreq_frequency_table_cpuinfo(policy,
g5_cpu_freqs);
return cpufreq_generic_init(policy, g5_cpu_freqs, transition_latency);
}
static struct cpufreq_driver g5_cpufreq_driver = {
.name = "powermac",
.flags = CPUFREQ_CONST_LOOPS,
.init = g5_cpufreq_cpu_init,
.verify = g5_cpufreq_verify,
.verify = cpufreq_generic_frequency_table_verify,
.target = g5_cpufreq_target,
.get = g5_cpufreq_get_speed,
.attr = g5_cpu_freqs_attr,
.attr = cpufreq_generic_attr,
};
......@@ -397,7 +377,8 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpunode)
/* Check supported platforms */
if (of_machine_is_compatible("PowerMac8,1") ||
of_machine_is_compatible("PowerMac8,2") ||
of_machine_is_compatible("PowerMac9,1"))
of_machine_is_compatible("PowerMac9,1") ||
of_machine_is_compatible("PowerMac12,1"))
use_volts_smu = 1;
else if (of_machine_is_compatible("PowerMac11,2"))
use_volts_vdnap = 1;
......@@ -647,8 +628,10 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
g5_cpu_freqs[0].frequency = max_freq;
g5_cpu_freqs[1].frequency = min_freq;
/* Based on a measurement on Xserve G5, rounded up. */
transition_latency = 10 * NSEC_PER_MSEC;
/* Set callbacks */
transition_latency = CPUFREQ_ETERNAL;
g5_switch_volt = g5_pfunc_switch_volt;
g5_switch_freq = g5_pfunc_switch_freq;
g5_query_freq = g5_pfunc_query_freq;
......
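The msleep() → usleep_range() changes in the slewing loops above are worth a note: msleep() rounds up to jiffies, so a 1 ms request can oversleep by several milliseconds, whereas usleep_range() is hrtimer-backed; passing min == max, as this patch does, simply gives the timer no slack for coalescing wakeups. A sketch of the same polling idiom with a small slack added — slew_done() and the timeout are illustrative stand-ins, not part of the patch:

#include <linux/delay.h>

/* Illustrative only: slew_done() stands in for the pmf_call_one()
 * completion check used by the real driver. */
static bool wait_for_slew(bool (*slew_done)(void))
{
	int timeout = 10000;

	while (!slew_done() && --timeout)
		/* 1.0-1.2 ms, hrtimer-backed; the range lets wakeups coalesce */
		usleep_range(1000, 1200);

	return timeout != 0;
}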
......@@ -104,19 +104,6 @@ static void powernow_k6_set_state(struct cpufreq_policy *policy,
}
/**
* powernow_k6_verify - verifies a new CPUfreq policy
* @policy: new policy
*
* Policy must be within lowest and highest possible CPU Frequency,
* and at least one possible state must be within min and max.
*/
static int powernow_k6_verify(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy, &clock_ratio[0]);
}
/**
* powernow_k6_setpolicy - sets a new CPUFreq policy
* @policy: new policy
......@@ -145,7 +132,6 @@ static int powernow_k6_target(struct cpufreq_policy *policy,
static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
{
unsigned int i, f;
int result;
if (policy->cpu != 0)
return -ENODEV;
......@@ -165,15 +151,8 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
/* cpuinfo and default policy values */
policy->cpuinfo.transition_latency = 200000;
policy->cur = busfreq * max_multiplier;
result = cpufreq_frequency_table_cpuinfo(policy, clock_ratio);
if (result)
return result;
cpufreq_frequency_table_get_attr(clock_ratio, policy->cpu);
return 0;
return cpufreq_table_validate_and_show(policy, clock_ratio);
}
......@@ -195,19 +174,14 @@ static unsigned int powernow_k6_get(unsigned int cpu)
return ret;
}
static struct freq_attr *powernow_k6_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
static struct cpufreq_driver powernow_k6_driver = {
.verify = powernow_k6_verify,
.verify = cpufreq_generic_frequency_table_verify,
.target = powernow_k6_target,
.init = powernow_k6_cpu_init,
.exit = powernow_k6_cpu_exit,
.get = powernow_k6_get,
.name = "powernow-k6",
.attr = powernow_k6_attr,
.attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id powernow_k6_ids[] = {
......
......@@ -549,11 +549,6 @@ static int powernow_target(struct cpufreq_policy *policy,
}
static int powernow_verify(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy, powernow_table);
}
/*
* We use the fact that the bus frequency is somehow
* a multiple of 100000/3 khz, then we compute sgtc according
......@@ -678,11 +673,7 @@ static int powernow_cpu_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency =
cpufreq_scale(2000000UL, fsb, latency);
policy->cur = powernow_get(0);
cpufreq_frequency_table_get_attr(powernow_table, policy->cpu);
return cpufreq_frequency_table_cpuinfo(policy, powernow_table);
return cpufreq_table_validate_and_show(policy, powernow_table);
}
static int powernow_cpu_exit(struct cpufreq_policy *policy)
......@@ -701,13 +692,8 @@ static int powernow_cpu_exit(struct cpufreq_policy *policy)
return 0;
}
static struct freq_attr *powernow_table_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
static struct cpufreq_driver powernow_driver = {
.verify = powernow_verify,
.verify = cpufreq_generic_frequency_table_verify,
.target = powernow_target,
.get = powernow_get,
#ifdef CONFIG_X86_POWERNOW_K7_ACPI
......@@ -716,7 +702,7 @@ static struct cpufreq_driver powernow_driver = {
.init = powernow_cpu_init,
.exit = powernow_cpu_exit,
.name = "powernow-k7",
.attr = powernow_table_attr,
.attr = cpufreq_generic_attr,
};
static int __init powernow_init(void)
......
......@@ -1053,17 +1053,6 @@ static int powernowk8_target(struct cpufreq_policy *pol,
return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta);
}
/* Driver entry point to verify the policy and range of frequencies */
static int powernowk8_verify(struct cpufreq_policy *pol)
{
struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
if (!data)
return -EINVAL;
return cpufreq_frequency_table_verify(pol, data->powernow_table);
}
struct init_on_cpu {
struct powernow_k8_data *data;
int rc;
......@@ -1152,11 +1141,8 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol)
cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu));
data->available_cores = pol->cpus;
pol->cur = find_khz_freq_from_fid(data->currfid);
pr_debug("policy current frequency %d kHz\n", pol->cur);
/* min/max the cpu is capable of */
if (cpufreq_frequency_table_cpuinfo(pol, data->powernow_table)) {
if (cpufreq_table_validate_and_show(pol, data->powernow_table)) {
printk(KERN_ERR FW_BUG PFX "invalid powernow_table\n");
powernow_k8_cpu_exit_acpi(data);
kfree(data->powernow_table);
......@@ -1164,8 +1150,6 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol)
return -EINVAL;
}
cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu);
pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n",
data->currfid, data->currvid);
......@@ -1227,20 +1211,15 @@ static unsigned int powernowk8_get(unsigned int cpu)
return khz;
}
static struct freq_attr *powernow_k8_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
static struct cpufreq_driver cpufreq_amd64_driver = {
.verify = powernowk8_verify,
.verify = cpufreq_generic_frequency_table_verify,
.target = powernowk8_target,
.bios_limit = acpi_processor_get_bios_limit,
.init = powernowk8_cpu_init,
.exit = powernowk8_cpu_exit,
.get = powernowk8_get,
.name = "powernow-k8",
.attr = powernow_k8_attr,
.attr = cpufreq_generic_attr,
};
static void __request_acpi_cpufreq(void)
......
......@@ -202,7 +202,7 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
table[i].frequency = CPUFREQ_TABLE_END;
/* set the min and max frequency properly */
ret = cpufreq_frequency_table_cpuinfo(policy, table);
ret = cpufreq_table_validate_and_show(policy, table);
if (ret) {
pr_err("invalid frequency table: %d\n", ret);
goto err_nomem1;
......@@ -217,9 +217,6 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
per_cpu(cpu_data, i) = data;
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
policy->cur = corenet_cpufreq_get_speed(policy->cpu);
cpufreq_frequency_table_get_attr(table, cpu);
of_node_put(np);
return 0;
......@@ -253,14 +250,6 @@ static int __exit corenet_cpufreq_cpu_exit(struct cpufreq_policy *policy)
return 0;
}
static int corenet_cpufreq_verify(struct cpufreq_policy *policy)
{
struct cpufreq_frequency_table *table =
per_cpu(cpu_data, policy->cpu)->table;
return cpufreq_frequency_table_verify(policy, table);
}
static int corenet_cpufreq_target(struct cpufreq_policy *policy,
unsigned int target_freq, unsigned int relation)
{
......@@ -293,20 +282,15 @@ static int corenet_cpufreq_target(struct cpufreq_policy *policy,
return ret;
}
static struct freq_attr *corenet_cpufreq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
static struct cpufreq_driver ppc_corenet_cpufreq_driver = {
.name = "ppc_cpufreq",
.flags = CPUFREQ_CONST_LOOPS,
.init = corenet_cpufreq_cpu_init,
.exit = __exit_p(corenet_cpufreq_cpu_exit),
.verify = corenet_cpufreq_verify,
.verify = cpufreq_generic_frequency_table_verify,
.target = corenet_cpufreq_target,
.get = corenet_cpufreq_get_speed,
.attr = corenet_cpufreq_attr,
.attr = cpufreq_generic_attr,
};
static const struct of_device_id node_matches[] __initdata = {
......
......@@ -123,22 +123,9 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
#endif
cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu);
/* this ensures that policy->cpuinfo.min_freq
* and policy->cpuinfo.max_freq are set correctly */
return cpufreq_frequency_table_cpuinfo(policy, cbe_freqs);
}
static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
cpufreq_frequency_table_put_attr(policy->cpu);
return 0;
}
static int cbe_cpufreq_verify(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy, cbe_freqs);
return cpufreq_table_validate_and_show(policy, cbe_freqs);
}
static int cbe_cpufreq_target(struct cpufreq_policy *policy,
......@@ -176,10 +163,10 @@ static int cbe_cpufreq_target(struct cpufreq_policy *policy,
}
static struct cpufreq_driver cbe_cpufreq_driver = {
.verify = cbe_cpufreq_verify,
.verify = cpufreq_generic_frequency_table_verify,
.target = cbe_cpufreq_target,
.init = cbe_cpufreq_cpu_init,
.exit = cbe_cpufreq_cpu_exit,
.exit = cpufreq_generic_exit,
.name = "cbe-cpufreq",
.flags = CPUFREQ_CONST_LOOPS,
};
......
......@@ -262,23 +262,6 @@ static u32 mdrefr_dri(unsigned int freq)
return (interval - (cpu_is_pxa27x() ? 31 : 0)) / 32;
}
/* find a valid frequency point */
static int pxa_verify_policy(struct cpufreq_policy *policy)
{
struct cpufreq_frequency_table *pxa_freqs_table;
pxa_freqs_t *pxa_freqs;
int ret;
find_freq_tables(&pxa_freqs_table, &pxa_freqs);
ret = cpufreq_frequency_table_verify(policy, pxa_freqs_table);
if (freq_debug)
pr_debug("Verified CPU policy: %dKhz min to %dKhz max\n",
policy->min, policy->max);
return ret;
}
static unsigned int pxa_cpufreq_get(unsigned int cpu)
{
return get_clk_frequency_khz(0);
......@@ -414,8 +397,6 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy)
/* set default policy and cpuinfo */
policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */
policy->cur = get_clk_frequency_khz(0); /* current freq */
policy->min = policy->max = policy->cur;
/* Generate pxa25x the run cpufreq_frequency_table struct */
for (i = 0; i < NUM_PXA25x_RUN_FREQS; i++) {
......@@ -453,10 +434,12 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy)
find_freq_tables(&pxa255_freq_table, &pxa255_freqs);
pr_info("PXA255 cpufreq using %s frequency table\n",
pxa255_turbo_table ? "turbo" : "run");
cpufreq_frequency_table_cpuinfo(policy, pxa255_freq_table);
cpufreq_table_validate_and_show(policy, pxa255_freq_table);
}
else if (cpu_is_pxa27x()) {
cpufreq_table_validate_and_show(policy, pxa27x_freq_table);
}
else if (cpu_is_pxa27x())
cpufreq_frequency_table_cpuinfo(policy, pxa27x_freq_table);
printk(KERN_INFO "PXA CPU frequency change support initialized\n");
......@@ -464,9 +447,10 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy)
}
static struct cpufreq_driver pxa_cpufreq_driver = {
.verify = pxa_verify_policy,
.verify = cpufreq_generic_frequency_table_verify,
.target = pxa_set_target,
.init = pxa_cpufreq_init,
.exit = cpufreq_generic_exit,
.get = pxa_cpufreq_get,
.name = "PXA2xx",
};
......
......@@ -108,7 +108,7 @@ static int setup_freqs_table(struct cpufreq_policy *policy,
pxa3xx_freqs_num = num;
pxa3xx_freqs_table = table;
return cpufreq_frequency_table_cpuinfo(policy, table);
return cpufreq_table_validate_and_show(policy, table);
}
static void __update_core_freq(struct pxa3xx_freq_info *info)
......@@ -150,11 +150,6 @@ static void __update_bus_freq(struct pxa3xx_freq_info *info)
cpu_relax();
}
static int pxa3xx_cpufreq_verify(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy, pxa3xx_freqs_table);
}
static unsigned int pxa3xx_cpufreq_get(unsigned int cpu)
{
return pxa3xx_get_clk_frequency_khz(0);
......@@ -206,11 +201,10 @@ static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
int ret = -EINVAL;
/* set default policy and cpuinfo */
policy->cpuinfo.min_freq = 104000;
policy->cpuinfo.max_freq = (cpu_is_pxa320()) ? 806000 : 624000;
policy->min = policy->cpuinfo.min_freq = 104000;
policy->max = policy->cpuinfo.max_freq =
(cpu_is_pxa320()) ? 806000 : 624000;
policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */
policy->max = pxa3xx_get_clk_frequency_khz(0);
policy->cur = policy->min = policy->max;
if (cpu_is_pxa300() || cpu_is_pxa310())
ret = setup_freqs_table(policy, pxa300_freqs,
......@@ -230,9 +224,10 @@ static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
}
static struct cpufreq_driver pxa3xx_cpufreq_driver = {
.verify = pxa3xx_cpufreq_verify,
.verify = cpufreq_generic_frequency_table_verify,
.target = pxa3xx_cpufreq_set,
.init = pxa3xx_cpufreq_init,
.exit = cpufreq_generic_exit,
.get = pxa3xx_cpufreq_get,
.name = "pxa3xx-cpufreq",
};
......
......@@ -87,16 +87,6 @@ static struct cpufreq_frequency_table s3c2450_freq_table[] = {
{ 0, CPUFREQ_TABLE_END },
};
static int s3c2416_cpufreq_verify_speed(struct cpufreq_policy *policy)
{
struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
if (policy->cpu != 0)
return -EINVAL;
return cpufreq_frequency_table_verify(policy, s3c_freq->freq_table);
}
static unsigned int s3c2416_cpufreq_get_speed(unsigned int cpu)
{
struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
......@@ -486,20 +476,14 @@ static int __init s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
freq++;
}
policy->cur = clk_get_rate(s3c_freq->armclk) / 1000;
/* Datasheet says PLL stabilisation time must be at least 300us,
* so add some fudge. (reference in LOCKCON0 register description)
*/
policy->cpuinfo.transition_latency = (500 * 1000) +
s3c_freq->regulator_latency;
ret = cpufreq_frequency_table_cpuinfo(policy, s3c_freq->freq_table);
ret = cpufreq_generic_init(policy, s3c_freq->freq_table,
(500 * 1000) + s3c_freq->regulator_latency);
if (ret)
goto err_freq_table;
cpufreq_frequency_table_get_attr(s3c_freq->freq_table, 0);
register_reboot_notifier(&s3c2416_cpufreq_reboot_notifier);
return 0;
......@@ -518,19 +502,14 @@ static int __init s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
return ret;
}
static struct freq_attr *s3c2416_cpufreq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
static struct cpufreq_driver s3c2416_cpufreq_driver = {
.flags = 0,
.verify = s3c2416_cpufreq_verify_speed,
.verify = cpufreq_generic_frequency_table_verify,
.target = s3c2416_cpufreq_set_target,
.get = s3c2416_cpufreq_get_speed,
.init = s3c2416_cpufreq_driver_init,
.name = "s3c2416",
.attr = s3c2416_cpufreq_attr,
.attr = cpufreq_generic_attr,
};
static int __init s3c2416_cpufreq_init(void)
......
......@@ -373,23 +373,7 @@ struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)
static int s3c_cpufreq_init(struct cpufreq_policy *policy)
{
printk(KERN_INFO "%s: initialising policy %p\n", __func__, policy);
if (policy->cpu != 0)
return -EINVAL;
policy->cur = s3c_cpufreq_get(0);
policy->min = policy->cpuinfo.min_freq = 0;
policy->max = policy->cpuinfo.max_freq = cpu_cur.info->max.fclk / 1000;
policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
/* feed the latency information from the cpu driver */
policy->cpuinfo.transition_latency = cpu_cur.info->latency;
if (ftab)
cpufreq_frequency_table_cpuinfo(policy, ftab);
return 0;
return cpufreq_generic_init(policy, ftab, cpu_cur.info->latency);
}
static int __init s3c_cpufreq_initclks(void)
......@@ -416,14 +400,6 @@ static int __init s3c_cpufreq_initclks(void)
return 0;
}
static int s3c_cpufreq_verify(struct cpufreq_policy *policy)
{
if (policy->cpu != 0)
return -EINVAL;
return 0;
}
#ifdef CONFIG_PM
static struct cpufreq_frequency_table suspend_pll;
static unsigned int suspend_freq;
......@@ -473,7 +449,6 @@ static int s3c_cpufreq_resume(struct cpufreq_policy *policy)
static struct cpufreq_driver s3c24xx_driver = {
.flags = CPUFREQ_STICKY,
.verify = s3c_cpufreq_verify,
.target = s3c_cpufreq_target,
.get = s3c_cpufreq_get,
.init = s3c_cpufreq_init,
......
......@@ -54,14 +54,6 @@ static struct cpufreq_frequency_table s3c64xx_freq_table[] = {
};
#endif
static int s3c64xx_cpufreq_verify_speed(struct cpufreq_policy *policy)
{
if (policy->cpu != 0)
return -EINVAL;
return cpufreq_frequency_table_verify(policy, s3c64xx_freq_table);
}
static unsigned int s3c64xx_cpufreq_get_speed(unsigned int cpu)
{
if (cpu != 0)
......@@ -243,15 +235,12 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
freq++;
}
policy->cur = clk_get_rate(armclk) / 1000;
/* Datasheet says PLL stabilisation time (if we were to use
* the PLLs, which we don't currently) is ~300us worst case,
* but add some fudge.
*/
policy->cpuinfo.transition_latency = (500 * 1000) + regulator_latency;
ret = cpufreq_frequency_table_cpuinfo(policy, s3c64xx_freq_table);
ret = cpufreq_generic_init(policy, s3c64xx_freq_table,
(500 * 1000) + regulator_latency);
if (ret != 0) {
pr_err("Failed to configure frequency table: %d\n",
ret);
......@@ -264,7 +253,7 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
static struct cpufreq_driver s3c64xx_cpufreq_driver = {
.flags = 0,
.verify = s3c64xx_cpufreq_verify_speed,
.verify = cpufreq_generic_frequency_table_verify,
.target = s3c64xx_cpufreq_set_target,
.get = s3c64xx_cpufreq_get_speed,
.init = s3c64xx_cpufreq_driver_init,
......
......@@ -174,14 +174,6 @@ static void s5pv210_set_refresh(enum s5pv210_dmc_port ch, unsigned long freq)
__raw_writel(tmp1, reg);
}
static int s5pv210_verify_speed(struct cpufreq_policy *policy)
{
if (policy->cpu)
return -EINVAL;
return cpufreq_frequency_table_verify(policy, s5pv210_freq_table);
}
static unsigned int s5pv210_getspeed(unsigned int cpu)
{
if (cpu)
......@@ -551,13 +543,7 @@ static int __init s5pv210_cpu_init(struct cpufreq_policy *policy)
s5pv210_dram_conf[1].refresh = (__raw_readl(S5P_VA_DMC1 + 0x30) * 1000);
s5pv210_dram_conf[1].freq = clk_get_rate(dmc1_clk);
policy->cur = policy->min = policy->max = s5pv210_getspeed(0);
cpufreq_frequency_table_get_attr(s5pv210_freq_table, policy->cpu);
policy->cpuinfo.transition_latency = 40000;
return cpufreq_frequency_table_cpuinfo(policy, s5pv210_freq_table);
return cpufreq_generic_init(policy, s5pv210_freq_table, 40000);
out_dmc1:
clk_put(dmc0_clk);
......@@ -605,7 +591,7 @@ static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this,
static struct cpufreq_driver s5pv210_driver = {
.flags = CPUFREQ_STICKY,
.verify = s5pv210_verify_speed,
.verify = cpufreq_generic_frequency_table_verify,
.target = s5pv210_target,
.get = s5pv210_getspeed,
.init = s5pv210_cpu_init,
......
......@@ -218,18 +218,12 @@ static int sa1100_target(struct cpufreq_policy *policy,
static int __init sa1100_cpu_init(struct cpufreq_policy *policy)
{
if (policy->cpu != 0)
return -EINVAL;
policy->cur = policy->min = policy->max = sa11x0_getspeed(0);
policy->cpuinfo.min_freq = 59000;
policy->cpuinfo.max_freq = 287000;
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
return 0;
return cpufreq_generic_init(policy, sa11x0_freq_table, CPUFREQ_ETERNAL);
}
static struct cpufreq_driver sa1100_driver __refdata = {
.flags = CPUFREQ_STICKY,
.verify = sa11x0_verify_speed,
.verify = cpufreq_generic_frequency_table_verify,
.target = sa1100_target,
.get = sa11x0_getspeed,
.init = sa1100_cpu_init,
......
......@@ -332,20 +332,14 @@ static int sa1110_target(struct cpufreq_policy *policy,
static int __init sa1110_cpu_init(struct cpufreq_policy *policy)
{
if (policy->cpu != 0)
return -EINVAL;
policy->cur = policy->min = policy->max = sa11x0_getspeed(0);
policy->cpuinfo.min_freq = 59000;
policy->cpuinfo.max_freq = 287000;
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
return 0;
return cpufreq_generic_init(policy, sa11x0_freq_table, CPUFREQ_ETERNAL);
}
/* sa1110_driver needs __refdata because it must remain after init registers
* it with cpufreq_register_driver() */
static struct cpufreq_driver sa1110_driver __refdata = {
.flags = CPUFREQ_STICKY,
.verify = sa11x0_verify_speed,
.verify = cpufreq_generic_frequency_table_verify,
.target = sa1110_target,
.get = sa11x0_getspeed,
.init = sa1110_cpu_init,
......
......@@ -78,11 +78,6 @@ static void sc520_freq_set_cpu_state(struct cpufreq_policy *policy,
cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
};
static int sc520_freq_verify(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy, &sc520_freq_table[0]);
}
static int sc520_freq_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
......@@ -106,7 +101,6 @@ static int sc520_freq_target(struct cpufreq_policy *policy,
static int sc520_freq_cpu_init(struct cpufreq_policy *policy)
{
struct cpuinfo_x86 *c = &cpu_data(0);
int result;
/* capability check */
if (c->x86_vendor != X86_VENDOR_AMD ||
......@@ -115,39 +109,19 @@ static int sc520_freq_cpu_init(struct cpufreq_policy *policy)
/* cpuinfo and default policy values */
policy->cpuinfo.transition_latency = 1000000; /* 1ms */
policy->cur = sc520_freq_get_cpu_frequency(0);
result = cpufreq_frequency_table_cpuinfo(policy, sc520_freq_table);
if (result)
return result;
cpufreq_frequency_table_get_attr(sc520_freq_table, policy->cpu);
return 0;
}
static int sc520_freq_cpu_exit(struct cpufreq_policy *policy)
{
cpufreq_frequency_table_put_attr(policy->cpu);
return 0;
return cpufreq_table_validate_and_show(policy, sc520_freq_table);
}
static struct freq_attr *sc520_freq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
static struct cpufreq_driver sc520_freq_driver = {
.get = sc520_freq_get_cpu_frequency,
.verify = sc520_freq_verify,
.verify = cpufreq_generic_frequency_table_verify,
.target = sc520_freq_target,
.init = sc520_freq_cpu_init,
.exit = sc520_freq_cpu_exit,
.exit = cpufreq_generic_exit,
.name = "sc520_freq",
.attr = sc520_freq_attr,
.attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id sc520_ids[] = {
......
......@@ -87,15 +87,12 @@ static int sh_cpufreq_verify(struct cpufreq_policy *policy)
if (freq_table)
return cpufreq_frequency_table_verify(policy, freq_table);
cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
policy->cpuinfo.max_freq);
cpufreq_verify_within_cpu_limits(policy);
policy->min = (clk_round_rate(cpuclk, 1) + 500) / 1000;
policy->max = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
policy->cpuinfo.max_freq);
cpufreq_verify_within_cpu_limits(policy);
return 0;
}
......@@ -114,15 +111,13 @@ static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy)
return PTR_ERR(cpuclk);
}
policy->cur = sh_cpufreq_get(cpu);
freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL;
if (freq_table) {
int result;
result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
if (!result)
cpufreq_frequency_table_get_attr(freq_table, cpu);
result = cpufreq_table_validate_and_show(policy, freq_table);
if (result)
return result;
} else {
dev_notice(dev, "no frequency table found, falling back "
"to rate rounding.\n");
......@@ -154,11 +149,6 @@ static int sh_cpufreq_cpu_exit(struct cpufreq_policy *policy)
return 0;
}
static struct freq_attr *sh_freq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
static struct cpufreq_driver sh_cpufreq_driver = {
.name = "sh",
.get = sh_cpufreq_get,
......@@ -166,7 +156,7 @@ static struct cpufreq_driver sh_cpufreq_driver = {
.verify = sh_cpufreq_verify,
.init = sh_cpufreq_cpu_init,
.exit = sh_cpufreq_cpu_exit,
.attr = sh_freq_attr,
.attr = cpufreq_generic_attr,
};
static int __init sh_cpufreq_module_init(void)
......
......@@ -295,12 +295,6 @@ static int us2e_freq_target(struct cpufreq_policy *policy,
return 0;
}
static int us2e_freq_verify(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy,
&us2e_freq_table[policy->cpu].table[0]);
}
static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
{
unsigned int cpu = policy->cpu;
......@@ -324,13 +318,15 @@ static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency = 0;
policy->cur = clock_tick;
return cpufreq_frequency_table_cpuinfo(policy, table);
return cpufreq_table_validate_and_show(policy, table);
}
static int us2e_freq_cpu_exit(struct cpufreq_policy *policy)
{
if (cpufreq_us2e_driver)
if (cpufreq_us2e_driver) {
cpufreq_frequency_table_put_attr(policy->cpu);
us2e_set_cpu_divider_index(policy, 0);
}
return 0;
}
......@@ -361,7 +357,7 @@ static int __init us2e_freq_init(void)
goto err_out;
driver->init = us2e_freq_cpu_init;
driver->verify = us2e_freq_verify;
driver->verify = cpufreq_generic_frequency_table_verify;
driver->target = us2e_freq_target;
driver->get = us2e_freq_get;
driver->exit = us2e_freq_cpu_exit;
......
......@@ -156,12 +156,6 @@ static int us3_freq_target(struct cpufreq_policy *policy,
return 0;
}
static int us3_freq_verify(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy,
&us3_freq_table[policy->cpu].table[0]);
}
static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
{
unsigned int cpu = policy->cpu;
......@@ -181,13 +175,15 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency = 0;
policy->cur = clock_tick;
return cpufreq_frequency_table_cpuinfo(policy, table);
return cpufreq_table_validate_and_show(policy, table);
}
static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
{
if (cpufreq_us3_driver)
if (cpufreq_us3_driver) {
cpufreq_frequency_table_put_attr(policy->cpu);
us3_set_cpu_divider_index(policy, 0);
}
return 0;
}
......@@ -222,7 +218,7 @@ static int __init us3_freq_init(void)
goto err_out;
driver->init = us3_freq_cpu_init;
driver->verify = us3_freq_verify;
driver->verify = cpufreq_generic_frequency_table_verify;
driver->target = us3_freq_target;
driver->get = us3_freq_get;
driver->exit = us3_freq_cpu_exit;
......
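Unlike the drivers converted to cpufreq_generic_exit, us2e and us3 keep their own ->exit because they must also reset the CPU divider, which is why the hunks above add an explicit cpufreq_frequency_table_put_attr() call before that cleanup. The resulting pattern for any driver with extra teardown looks roughly like this (bar_* names are hypothetical):

/* Pattern for drivers that still need their own teardown: drop the table
 * attribute first, then do the driver-specific cleanup. */
static int bar_cpufreq_exit(struct cpufreq_policy *policy)
{
	cpufreq_frequency_table_put_attr(policy->cpu);
	bar_restore_boot_divider(policy);	/* e.g. reset divider to full speed */
	return 0;
}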
......@@ -30,11 +30,6 @@ static struct {
u32 cnt;
} spear_cpufreq;
static int spear_cpufreq_verify(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy, spear_cpufreq.freq_tbl);
}
static unsigned int spear_cpufreq_get(unsigned int cpu)
{
return clk_get_rate(spear_cpufreq.clk) / 1000;
......@@ -176,43 +171,19 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy,
static int spear_cpufreq_init(struct cpufreq_policy *policy)
{
int ret;
ret = cpufreq_frequency_table_cpuinfo(policy, spear_cpufreq.freq_tbl);
if (ret) {
pr_err("cpufreq_frequency_table_cpuinfo() failed");
return ret;
}
cpufreq_frequency_table_get_attr(spear_cpufreq.freq_tbl, policy->cpu);
policy->cpuinfo.transition_latency = spear_cpufreq.transition_latency;
policy->cur = spear_cpufreq_get(0);
cpumask_setall(policy->cpus);
return 0;
}
static int spear_cpufreq_exit(struct cpufreq_policy *policy)
{
cpufreq_frequency_table_put_attr(policy->cpu);
return 0;
return cpufreq_generic_init(policy, spear_cpufreq.freq_tbl,
spear_cpufreq.transition_latency);
}
static struct freq_attr *spear_cpufreq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
static struct cpufreq_driver spear_cpufreq_driver = {
.name = "cpufreq-spear",
.flags = CPUFREQ_STICKY,
.verify = spear_cpufreq_verify,
.verify = cpufreq_generic_frequency_table_verify,
.target = spear_cpufreq_target,
.get = spear_cpufreq_get,
.init = spear_cpufreq_init,
.exit = spear_cpufreq_exit,
.attr = spear_cpufreq_attr,
.exit = cpufreq_generic_exit,
.attr = cpufreq_generic_attr,
};
static int spear_cpufreq_driver_init(void)
......
......@@ -343,9 +343,7 @@ static unsigned int get_cur_freq(unsigned int cpu)
static int centrino_cpu_init(struct cpufreq_policy *policy)
{
struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu);
unsigned freq;
unsigned l, h;
int ret;
int i;
/* Only Intel makes Enhanced Speedstep-capable CPUs */
......@@ -373,9 +371,8 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
return -ENODEV;
}
if (centrino_cpu_init_table(policy)) {
if (centrino_cpu_init_table(policy))
return -ENODEV;
}
/* Check to see if Enhanced SpeedStep is enabled, and try to
enable it if not. */
......@@ -395,22 +392,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
}
}
freq = get_cur_freq(policy->cpu);
policy->cpuinfo.transition_latency = 10000;
/* 10uS transition latency */
policy->cur = freq;
pr_debug("centrino_cpu_init: cur=%dkHz\n", policy->cur);
ret = cpufreq_frequency_table_cpuinfo(policy,
return cpufreq_table_validate_and_show(policy,
per_cpu(centrino_model, policy->cpu)->op_points);
if (ret)
return (ret);
cpufreq_frequency_table_get_attr(
per_cpu(centrino_model, policy->cpu)->op_points, policy->cpu);
return 0;
}
static int centrino_cpu_exit(struct cpufreq_policy *policy)
......@@ -427,19 +413,6 @@ static int centrino_cpu_exit(struct cpufreq_policy *policy)
return 0;
}
/**
* centrino_verify - verifies a new CPUFreq policy
* @policy: new policy
*
* Limit must be within this model's frequency range at least one
* border included.
*/
static int centrino_verify (struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy,
per_cpu(centrino_model, policy->cpu)->op_points);
}
/**
* centrino_setpolicy - set a new CPUFreq policy
* @policy: new policy
......@@ -561,20 +534,15 @@ static int centrino_target (struct cpufreq_policy *policy,
return retval;
}
static struct freq_attr* centrino_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
static struct cpufreq_driver centrino_driver = {
.name = "centrino", /* should be speedstep-centrino,
but there's a 16 char limit */
.init = centrino_cpu_init,
.exit = centrino_cpu_exit,
.verify = centrino_verify,
.verify = cpufreq_generic_frequency_table_verify,
.target = centrino_target,
.get = get_cur_freq,
.attr = centrino_attr,
.attr = cpufreq_generic_attr,
};
/*
......
......@@ -289,18 +289,6 @@ static int speedstep_target(struct cpufreq_policy *policy,
}
/**
* speedstep_verify - verifies a new CPUFreq policy
* @policy: new policy
*
* Limit must be within speedstep_low_freq and speedstep_high_freq, with
* at least one border included.
*/
static int speedstep_verify(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]);
}
struct get_freqs {
struct cpufreq_policy *policy;
int ret;
......@@ -320,8 +308,7 @@ static void get_freqs_on_cpu(void *_get_freqs)
static int speedstep_cpu_init(struct cpufreq_policy *policy)
{
int result;
unsigned int policy_cpu, speed;
unsigned int policy_cpu;
struct get_freqs gf;
/* only run on CPU to be set, or on its sibling */
......@@ -336,49 +323,18 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
if (gf.ret)
return gf.ret;
/* get current speed setting */
speed = speedstep_get(policy_cpu);
if (!speed)
return -EIO;
pr_debug("currently at %s speed setting - %i MHz\n",
(speed == speedstep_freqs[SPEEDSTEP_LOW].frequency)
? "low" : "high",
(speed / 1000));
/* cpuinfo and default policy values */
policy->cur = speed;
result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs);
if (result)
return result;
cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu);
return 0;
}
static int speedstep_cpu_exit(struct cpufreq_policy *policy)
{
cpufreq_frequency_table_put_attr(policy->cpu);
return 0;
return cpufreq_table_validate_and_show(policy, speedstep_freqs);
}
static struct freq_attr *speedstep_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
static struct cpufreq_driver speedstep_driver = {
.name = "speedstep-ich",
.verify = speedstep_verify,
.verify = cpufreq_generic_frequency_table_verify,
.target = speedstep_target,
.init = speedstep_cpu_init,
.exit = speedstep_cpu_exit,
.exit = cpufreq_generic_exit,
.get = speedstep_get,
.attr = speedstep_attr,
.attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id ss_smi_ids[] = {
......
......@@ -264,23 +264,9 @@ static int speedstep_target(struct cpufreq_policy *policy,
}
/**
* speedstep_verify - verifies a new CPUFreq policy
* @policy: new policy
*
* Limit must be within speedstep_low_freq and speedstep_high_freq, with
* at least one border included.
*/
static int speedstep_verify(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]);
}
static int speedstep_cpu_init(struct cpufreq_policy *policy)
{
int result;
unsigned int speed, state;
unsigned int *low, *high;
/* capability check */
......@@ -316,32 +302,8 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
pr_debug("workaround worked.\n");
}
/* get current speed setting */
state = speedstep_get_state();
speed = speedstep_freqs[state].frequency;
pr_debug("currently at %s speed setting - %i MHz\n",
(speed == speedstep_freqs[SPEEDSTEP_LOW].frequency)
? "low" : "high",
(speed / 1000));
/* cpuinfo and default policy values */
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
policy->cur = speed;
result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs);
if (result)
return result;
cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu);
return 0;
}
static int speedstep_cpu_exit(struct cpufreq_policy *policy)
{
cpufreq_frequency_table_put_attr(policy->cpu);
return 0;
return cpufreq_table_validate_and_show(policy, speedstep_freqs);
}
static unsigned int speedstep_get(unsigned int cpu)
......@@ -362,20 +324,15 @@ static int speedstep_resume(struct cpufreq_policy *policy)
return result;
}
static struct freq_attr *speedstep_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
static struct cpufreq_driver speedstep_driver = {
.name = "speedstep-smi",
.verify = speedstep_verify,
.verify = cpufreq_generic_frequency_table_verify,
.target = speedstep_target,
.init = speedstep_cpu_init,
.exit = speedstep_cpu_exit,
.exit = cpufreq_generic_exit,
.get = speedstep_get,
.resume = speedstep_resume,
.attr = speedstep_attr,
.attr = cpufreq_generic_attr,
};
static const struct x86_cpu_id ss_smi_ids[] = {
......
......@@ -51,11 +51,6 @@ static unsigned long target_cpu_speed[NUM_CPUS];
static DEFINE_MUTEX(tegra_cpu_lock);
static bool is_suspended;
static int tegra_verify_speed(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy, freq_table);
}
static unsigned int tegra_getspeed(unsigned int cpu)
{
unsigned long rate;
......@@ -209,21 +204,23 @@ static struct notifier_block tegra_cpu_pm_notifier = {
static int tegra_cpu_init(struct cpufreq_policy *policy)
{
int ret;
if (policy->cpu >= NUM_CPUS)
return -EINVAL;
clk_prepare_enable(emc_clk);
clk_prepare_enable(cpu_clk);
cpufreq_frequency_table_cpuinfo(policy, freq_table);
cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
policy->cur = tegra_getspeed(policy->cpu);
target_cpu_speed[policy->cpu] = policy->cur;
target_cpu_speed[policy->cpu] = tegra_getspeed(policy->cpu);
/* FIXME: what's the actual transition time? */
policy->cpuinfo.transition_latency = 300 * 1000;
cpumask_copy(policy->cpus, cpu_possible_mask);
ret = cpufreq_generic_init(policy, freq_table, 300 * 1000);
if (ret) {
clk_disable_unprepare(cpu_clk);
clk_disable_unprepare(emc_clk);
return ret;
}
if (policy->cpu == 0)
register_pm_notifier(&tegra_cpu_pm_notifier);
......@@ -233,24 +230,20 @@ static int tegra_cpu_init(struct cpufreq_policy *policy)
static int tegra_cpu_exit(struct cpufreq_policy *policy)
{
cpufreq_frequency_table_cpuinfo(policy, freq_table);
cpufreq_frequency_table_put_attr(policy->cpu);
clk_disable_unprepare(cpu_clk);
clk_disable_unprepare(emc_clk);
return 0;
}
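
tegra_cpu_init() above now hands table setup, transition latency and the shared-clock cpumask over to cpufreq_generic_init(). A sketch of that core helper, under the assumption that it builds on cpufreq_table_validate_and_show() and treats all CPUs as sharing one clock:

int cpufreq_generic_init(struct cpufreq_policy *policy,
			 struct cpufreq_frequency_table *table,
			 unsigned int transition_latency)
{
	int ret = cpufreq_table_validate_and_show(policy, table);

	if (ret) {
		pr_err("%s: invalid frequency table: %d\n", __func__, ret);
		return ret;
	}

	policy->cpuinfo.transition_latency = transition_latency;

	/* Drivers using this helper have every CPU on one shared clock. */
	cpumask_setall(policy->cpus);

	return 0;
}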
static struct freq_attr *tegra_cpufreq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
static struct cpufreq_driver tegra_cpufreq_driver = {
.verify = tegra_verify_speed,
.verify = cpufreq_generic_frequency_table_verify,
.target = tegra_target,
.get = tegra_getspeed,
.init = tegra_cpu_init,
.exit = tegra_cpu_exit,
.name = "tegra",
.attr = tegra_cpufreq_attr,
.attr = cpufreq_generic_attr,
};
static int __init tegra_cpufreq_init(void)
......
......@@ -29,9 +29,7 @@ static int ucv2_verify_speed(struct cpufreq_policy *policy)
if (policy->cpu)
return -EINVAL;
cpufreq_verify_within_limits(policy,
policy->cpuinfo.min_freq, policy->cpuinfo.max_freq);
cpufreq_verify_within_cpu_limits(policy);
return 0;
}
......@@ -68,7 +66,6 @@ static int __init ucv2_cpu_init(struct cpufreq_policy *policy)
{
if (policy->cpu != 0)
return -EINVAL;
policy->cur = ucv2_getspeed(0);
policy->min = policy->cpuinfo.min_freq = 250000;
policy->max = policy->cpuinfo.max_freq = 1000000;
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
......
......@@ -78,7 +78,6 @@ config THERMAL_GOV_USER_SPACE
config CPU_THERMAL
bool "generic cpu cooling support"
depends on CPU_FREQ
select CPU_FREQ_TABLE
help
This implements the generic cpu cooling mechanism through frequency
reduction. An ACPI version of this already exists
......
......@@ -180,13 +180,6 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
struct cpufreq_driver {
char name[CPUFREQ_NAME_LEN];
u8 flags;
/*
* This should be set by platforms having multiple clock-domains, i.e.
* supporting multiple policies. With this sysfs directories of governor
* would be created in cpu/cpu<num>/cpufreq/ directory and so they can
* use the same governor with different tunables for different clusters.
*/
bool have_governor_per_policy;
/* needed by all drivers */
int (*init) (struct cpufreq_policy *policy);
......@@ -211,13 +204,22 @@ struct cpufreq_driver {
};
/* flags */
#define CPUFREQ_STICKY 0x01 /* the driver isn't removed even if
* all ->init() calls failed */
#define CPUFREQ_CONST_LOOPS 0x02 /* loops_per_jiffy or other kernel
* "constants" aren't affected by
* frequency transitions */
#define CPUFREQ_PM_NO_WARN 0x04 /* don't warn on suspend/resume speed
* mismatches */
#define CPUFREQ_STICKY (1 << 0) /* driver isn't removed even if
all ->init() calls failed */
#define CPUFREQ_CONST_LOOPS (1 << 1) /* loops_per_jiffy or other
kernel "constants" aren't
affected by frequency
transitions */
#define CPUFREQ_PM_NO_WARN (1 << 2) /* don't warn on suspend/resume
speed mismatches */
/*
* This should be set by platforms having multiple clock-domains, i.e.
* supporting multiple policies. With this sysfs directories of governor would
* be created in cpu/cpu<num>/cpufreq/ directory and so they can use the same
* governor with different tunables for different clusters.
*/
#define CPUFREQ_HAVE_GOVERNOR_PER_POLICY (1 << 3)
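
For a platform that previously set the removed have_governor_per_policy bool, the conversion is to OR the new flag into ->flags. A hypothetical fragment, not taken from this commit (the "example" driver name is illustrative; callbacks other than the generic helpers are omitted):

static struct cpufreq_driver example_driver = {
	.name	= "example",
	/* One clock domain per cluster: per-policy governor directories. */
	.flags	= CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
	.verify	= cpufreq_generic_frequency_table_verify,
	.attr	= cpufreq_generic_attr,
	/* .init/.exit/.target/.get left out of this fragment */
};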
int cpufreq_register_driver(struct cpufreq_driver *driver_data);
int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
......@@ -240,6 +242,13 @@ static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy,
return;
}
static inline void
cpufreq_verify_within_cpu_limits(struct cpufreq_policy *policy)
{
cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
policy->cpuinfo.max_freq);
}
/*********************************************************************
* CPUFREQ NOTIFIER INTERFACE *
*********************************************************************/
......@@ -392,6 +401,7 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table);
int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy);
int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table,
......@@ -407,8 +417,20 @@ struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu);
/* the following are really really optional */
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
extern struct freq_attr *cpufreq_generic_attr[];
void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
unsigned int cpu);
void cpufreq_frequency_table_put_attr(unsigned int cpu);
int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table);
int cpufreq_generic_init(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table,
unsigned int transition_latency);
static inline int cpufreq_generic_exit(struct cpufreq_policy *policy)
{
cpufreq_frequency_table_put_attr(policy->cpu);
return 0;
}
#endif /* _LINUX_CPUFREQ_H */
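
Taken together, the helpers declared above shrink a typical ->init() callback to a frequency table plus a single call. A minimal hypothetical example, not part of this commit (example_freq_table, example_cpu_init and the 300 us latency are illustrative):

#include <linux/cpufreq.h>

/* Hypothetical table -- frequencies in kHz, terminated as required. */
static struct cpufreq_frequency_table example_freq_table[] = {
	{ .frequency = 250000 },		/* 250 MHz */
	{ .frequency = 500000 },		/* 500 MHz */
	{ .frequency = 1000000 },		/* 1 GHz */
	{ .frequency = CPUFREQ_TABLE_END },
};

static int example_cpu_init(struct cpufreq_policy *policy)
{
	/*
	 * Validate the table, publish it in sysfs, set a 300 us transition
	 * latency and mark all CPUs as sharing the policy -- one call now
	 * replaces the boilerplate removed from the drivers above.
	 */
	return cpufreq_generic_init(policy, example_freq_table, 300 * 1000);
}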