Commit ade30b83 authored by Dominik Brodowski, committed by Linus Torvalds

[PATCH] cpufreq: x86 driver updates (speedstep, longrun, p4-clockmod)

 - switch the Intel x86 speedstep/Pentium 4/Xeon drivers and the
   Transmeta Crusoe LongRun driver to use the advanced cpufreq_driver
   registration process (a minimal sketch of this pattern follows the
   commit header below)
 - cleanups
 - spelling fixes (Petri Koistinen) - thanks!
parent 39de8270
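
For context before the per-file diffs: the "advanced cpufreq_driver registration process" mentioned in the first bullet amounts to declaring the driver as a static struct cpufreq_driver with per-CPU init, verify and target (or setpolicy) callbacks, registered once with cpufreq_register_driver(). The sketch below is illustrative only; the driver name, frequency values and callback bodies are made up, and only the structure fields and the register/unregister helpers are taken from the diffs that follow.

#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/module.h>

/* Hypothetical per-CPU setup: report hardware limits and a default policy.
 * The frequencies are placeholder values, in kHz. */
static int example_cpu_init(struct cpufreq_policy *policy)
{
	policy->cpuinfo.min_freq = 300000;
	policy->cpuinfo.max_freq = 600000;
	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
	policy->policy = CPUFREQ_POLICY_PERFORMANCE;
	return 0;
}

/* Clamp any requested policy to what init() reported. */
static int example_verify(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);
	return 0;
}

/* A real .target driver would pick and program a frequency here. */
static int example_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	return 0;
}

/* The driver is a static object; no kmalloc'd driver plus NR_CPUS
 * policy array as in the old code being removed below. */
static struct cpufreq_driver example_driver = {
	.verify	= example_verify,
	.target	= example_target,
	.init	= example_cpu_init,
	.name	= "example",
};

static int __init example_init(void)
{
	return cpufreq_register_driver(&example_driver);
}

static void __exit example_exit(void)
{
	cpufreq_unregister_driver(&example_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
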
/*
* $Id: longrun.c,v 1.14 2002/10/31 21:17:40 db Exp $
* $Id: longrun.c,v 1.22 2003/02/10 17:31:50 db Exp $
*
* (C) 2002 Dominik Brodowski <linux@brodo.de>
* (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
*
* Licensed under the terms of the GNU GPL License version 2.
*
@@ -18,7 +18,7 @@
#include <asm/processor.h>
#include <asm/timex.h>
static struct cpufreq_driver *longrun_driver;
static struct cpufreq_driver longrun_driver;
/**
* longrun_{low,high}_freq is needed for the conversion of cpufreq kHz
@@ -39,9 +39,6 @@ static void longrun_get_policy(struct cpufreq_policy *policy)
{
u32 msr_lo, msr_hi;
if (!longrun_driver)
return;
rdmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi);
if (msr_lo & 0x01)
policy->policy = CPUFREQ_POLICY_PERFORMANCE;
@@ -72,7 +69,7 @@ static int longrun_set_policy(struct cpufreq_policy *policy)
u32 msr_lo, msr_hi;
u32 pctg_lo, pctg_hi;
if (!longrun_driver || !policy)
if (!policy)
return -EINVAL;
pctg_lo = (policy->min - longrun_low_freq) /
@@ -117,13 +114,16 @@ static int longrun_set_policy(struct cpufreq_policy *policy)
*/
static int longrun_verify_policy(struct cpufreq_policy *policy)
{
if (!policy || !longrun_driver)
if (!policy)
return -EINVAL;
policy->cpu = 0;
cpufreq_verify_within_limits(policy,
longrun_driver->policy[0].cpuinfo.min_freq,
longrun_driver->policy[0].cpuinfo.max_freq);
policy->cpuinfo.min_freq,
policy->cpuinfo.max_freq);
if (policy->policy == CPUFREQ_POLICY_GOVERNOR)
policy->policy = longrun_driver.policy[0].policy;
return 0;
}
@@ -221,59 +221,59 @@ static unsigned int __init longrun_determine_freqs(unsigned int *low_freq,
}
/**
* longrun_init - initializes the Transmeta Crusoe LongRun CPUFreq driver
*
* Initializes the LongRun support.
*/
static int __init longrun_init(void)
static int longrun_cpu_init(struct cpufreq_policy *policy)
{
int result;
struct cpufreq_driver *driver;
int result = 0;
struct cpuinfo_x86 *c = cpu_data;
/* capability check */
if (policy->cpu != 0)
return -ENODEV;
if (c->x86_vendor != X86_VENDOR_TRANSMETA ||
!cpu_has(c, X86_FEATURE_LONGRUN))
return 0;
/* initialization of main "cpufreq" code*/
driver = kmalloc(sizeof(struct cpufreq_driver) +
NR_CPUS * sizeof(struct cpufreq_policy), GFP_KERNEL);
if (!driver)
return -ENOMEM;
memset(driver, 0, sizeof(struct cpufreq_driver) +
NR_CPUS * sizeof(struct cpufreq_policy));
driver->policy = (struct cpufreq_policy *) (driver + 1);
if (longrun_determine_freqs(&longrun_low_freq, &longrun_high_freq)) {
kfree(driver);
return -EIO;
}
driver->policy[0].cpuinfo.min_freq = longrun_low_freq;
driver->policy[0].cpuinfo.max_freq = longrun_high_freq;
driver->policy[0].cpuinfo.transition_latency = CPUFREQ_ETERNAL;
return -ENODEV;
/* detect low and high frequency */
result = longrun_determine_freqs(&longrun_low_freq, &longrun_high_freq);
if (result)
return result;
/* cpuinfo and default policy values */
policy->cpuinfo.min_freq = longrun_low_freq;
policy->cpuinfo.max_freq = longrun_high_freq;
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
longrun_get_policy(policy);
#ifdef CONFIG_CPU_FREQ_24_API
longrun_driver.cpu_cur_freq[policy->cpu] = longrun_low_freq; /* dummy value */
#endif
strncpy(driver->name, "longrun", CPUFREQ_NAME_LEN);
return 0;
}
longrun_get_policy(&driver->policy[0]);
#ifdef CONFIG_CPU_FREQ_24_API
driver->cpu_cur_freq[0] = longrun_high_freq; /* dummy value */
#endif
static struct cpufreq_driver longrun_driver = {
.verify = longrun_verify_policy,
.setpolicy = longrun_set_policy,
.init = longrun_cpu_init,
.name = "longrun",
};
driver->verify = &longrun_verify_policy;
driver->setpolicy = &longrun_set_policy;
longrun_driver = driver;
/**
* longrun_init - initializes the Transmeta Crusoe LongRun CPUFreq driver
*
* Initializes the LongRun support.
*/
static int __init longrun_init(void)
{
struct cpuinfo_x86 *c = cpu_data;
result = cpufreq_register(driver);
if (result) {
longrun_driver = NULL;
kfree(driver);
}
if (c->x86_vendor != X86_VENDOR_TRANSMETA ||
!cpu_has(c, X86_FEATURE_LONGRUN))
return -ENODEV;
return result;
return cpufreq_register_driver(&longrun_driver);
}
@@ -282,15 +282,13 @@ static int __init longrun_init(void)
*/
static void __exit longrun_exit(void)
{
if (longrun_driver) {
cpufreq_unregister();
kfree(longrun_driver);
}
cpufreq_unregister_driver(&longrun_driver);
}
MODULE_AUTHOR ("Dominik Brodowski <linux@brodo.de>");
MODULE_DESCRIPTION ("LongRun driver for Transmeta Crusoe processors.");
MODULE_LICENSE ("GPL");
module_init(longrun_init);
module_exit(longrun_exit);
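
Aside on the two conversion styles visible in this patch: longrun above keeps a .setpolicy callback, since LongRun hardware picks the operating frequency itself within the min/max window the driver programs, whereas p4-clockmod and speedstep below switch to the table-driven .target interface. The following is a minimal, hypothetical sketch of what such a .target callback does with cpufreq_frequency_table_target(); the table contents and the demo_set_state() helper are invented for illustration.

#include <linux/cpufreq.h>

/* Illustrative two-entry table; .index is a driver-private state number. */
static struct cpufreq_frequency_table demo_table[] = {
	{ .index = 0, .frequency = 300000 },	/* kHz */
	{ .index = 1, .frequency = 600000 },
	{ .index = 0, .frequency = CPUFREQ_TABLE_END },
};

/* Hypothetical hardware hook: a real driver would program MSRs or
 * I/O ports for the requested state here. */
static int demo_set_state(unsigned int index)
{
	return 0;
}

static int demo_target(struct cpufreq_policy *policy,
		       unsigned int target_freq,
		       unsigned int relation)
{
	unsigned int newstate = 0;

	/* Pick the table entry that best matches target_freq under
	 * `relation` (CPUFREQ_RELATION_L/H), clamped to policy->min..max. */
	if (cpufreq_frequency_table_target(policy, &demo_table[0],
					   target_freq, relation, &newstate))
		return -EINVAL;

	return demo_set_state(demo_table[newstate].index);
}
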
/*
* Pentium 4/Xeon CPU on demand clock modulation/speed scaling
* (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
* (C) 2002 Zwane Mwaikambo <zwane@commfireservices.com>
* (C) 2002 Arjan van de Ven <arjanv@redhat.com>
* (C) 2002 Tora T. Engstad
@@ -45,11 +46,10 @@ enum {
#define DC_ENTRIES 8
static int has_N44_O17_errata;
static int has_N44_O17_errata[NR_CPUS];
static int stock_freq;
MODULE_PARM(stock_freq, "i");
static struct cpufreq_driver *cpufreq_p4_driver;
static struct cpufreq_driver p4clockmod_driver;
static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate)
@@ -107,17 +107,17 @@ static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate)
rdmsr(MSR_IA32_THERM_STATUS, l, h);
if (l & 0x01)
printk(KERN_DEBUG PFX "CPU#%d currently thermal throttled\n", cpu);
// printk(KERN_DEBUG PFX "CPU#%d currently thermal throttled\n", cpu);
if (has_N44_O17_errata && (newstate == DC_25PT || newstate == DC_DFLT))
if (has_N44_O17_errata[cpu] && (newstate == DC_25PT || newstate == DC_DFLT))
newstate = DC_38PT;
rdmsr(MSR_IA32_THERM_CONTROL, l, h);
if (newstate == DC_DISABLE) {
printk(KERN_INFO PFX "CPU#%d disabling modulation\n", cpu);
// printk(KERN_INFO PFX "CPU#%d disabling modulation\n", cpu);
wrmsr(MSR_IA32_THERM_CONTROL, l & ~(1<<4), h);
} else {
printk(KERN_INFO PFX "CPU#%d setting duty cycle to %d%%\n", cpu, ((125 * newstate) / 10));
// printk(KERN_INFO PFX "CPU#%d setting duty cycle to %d%%\n", cpu, ((125 * newstate) / 10));
/* bits 63 - 5 : reserved
* bit 4 : enable/disable
* bits 3-1 : duty cycle
@@ -155,14 +155,16 @@ static struct cpufreq_frequency_table p4clockmod_table[] = {
};
static int cpufreq_p4_setpolicy(struct cpufreq_policy *policy)
static int cpufreq_p4_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
unsigned int newstate = DC_RESV;
if (cpufreq_frequency_table_setpolicy(policy, &p4clockmod_table[0], &newstate))
if (cpufreq_frequency_table_target(policy, &p4clockmod_table[0], target_freq, relation, &newstate))
return -EINVAL;
cpufreq_p4_setdc(policy->cpu, newstate);
cpufreq_p4_setdc(policy->cpu, p4clockmod_table[newstate].index);
return 0;
}
@@ -174,39 +176,30 @@ static int cpufreq_p4_verify(struct cpufreq_policy *policy)
}
static int __init cpufreq_p4_init(void)
{
struct cpuinfo_x86 *c = cpu_data;
int cpuid;
int ret;
struct cpufreq_driver *driver;
static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
{
struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
int cpuid = 0;
unsigned int i;
/*
* THERM_CONTROL is architectural for IA32 now, so
* we can rely on the capability checks
*/
/* capability check */
if (c->x86_vendor != X86_VENDOR_INTEL)
return -ENODEV;
if (!test_bit(X86_FEATURE_ACPI, c->x86_capability) ||
!test_bit(X86_FEATURE_ACC, c->x86_capability))
!test_bit(X86_FEATURE_ACC, c->x86_capability))
return -ENODEV;
/* Errata workarounds */
/* Errata workaround */
cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_mask;
switch (cpuid) {
case 0x0f07:
case 0x0f0a:
case 0x0f11:
case 0x0f12:
has_N44_O17_errata = 1;
default:
break;
case 0x0f07:
case 0x0f0a:
case 0x0f11:
case 0x0f12:
has_N44_O17_errata[policy->cpu] = 1;
}
printk(KERN_INFO PFX "P4/Xeon(TM) CPU On-Demand Clock Modulation available\n");
/* get frequency */
if (!stock_freq) {
if (cpu_khz)
stock_freq = cpu_khz;
@@ -216,71 +209,69 @@ static int __init cpufreq_p4_init(void)
}
}
driver = kmalloc(sizeof(struct cpufreq_driver) +
NR_CPUS * sizeof(struct cpufreq_policy), GFP_KERNEL);
if (!driver)
return -ENOMEM;
memset(driver, 0, sizeof(struct cpufreq_driver) +
NR_CPUS * sizeof(struct cpufreq_policy));
driver->policy = (struct cpufreq_policy *) (driver + 1);
/* table init */
for (i=1; (p4clockmod_table[i].frequency != CPUFREQ_TABLE_END); i++) {
if ((i<2) && (has_N44_O17_errata))
if ((i<2) && (has_N44_O17_errata[policy->cpu]))
p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID;
else
p4clockmod_table[i].frequency = (stock_freq * i)/8;
}
/* cpuinfo and default policy values */
policy->policy = CPUFREQ_POLICY_PERFORMANCE;
policy->cpuinfo.transition_latency = 1000;
#ifdef CONFIG_CPU_FREQ_24_API
for (i=0;i<NR_CPUS;i++) {
driver->cpu_cur_freq[i] = stock_freq;
}
p4clockmod_driver.cpu_cur_freq[policy->cpu] = stock_freq;
#endif
driver->verify = &cpufreq_p4_verify;
driver->setpolicy = &cpufreq_p4_setpolicy;
strncpy(driver->name, "p4-clockmod", CPUFREQ_NAME_LEN);
return cpufreq_frequency_table_cpuinfo(policy, &p4clockmod_table[0]);
}
for (i=0;i<NR_CPUS;i++) {
driver->policy[i].cpu = i;
ret = cpufreq_frequency_table_cpuinfo(&driver->policy[i], &p4clockmod_table[0]);
if (ret) {
kfree(driver);
return ret;
}
driver->policy[i].policy = CPUFREQ_POLICY_PERFORMANCE;
driver->policy[i].cpuinfo.transition_latency = CPUFREQ_ETERNAL;
}
cpufreq_p4_driver = driver;
ret = cpufreq_register(driver);
if (ret) {
cpufreq_p4_driver = NULL;
kfree(driver);
}
static int cpufreq_p4_cpu_exit(struct cpufreq_policy *policy)
{
return cpufreq_p4_setdc(policy->cpu, DC_DISABLE);
}
return ret;
static struct cpufreq_driver p4clockmod_driver = {
.verify = cpufreq_p4_verify,
.target = cpufreq_p4_target,
.init = cpufreq_p4_cpu_init,
.exit = cpufreq_p4_cpu_exit,
.name = "p4-clockmod",
};
static int __init cpufreq_p4_init(void)
{
struct cpuinfo_x86 *c = cpu_data;
/*
* THERM_CONTROL is architectural for IA32 now, so
* we can rely on the capability checks
*/
if (c->x86_vendor != X86_VENDOR_INTEL)
return -ENODEV;
if (!test_bit(X86_FEATURE_ACPI, c->x86_capability) ||
!test_bit(X86_FEATURE_ACC, c->x86_capability))
return -ENODEV;
printk(KERN_INFO PFX "P4/Xeon(TM) CPU On-Demand Clock Modulation available\n");
return cpufreq_register_driver(&p4clockmod_driver);
}
static void __exit cpufreq_p4_exit(void)
{
unsigned int i;
if (cpufreq_p4_driver) {
for (i=0; i<NR_CPUS; i++) {
if (cpu_online(i))
cpufreq_p4_setdc(i, DC_DISABLE);
}
cpufreq_unregister();
kfree(cpufreq_p4_driver);
}
cpufreq_unregister_driver(&p4clockmod_driver);
}
MODULE_PARM(stock_freq, "i");
MODULE_AUTHOR ("Zwane Mwaikambo <zwane@commfireservices.com>");
MODULE_DESCRIPTION ("cpufreq driver for Pentium(TM) 4/Xeon(TM)");
MODULE_LICENSE ("GPL");
/*
* $Id: speedstep.c,v 1.58 2002/11/11 15:35:46 db Exp $
* $Id: speedstep.c,v 1.68 2003/01/20 17:31:47 db Exp $
*
* (C) 2001 Dave Jones, Arjan van de ven.
* (C) 2002 Dominik Brodowski <linux@brodo.de>
* (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
*
* Licensed under the terms of the GNU GPL License version 2.
* Based upon reverse engineered information, and on Intel documentation
@@ -30,7 +30,7 @@
#include <asm/msr.h>
static struct cpufreq_driver *speedstep_driver;
static struct cpufreq_driver speedstep_driver;
/* speedstep_chipset:
* It is necessary to know which chipset is used. As accesses to
@@ -208,7 +208,7 @@ static void speedstep_set_state (unsigned int state, int notify)
pm2_blk &= 0xfe;
outb(pm2_blk, (pmbase + 0x20));
/* check if transition was sucessful */
/* check if transition was successful */
value = inb(pmbase + 0x50);
/* Enable IRQs */
@@ -217,7 +217,7 @@ static void speedstep_set_state (unsigned int state, int notify)
dprintk(KERN_DEBUG "cpufreq: read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value);
if (state == (value & 0x1)) {
dprintk (KERN_INFO "cpufreq: change to %u MHz succeded\n", (freqs.new / 1000));
dprintk (KERN_INFO "cpufreq: change to %u MHz succeeded\n", (freqs.new / 1000));
} else {
printk (KERN_ERR "cpufreq: change failed - I/O error\n");
}
@@ -311,7 +311,7 @@ static unsigned int speedstep_detect_chipset (void)
pci_read_config_byte(hostbridge, PCI_REVISION_ID, &rev);
if (rev < 5) {
dprintk(KERN_INFO "cpufreq: hostbrige does not support speedstep\n");
dprintk(KERN_INFO "cpufreq: hostbridge does not support speedstep\n");
speedstep_chipset_dev = NULL;
return 0;
}
@@ -573,11 +573,13 @@ static int speedstep_detect_speeds (void)
*
* Sets a new CPUFreq policy.
*/
static int speedstep_setpolicy (struct cpufreq_policy *policy)
static int speedstep_target (struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
unsigned int newstate = 0;
if (cpufreq_frequency_table_setpolicy(policy, &speedstep_freqs[0], &newstate))
if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], target_freq, relation, &newstate))
return -EINVAL;
speedstep_set_state(newstate, 1);
@@ -599,6 +601,42 @@ static int speedstep_verify (struct cpufreq_policy *policy)
}
static int speedstep_cpu_init(struct cpufreq_policy *policy)
{
int result = 0;
unsigned int speed;
/* capability check */
if (policy->cpu != 0)
return -ENODEV;
/* detect low and high frequency */
result = speedstep_detect_speeds();
if (result)
return result;
/* get current speed setting */
result = speedstep_get_state(&speed);
if (result)
return result;
speed = (speed == SPEEDSTEP_LOW) ? speedstep_low_freq : speedstep_high_freq;
dprintk(KERN_INFO "cpufreq: currently at %s speed setting - %i MHz\n",
(speed == speedstep_low_freq) ? "low" : "high",
(speed / 1000));
/* cpuinfo and default policy values */
policy->policy = (speed == speedstep_low_freq) ?
CPUFREQ_POLICY_POWERSAVE : CPUFREQ_POLICY_PERFORMANCE;
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
#ifdef CONFIG_CPU_FREQ_24_API
speedstep_driver.cpu_cur_freq[policy->cpu] = speed;
#endif
return cpufreq_frequency_table_cpuinfo(policy, &speedstep_freqs[0]);
}
#ifndef MODULE
/**
* speedstep_setup speedstep command line parameter parsing
@@ -608,7 +646,7 @@ static int speedstep_verify (struct cpufreq_policy *policy)
* if the CPU in your notebook is a SpeedStep-capable Intel
* Pentium III Coppermine. These processors cannot be detected
* automatically, as Intel continues to consider the detection
* alogrithm as proprietary material.
* algorithm as proprietary material.
*/
static int __init speedstep_setup(char *str)
{
@@ -618,6 +656,15 @@ static int __init speedstep_setup(char *str)
__setup("speedstep_coppermine=", speedstep_setup);
#endif
static struct cpufreq_driver speedstep_driver = {
.name = "speedstep",
.verify = speedstep_verify,
.target = speedstep_target,
.init = speedstep_cpu_init,
};
/**
* speedstep_init - initializes the SpeedStep CPUFreq driver
*
@@ -627,11 +674,6 @@ __setup("speedstep_coppermine=", speedstep_setup);
*/
static int __init speedstep_init(void)
{
int result;
unsigned int speed;
struct cpufreq_driver *driver;
/* detect chipset */
speedstep_chipset = speedstep_detect_chipset();
@@ -644,70 +686,13 @@ static int __init speedstep_init(void)
return -ENODEV;
}
dprintk(KERN_INFO "cpufreq: Intel(R) SpeedStep(TM) support $Revision: 1.58 $\n");
dprintk(KERN_DEBUG "cpufreq: chipset 0x%x - processor 0x%x\n",
speedstep_chipset, speedstep_processor);
dprintk(KERN_INFO "cpufreq: Intel(R) SpeedStep(TM) support $Revision: 1.68 $\n");
/* activate speedstep support */
result = speedstep_activate();
if (result)
return result;
/* detect low and high frequency */
result = speedstep_detect_speeds();
if (result)
return result;
/* get current speed setting */
result = speedstep_get_state(&speed);
if (result)
return result;
speed = (speed == SPEEDSTEP_LOW) ? speedstep_low_freq : speedstep_high_freq;
dprintk(KERN_INFO "cpufreq: currently at %s speed setting - %i MHz\n",
(speed == speedstep_low_freq) ? "low" : "high",
(speed / 1000));
/* initialization of main "cpufreq" code*/
driver = kmalloc(sizeof(struct cpufreq_driver) +
NR_CPUS * sizeof(struct cpufreq_policy), GFP_KERNEL);
if (!driver)
return -ENOMEM;
memset(driver, 0, sizeof(struct cpufreq_driver) +
NR_CPUS * sizeof(struct cpufreq_policy));
driver->policy = (struct cpufreq_policy *) (driver + 1);
driver->policy[0].cpu = 0;
result = cpufreq_frequency_table_cpuinfo(&driver->policy[0], &speedstep_freqs[0]);
if (result) {
kfree(driver);
return result;
}
#ifdef CONFIG_CPU_FREQ_24_API
driver->cpu_cur_freq[0] = speed;
#endif
driver->verify = &speedstep_verify;
driver->setpolicy = &speedstep_setpolicy;
strncpy(driver->name, "speedstep", CPUFREQ_NAME_LEN);
driver->policy[0].cpuinfo.transition_latency = CPUFREQ_ETERNAL;
driver->policy[0].policy = (speed == speedstep_low_freq) ?
CPUFREQ_POLICY_POWERSAVE : CPUFREQ_POLICY_PERFORMANCE;
speedstep_driver = driver;
result = cpufreq_register(driver);
if (result) {
speedstep_driver = NULL;
kfree(driver);
}
if (speedstep_activate())
return -EINVAL;
return result;
return cpufreq_register_driver(&speedstep_driver);
}
@@ -718,17 +703,15 @@ static int __init speedstep_init(void)
*/
static void __exit speedstep_exit(void)
{
if (speedstep_driver) {
cpufreq_unregister();
kfree(speedstep_driver);
}
cpufreq_unregister_driver(&speedstep_driver);
}
MODULE_PARM (speedstep_coppermine, "i");
MODULE_AUTHOR ("Dave Jones <davej@suse.de>, Dominik Brodowski <linux@brodo.de>");
MODULE_DESCRIPTION ("Speedstep driver for Intel mobile processors.");
MODULE_LICENSE ("GPL");
module_init(speedstep_init);
module_exit(speedstep_exit);
MODULE_PARM (speedstep_coppermine, "i");