Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
L
linux
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Issues
0
Issues
0
List
Boards
Labels
Milestones
Merge Requests
0
Merge Requests
0
Analytics
Analytics
Repository
Value Stream
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Commits
Issue Boards
Open sidebar
nexedi
linux
Commits
dfd64ec8
Commit
dfd64ec8
authored
Aug 02, 2004
by
Dave Jones
Browse files
Options
Browse Files
Download
Plain Diff
Merge
parents
81fd00e2
f9dfb7b5
Changes
11
Hide whitespace changes
Inline
Side-by-side
Showing
11 changed files
with
313 additions
and
215 deletions
+313
-215
arch/i386/kernel/cpu/cpufreq/Kconfig
arch/i386/kernel/cpu/cpufreq/Kconfig
+10
-0
arch/i386/kernel/cpu/cpufreq/longhaul.c
arch/i386/kernel/cpu/cpufreq/longhaul.c
+58
-39
arch/i386/kernel/cpu/cpufreq/longrun.c
arch/i386/kernel/cpu/cpufreq/longrun.c
+21
-21
arch/i386/kernel/cpu/cpufreq/powernow-k7.c
arch/i386/kernel/cpu/cpufreq/powernow-k7.c
+40
-32
arch/i386/kernel/cpu/cpufreq/powernow-k8.c
arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+4
-4
arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
+55
-27
arch/i386/kernel/cpu/cpufreq/speedstep-ich.c
arch/i386/kernel/cpu/cpufreq/speedstep-ich.c
+20
-8
arch/i386/kernel/cpu/cpufreq/speedstep-smi.c
arch/i386/kernel/cpu/cpufreq/speedstep-smi.c
+6
-1
arch/x86_64/kernel/cpufreq/Kconfig
arch/x86_64/kernel/cpufreq/Kconfig
+5
-0
drivers/cpufreq/cpufreq.c
drivers/cpufreq/cpufreq.c
+84
-83
drivers/cpufreq/cpufreq_userspace.c
drivers/cpufreq/cpufreq_userspace.c
+10
-0
No files found.
arch/i386/kernel/cpu/cpufreq/Kconfig
View file @
dfd64ec8
...
...
@@ -88,6 +88,11 @@ config X86_POWERNOW_K7
If in doubt, say N.
config X86_POWERNOW_K7_ACPI
bool
depends on ((X86_POWERNOW_K7 = "m" && ACPI_PROCESSOR) || (X86_POWERNOW_K7 = "y" && ACPI_PROCESSOR = "y"))
default y
config X86_POWERNOW_K8
tristate "AMD Opteron/Athlon64 PowerNow!"
depends on CPU_FREQ && EXPERIMENTAL
...
...
@@ -98,6 +103,11 @@ config X86_POWERNOW_K8
If in doubt, say N.
config X86_POWERNOW_K8_ACPI
bool
depends on ((X86_POWERNOW_K8 = "m" && ACPI_PROCESSOR) || (X86_POWERNOW_K8 = "y" && ACPI_PROCESSOR = "y"))
default y
config X86_GX_SUSPMOD
tristate "Cyrix MediaGX/NatSemi Geode Suspend Modulation"
depends on CPU_FREQ
...
...
arch/i386/kernel/cpu/cpufreq/longhaul.c
View file @
dfd64ec8
...
...
@@ -5,14 +5,19 @@
* Licensed under the terms of the GNU GPL License version 2.
* Based upon datasheets & sample CPUs kindly provided by VIA.
*
* VIA have currently
2
different versions of Longhaul.
* VIA have currently
3
different versions of Longhaul.
* Version 1 (Longhaul) uses the BCR2 MSR at 0x1147.
* It is present only in Samuel 1, Samuel 2 and Ezra.
* Version 2 (Powersaver) uses the POWERSAVER MSR at 0x110a.
* It is present in Ezra-T, Nehemiah and above.
* In addition to scaling multiplier, it can also scale voltage.
* There is provision for scaling FSB too, but this doesn't work
* too well in practice.
* It is present only in Samuel 1 (C5A), Samuel 2 (C5B) stepping 0.
* Version 2 of longhaul is the same as v1, but adds voltage scaling.
* Present in Samuel 2 (steppings 1-7 only) (C5B), and Ezra (C5C)
* voltage scaling support has currently been disabled in this driver
* until we have code that gets it right.
* Version 3 of longhaul got renamed to Powersaver and redesigned
* to use the POWERSAVER MSR at 0x110a.
* It is present in Ezra-T (C5M), Nehemiah (C5X) and above.
* It's pretty much the same feature wise to longhaul v2, though
* there is provision for scaling FSB too, but this doesn't work
* too well in practice so we don't even try to use this.
*
* BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
*/
...
...
@@ -95,6 +100,27 @@ static int longhaul_get_cpu_mult(void)
}
static
void
do_powersaver
(
union
msr_longhaul
*
longhaul
,
unsigned
int
clock_ratio_index
,
int
version
)
{
rdmsrl
(
MSR_VIA_LONGHAUL
,
longhaul
->
val
);
longhaul
->
bits
.
SoftBusRatio
=
clock_ratio_index
&
0xf
;
longhaul
->
bits
.
SoftBusRatio4
=
(
clock_ratio_index
&
0x10
)
>>
4
;
longhaul
->
bits
.
EnableSoftBusRatio
=
1
;
longhaul
->
bits
.
RevisionKey
=
0
;
local_irq_disable
();
wrmsrl
(
MSR_VIA_LONGHAUL
,
longhaul
->
val
);
local_irq_enable
();
__hlt
();
rdmsrl
(
MSR_VIA_LONGHAUL
,
longhaul
->
val
);
longhaul
->
bits
.
EnableSoftBusRatio
=
0
;
longhaul
->
bits
.
RevisionKey
=
version
;
local_irq_disable
();
wrmsrl
(
MSR_VIA_LONGHAUL
,
longhaul
->
val
);
local_irq_enable
();
}
/**
* longhaul_set_cpu_frequency()
* @clock_ratio_index : bitpattern of the new multiplier.
...
...
@@ -126,61 +152,54 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
dprintk
(
KERN_INFO
PFX
"FSB:%d Mult:%d.%dx
\n
"
,
fsb
,
mult
/
10
,
mult
%
10
);
switch
(
longhaul_version
)
{
/*
* Longhaul v1. (Samuel[C5A] and Samuel2 stepping 0[C5B])
* Software controlled multipliers only.
*
* *NB* Until we get voltage scaling working v1 & v2 are the same code.
* Longhaul v2 appears in Samuel2 Steppings 1->7 [C5b] and Ezra [C5C]
*/
case
1
:
rdmsrl
(
MSR_VIA_BCR2
,
bcr2
.
val
);
/* Enable software clock multiplier */
bcr2
.
bits
.
ESOFTBF
=
1
;
bcr2
.
bits
.
CLOCKMUL
=
clock_ratio_index
;
local_irq_disable
();
wrmsrl
(
MSR_VIA_BCR2
,
bcr2
.
val
);
local_irq_enable
();
__hlt
();
/* Disable software clock multiplier */
rdmsrl
(
MSR_VIA_BCR2
,
bcr2
.
val
);
bcr2
.
bits
.
ESOFTBF
=
0
;
local_irq_disable
();
wrmsrl
(
MSR_VIA_BCR2
,
bcr2
.
val
);
local_irq_enable
();
break
;
/*
*
Powersaver. (Ezra-T [C5M], Nehemiah [C5N
])
*
Longhaul v3 (aka Powersaver). (Ezra-T [C5M
])
* We can scale voltage with this too, but that's currently
* disabled until we come up with a decent 'match freq to voltage'
* algorithm.
* W
e also need to do the voltage/freq setting in order depending
*
on the direction of scaling (like we do in powernow-k7.c)
*
Ezra-T was alleged to do FSB scaling too, but it never worked in practice.
* W
hen we add voltage scaling, we will also need to do the
*
voltage/freq setting in order depending on the direction
*
of scaling (like we do in powernow-k7.c)
*/
case
2
:
rdmsrl
(
MSR_VIA_LONGHAUL
,
longhaul
.
val
);
longhaul
.
bits
.
SoftBusRatio
=
clock_ratio_index
&
0xf
;
longhaul
.
bits
.
SoftBusRatio4
=
(
clock_ratio_index
&
0x10
)
>>
4
;
longhaul
.
bits
.
EnableSoftBusRatio
=
1
;
/* We must program the revision key only with values we
* know about, not blindly copy it from 0:3 */
longhaul
.
bits
.
RevisionKey
=
3
;
/* SoftVID & SoftBSEL */
wrmsrl
(
MSR_VIA_LONGHAUL
,
longhaul
.
val
);
__hlt
();
rdmsrl
(
MSR_VIA_LONGHAUL
,
longhaul
.
val
);
longhaul
.
bits
.
EnableSoftBusRatio
=
0
;
longhaul
.
bits
.
RevisionKey
=
3
;
wrmsrl
(
MSR_VIA_LONGHAUL
,
longhaul
.
val
);
do_powersaver
(
&
longhaul
,
clock_ratio_index
,
3
);
break
;
case
3
:
rdmsrl
(
MSR_VIA_LONGHAUL
,
longhaul
.
val
);
longhaul
.
bits
.
SoftBusRatio
=
clock_ratio_index
&
0xf
;
longhaul
.
bits
.
SoftBusRatio4
=
(
clock_ratio_index
&
0x10
)
>>
4
;
longhaul
.
bits
.
EnableSoftBusRatio
=
1
;
longhaul
.
bits
.
RevisionKey
=
0x0
;
wrmsrl
(
MSR_VIA_LONGHAUL
,
longhaul
.
val
);
__hlt
();
rdmsrl
(
MSR_VIA_LONGHAUL
,
longhaul
.
val
);
longhaul
.
bits
.
EnableSoftBusRatio
=
0
;
longhaul
.
bits
.
RevisionKey
=
0xf
;
wrmsrl
(
MSR_VIA_LONGHAUL
,
longhaul
.
val
);
/*
* Powersaver. (Nehemiah [C5N])
* As for Ezra-T, we don't do voltage yet.
* This can do FSB scaling too, but it has never been proven
* to work in practice.
*/
case
3
:
do_powersaver
(
&
longhaul
,
clock_ratio_index
,
0xf
);
break
;
}
...
...
arch/i386/kernel/cpu/cpufreq/longrun.c
View file @
dfd64ec8
...
...
@@ -7,7 +7,7 @@
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/cpufreq.h>
...
...
@@ -19,7 +19,7 @@
static
struct
cpufreq_driver
longrun_driver
;
/**
* longrun_{low,high}_freq is needed for the conversion of cpufreq kHz
* longrun_{low,high}_freq is needed for the conversion of cpufreq kHz
* values into per cent values. In TMTA microcode, the following is valid:
* performance_pctg = (current_freq - low_freq)/(high_freq - low_freq)
*/
...
...
@@ -42,18 +42,18 @@ static void __init longrun_get_policy(struct cpufreq_policy *policy)
policy
->
policy
=
CPUFREQ_POLICY_PERFORMANCE
;
else
policy
->
policy
=
CPUFREQ_POLICY_POWERSAVE
;
rdmsr
(
MSR_TMTA_LONGRUN_CTRL
,
msr_lo
,
msr_hi
);
msr_lo
&=
0x0000007F
;
msr_hi
&=
0x0000007F
;
if
(
longrun_high_freq
<=
longrun_low_freq
)
{
/* Assume degenerate Longrun table */
policy
->
min
=
policy
->
max
=
longrun_high_freq
;
}
else
{
policy
->
min
=
longrun_low_freq
+
msr_lo
*
policy
->
min
=
longrun_low_freq
+
msr_lo
*
((
longrun_high_freq
-
longrun_low_freq
)
/
100
);
policy
->
max
=
longrun_low_freq
+
msr_hi
*
policy
->
max
=
longrun_low_freq
+
msr_hi
*
((
longrun_high_freq
-
longrun_low_freq
)
/
100
);
}
policy
->
cpu
=
0
;
...
...
@@ -79,9 +79,9 @@ static int longrun_set_policy(struct cpufreq_policy *policy)
/* Assume degenerate Longrun table */
pctg_lo
=
pctg_hi
=
100
;
}
else
{
pctg_lo
=
(
policy
->
min
-
longrun_low_freq
)
/
pctg_lo
=
(
policy
->
min
-
longrun_low_freq
)
/
((
longrun_high_freq
-
longrun_low_freq
)
/
100
);
pctg_hi
=
(
policy
->
max
-
longrun_low_freq
)
/
pctg_hi
=
(
policy
->
max
-
longrun_low_freq
)
/
((
longrun_high_freq
-
longrun_low_freq
)
/
100
);
}
...
...
@@ -118,7 +118,7 @@ static int longrun_set_policy(struct cpufreq_policy *policy)
* longrun_verify_poliy - verifies a new CPUFreq policy
* @policy: the policy to verify
*
* Validates a new CPUFreq policy. This function has to be called with
* Validates a new CPUFreq policy. This function has to be called with
* cpufreq_driver locked.
*/
static
int
longrun_verify_policy
(
struct
cpufreq_policy
*
policy
)
...
...
@@ -127,8 +127,8 @@ static int longrun_verify_policy(struct cpufreq_policy *policy)
return
-
EINVAL
;
policy
->
cpu
=
0
;
cpufreq_verify_within_limits
(
policy
,
policy
->
cpuinfo
.
min_freq
,
cpufreq_verify_within_limits
(
policy
,
policy
->
cpuinfo
.
min_freq
,
policy
->
cpuinfo
.
max_freq
);
if
((
policy
->
policy
!=
CPUFREQ_POLICY_POWERSAVE
)
&&
...
...
@@ -160,7 +160,7 @@ static unsigned int longrun_get(unsigned int cpu)
* TMTA rules:
* performance_pctg = (target_freq - low_freq)/(high_freq - low_freq)
*/
static
unsigned
int
__init
longrun_determine_freqs
(
unsigned
int
*
low_freq
,
static
unsigned
int
__init
longrun_determine_freqs
(
unsigned
int
*
low_freq
,
unsigned
int
*
high_freq
)
{
u32
msr_lo
,
msr_hi
;
...
...
@@ -174,9 +174,9 @@ static unsigned int __init longrun_determine_freqs(unsigned int *low_freq,
if
(
cpu_has
(
c
,
X86_FEATURE_LRTI
))
{
/* if the LongRun Table Interface is present, the
* detection is a bit easier:
* detection is a bit easier:
* For minimum frequency, read out the maximum
* level (msr_hi), write that into "currently
* level (msr_hi), write that into "currently
* selected level", and read out the frequency.
* For maximum frequency, read out level zero.
*/
...
...
@@ -223,7 +223,7 @@ static unsigned int __init longrun_determine_freqs(unsigned int *low_freq,
cpuid
(
0x80860007
,
&
eax
,
&
ebx
,
&
ecx
,
&
edx
);
/* restore values */
wrmsr
(
MSR_TMTA_LONGRUN_CTRL
,
save_lo
,
save_hi
);
wrmsr
(
MSR_TMTA_LONGRUN_CTRL
,
save_lo
,
save_hi
);
}
/* performance_pctg = (current_freq - low_freq)/(high_freq - low_freq)
...
...
@@ -237,7 +237,7 @@ static unsigned int __init longrun_determine_freqs(unsigned int *low_freq,
if
((
ecx
>
95
)
||
(
ecx
==
0
)
||
(
eax
<
ebx
))
return
-
EIO
;
edx
=
(
eax
-
ebx
)
/
(
100
-
ecx
);
edx
=
(
eax
-
ebx
)
/
(
100
-
ecx
);
*
low_freq
=
edx
*
1000
;
/* back to kHz */
if
(
*
low_freq
>
*
high_freq
)
...
...
@@ -249,7 +249,7 @@ static unsigned int __init longrun_determine_freqs(unsigned int *low_freq,
static
int
__init
longrun_cpu_init
(
struct
cpufreq_policy
*
policy
)
{
int
result
=
0
;
int
result
=
0
;
/* capability check */
if
(
policy
->
cpu
!=
0
)
...
...
@@ -265,15 +265,15 @@ static int __init longrun_cpu_init(struct cpufreq_policy *policy)
policy
->
cpuinfo
.
max_freq
=
longrun_high_freq
;
policy
->
cpuinfo
.
transition_latency
=
CPUFREQ_ETERNAL
;
longrun_get_policy
(
policy
);
return
0
;
}
static
struct
cpufreq_driver
longrun_driver
=
{
.
flags
=
CPUFREQ_CONST_LOOPS
,
.
verify
=
longrun_verify_policy
,
.
setpolicy
=
longrun_set_policy
,
.
verify
=
longrun_verify_policy
,
.
setpolicy
=
longrun_set_policy
,
.
get
=
longrun_get
,
.
init
=
longrun_cpu_init
,
.
name
=
"longrun"
,
...
...
@@ -290,7 +290,7 @@ static int __init longrun_init(void)
{
struct
cpuinfo_x86
*
c
=
cpu_data
;
if
(
c
->
x86_vendor
!=
X86_VENDOR_TRANSMETA
||
if
(
c
->
x86_vendor
!=
X86_VENDOR_TRANSMETA
||
!
cpu_has
(
c
,
X86_FEATURE_LONGRUN
))
return
-
ENODEV
;
...
...
arch/i386/kernel/cpu/cpufreq/powernow-k7.c
View file @
dfd64ec8
...
...
@@ -6,8 +6,6 @@
* Licensed under the terms of the GNU GPL License version 2.
* Based upon datasheets & sample CPUs kindly provided by AMD.
*
* BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
*
* Errata 5: Processor may fail to execute a FID/VID change in presence of interrupt.
* - We cli/sti on stepping A0 CPUs around the FID/VID transition.
* Errata 15: Processors with half frequency multipliers may hang upon wakeup from disconnect.
...
...
@@ -29,21 +27,13 @@
#include <asm/io.h>
#include <asm/system.h>
#if
defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE)
#if
def CONFIG_X86_POWERNOW_K7_ACPI
#include <linux/acpi.h>
#include <acpi/processor.h>
#endif
#include "powernow-k7.h"
#define DEBUG
#ifdef DEBUG
#define dprintk(msg...) printk(msg)
#else
#define dprintk(msg...) do { } while(0)
#endif
#define PFX "powernow: "
...
...
@@ -64,7 +54,7 @@ struct pst_s {
u8
numpstates
;
};
#if
defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE)
#if
def CONFIG_X86_POWERNOW_K7_ACPI
union
powernow_acpi_control_t
{
struct
{
unsigned
long
fid
:
5
,
...
...
@@ -97,6 +87,7 @@ static int fid_codes[32] = {
*/
static
int
acpi_force
;
static
int
debug
;
static
struct
cpufreq_frequency_table
*
powernow_table
;
...
...
@@ -109,6 +100,21 @@ static unsigned int fsb;
static
unsigned
int
latency
;
static
char
have_a0
;
static
void
dprintk
(
const
char
*
fmt
,
...)
{
char
s
[
256
];
va_list
args
;
if
(
debug
==
0
)
return
;
va_start
(
args
,
fmt
);
vsprintf
(
s
,
fmt
,
args
);
printk
(
s
);
va_end
(
args
);
}
static
int
check_fsb
(
unsigned
int
fsbspeed
)
{
int
delta
;
...
...
@@ -190,13 +196,13 @@ static int get_ranges (unsigned char *pst)
speed
=
powernow_table
[
j
].
frequency
;
if
((
fid_codes
[
fid
]
%
10
)
==
5
)
{
#if
defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE)
#if
def CONFIG_X86_POWERNOW_K7_ACPI
if
(
have_a0
==
1
)
powernow_table
[
j
].
frequency
=
CPUFREQ_ENTRY_INVALID
;
#endif
}
dprintk
(
KERN_INFO
PFX
" FID: 0x%x (%d.%dx [%dMHz])
\t
"
,
fid
,
dprintk
(
KERN_INFO
PFX
" FID: 0x%x (%d.%dx [%dMHz])
"
,
fid
,
fid_codes
[
fid
]
/
10
,
fid_codes
[
fid
]
%
10
,
speed
/
1000
);
if
(
speed
<
minimum_speed
)
...
...
@@ -285,7 +291,7 @@ static void change_speed (unsigned int index)
change_VID
(
vid
);
change_FID
(
fid
);
}
if
(
have_a0
==
1
)
local_irq_enable
();
...
...
@@ -294,7 +300,7 @@ static void change_speed (unsigned int index)
}
#if
defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE)
#if
def CONFIG_X86_POWERNOW_K7_ACPI
struct
acpi_processor_performance
*
acpi_processor_perf
;
...
...
@@ -377,7 +383,7 @@ static int powernow_acpi_init(void)
powernow_table
[
i
].
frequency
=
CPUFREQ_ENTRY_INVALID
;
}
dprintk
(
KERN_INFO
PFX
" FID: 0x%x (%d.%dx [%dMHz])
\t
"
,
fid
,
dprintk
(
KERN_INFO
PFX
" FID: 0x%x (%d.%dx [%dMHz])
"
,
fid
,
fid_codes
[
fid
]
/
10
,
fid_codes
[
fid
]
%
10
,
speed
/
1000
);
dprintk
(
"VID: 0x%x (%d.%03dV)
\n
"
,
vid
,
mobile_vid_table
[
vid
]
/
1000
,
mobile_vid_table
[
vid
]
%
1000
);
...
...
@@ -467,9 +473,9 @@ static int powernow_decode_bios (int maxfid, int startvid)
(
maxfid
==
pst
->
maxfid
)
&&
(
startvid
==
pst
->
startvid
))
{
dprintk
(
KERN_INFO
PFX
"PST:%d (@%p)
\n
"
,
i
,
pst
);
dprintk
(
KERN_INFO
PFX
" cpuid: 0x%x
\t
"
,
pst
->
cpuid
);
dprintk
(
"fsb: %d
\t
"
,
pst
->
fsbspeed
);
dprintk
(
"maxFID: 0x%x
\t
"
,
pst
->
maxfid
);
dprintk
(
KERN_INFO
PFX
" cpuid: 0x%x
"
,
pst
->
cpuid
);
dprintk
(
"fsb: %d
"
,
pst
->
fsbspeed
);
dprintk
(
"maxFID: 0x%x
"
,
pst
->
maxfid
);
dprintk
(
"startvid: 0x%x
\n
"
,
pst
->
startvid
);
ret
=
get_ranges
((
char
*
)
pst
+
sizeof
(
struct
pst_s
));
...
...
@@ -591,14 +597,14 @@ static int __init powernow_cpu_init (struct cpufreq_policy *policy)
rdmsrl
(
MSR_K7_FID_VID_STATUS
,
fidvidstatus
.
val
);
/* A K7 with powernow technology is set to max frequency by BIOS */
fsb
=
(
10
*
cpu_khz
)
/
fid_codes
[
fidvidstatus
.
bits
.
C
FID
];
fsb
=
(
10
*
cpu_khz
)
/
fid_codes
[
fidvidstatus
.
bits
.
M
FID
];
if
(
!
fsb
)
{
printk
(
KERN_WARNING
PFX
"can not determine bus frequency
\n
"
);
return
-
EINVAL
;
}
dprintk
(
KERN_INFO
PFX
"FSB: %3d.%03d MHz
\n
"
,
fsb
/
1000
,
fsb
%
1000
);
if
(
dmi_check_system
(
powernow_dmi_table
)
||
acpi_force
)
{
if
(
dmi_check_system
(
powernow_dmi_table
)
||
acpi_force
)
{
printk
(
KERN_INFO
PFX
"PSB/PST known to be broken. Trying ACPI instead
\n
"
);
result
=
powernow_acpi_init
();
}
else
{
...
...
@@ -648,14 +654,14 @@ static struct freq_attr* powernow_table_attr[] = {
};
static
struct
cpufreq_driver
powernow_driver
=
{
.
verify
=
powernow_verify
,
.
target
=
powernow_target
,
.
get
=
powernow_get
,
.
init
=
powernow_cpu_init
,
.
exit
=
powernow_cpu_exit
,
.
name
=
"powernow-k7"
,
.
owner
=
THIS_MODULE
,
.
attr
=
powernow_table_attr
,
.
verify
=
powernow_verify
,
.
target
=
powernow_target
,
.
get
=
powernow_get
,
.
init
=
powernow_cpu_init
,
.
exit
=
powernow_cpu_exit
,
.
name
=
"powernow-k7"
,
.
owner
=
THIS_MODULE
,
.
attr
=
powernow_table_attr
,
};
static
int
__init
powernow_init
(
void
)
...
...
@@ -668,7 +674,7 @@ static int __init powernow_init (void)
static
void
__exit
powernow_exit
(
void
)
{
#if
defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE)
#if
def CONFIG_X86_POWERNOW_K7_ACPI
if
(
acpi_processor_perf
)
{
acpi_processor_unregister_performance
(
acpi_processor_perf
,
0
);
kfree
(
acpi_processor_perf
);
...
...
@@ -679,8 +685,10 @@ static void __exit powernow_exit (void)
kfree
(
powernow_table
);
}
module_param
(
debug
,
int
,
0444
);
MODULE_PARM_DESC
(
debug
,
"enable debug output."
);
module_param
(
acpi_force
,
int
,
0444
);
MODULE_PARM_DESC
(
acpi_force
,
"Force ACPI to be used"
);
MODULE_PARM_DESC
(
acpi_force
,
"Force ACPI to be used
.
"
);
MODULE_AUTHOR
(
"Dave Jones <davej@codemonkey.org.uk>"
);
MODULE_DESCRIPTION
(
"Powernow driver for AMD K7 processors."
);
...
...
arch/i386/kernel/cpu/cpufreq/powernow-k8.c
View file @
dfd64ec8
...
...
@@ -32,7 +32,7 @@
#include <asm/io.h>
#include <asm/delay.h>
#if
defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE)
#if
def CONFIG_X86_POWERNOW_K8_ACPI
#include <linux/acpi.h>
#include <acpi/processor.h>
#endif
...
...
@@ -664,7 +664,7 @@ static int find_psb_table(struct powernow_k8_data *data)
return
-
ENODEV
;
}
#if
defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE)
#if
def CONFIG_X86_POWERNOW_K8_ACPI
static
void
powernow_k8_acpi_pst_values
(
struct
powernow_k8_data
*
data
,
unsigned
int
index
)
{
if
(
!
data
->
acpi_data
.
state_count
)
...
...
@@ -1024,7 +1024,7 @@ static int __init powernowk8_cpu_init(struct cpufreq_policy *pol)
return
-
ENODEV
;
}
static
int
__exit
powernowk8_cpu_exit
(
struct
cpufreq_policy
*
pol
)
static
int
__
dev
exit
powernowk8_cpu_exit
(
struct
cpufreq_policy
*
pol
)
{
struct
powernow_k8_data
*
data
=
powernow_data
[
pol
->
cpu
];
...
...
@@ -1076,7 +1076,7 @@ static struct cpufreq_driver cpufreq_amd64_driver = {
.
verify
=
powernowk8_verify
,
.
target
=
powernowk8_target
,
.
init
=
powernowk8_cpu_init
,
.
exit
=
powernowk8_cpu_exit
,
.
exit
=
__devexit_p
(
powernowk8_cpu_exit
)
,
.
get
=
powernowk8_get
,
.
name
=
"powernow-k8"
,
.
owner
=
THIS_MODULE
,
...
...
arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
View file @
dfd64ec8
...
...
@@ -40,25 +40,24 @@
struct
cpu_id
{
__u8
x86
;
/* CPU family */
__u8
x86_vendor
;
/* CPU vendor */
__u8
x86
;
/* CPU family */
__u8
x86_model
;
/* model */
__u8
x86_mask
;
/* stepping */
};
static
const
struct
cpu_id
cpu_id_banias
=
{
.
x86_vendor
=
X86_VENDOR_INTEL
,
.
x86
=
6
,
.
x86_model
=
9
,
.
x86_mask
=
5
,
enum
{
CPU_BANIAS
,
CPU_DOTHAN_A1
,
CPU_DOTHAN_B0
,
};
static
const
struct
cpu_id
cpu_id_dothan_a1
=
{
.
x86_vendor
=
X86_VENDOR_INTEL
,
.
x86
=
6
,
.
x86_model
=
13
,
.
x86_mask
=
1
,
static
const
struct
cpu_id
cpu_ids
[]
=
{
[
CPU_BANIAS
]
=
{
X86_VENDOR_INTEL
,
6
,
9
,
5
},
[
CPU_DOTHAN_A1
]
=
{
X86_VENDOR_INTEL
,
6
,
13
,
1
},
[
CPU_DOTHAN_B0
]
=
{
X86_VENDOR_INTEL
,
6
,
13
,
6
},
};
#define N_IDS (sizeof(cpu_ids)/sizeof(cpu_ids[0]))
struct
cpu_model
{
...
...
@@ -68,7 +67,7 @@ struct cpu_model
struct
cpufreq_frequency_table
*
op_points
;
/* clock/voltage pairs */
};
static
int
centrino_verify_cpu_id
(
struct
cpuinfo_x86
*
c
,
const
struct
cpu_id
*
x
);
static
int
centrino_verify_cpu_id
(
const
struct
cpuinfo_x86
*
c
,
const
struct
cpu_id
*
x
);
/* Operating points for current CPU */
static
struct
cpu_model
*
centrino_model
;
...
...
@@ -103,9 +102,9 @@ static struct cpufreq_frequency_table banias_900[] =
/* Ultra Low Voltage Intel Pentium M processor 1000MHz (Banias) */
static
struct
cpufreq_frequency_table
banias_1000
[]
=
{
OP
(
600
,
844
),
OP
(
800
,
972
),
OP
(
900
,
988
),
OP
(
600
,
844
),
OP
(
800
,
972
),
OP
(
900
,
988
),
OP
(
1000
,
1004
),
{
.
frequency
=
CPUFREQ_TABLE_END
}
};
...
...
@@ -199,13 +198,13 @@ static struct cpufreq_frequency_table banias_1700[] =
.max_freq = (max)*1000, \
.op_points = banias_##max, \
}
#define BANIAS(max) _BANIAS(&cpu_id
_banias
, max, #max)
#define BANIAS(max) _BANIAS(&cpu_id
s[CPU_BANIAS]
, max, #max)
/* CPU models, their operating frequency range, and freq/voltage
operating points */
static
struct
cpu_model
models
[]
=
{
_BANIAS
(
&
cpu_id
_banias
,
900
,
" 900"
),
_BANIAS
(
&
cpu_id
s
[
CPU_BANIAS
]
,
900
,
" 900"
),
BANIAS
(
1000
),
BANIAS
(
1100
),
BANIAS
(
1200
),
...
...
@@ -214,6 +213,11 @@ static struct cpu_model models[] =
BANIAS
(
1500
),
BANIAS
(
1600
),
BANIAS
(
1700
),
/* NULL model_name is a wildcard */
{
&
cpu_ids
[
CPU_DOTHAN_A1
],
NULL
,
0
,
NULL
},
{
&
cpu_ids
[
CPU_DOTHAN_B0
],
NULL
,
0
,
NULL
},
{
NULL
,
}
};
#undef _BANIAS
...
...
@@ -224,17 +228,28 @@ static int centrino_cpu_init_table(struct cpufreq_policy *policy)
struct
cpuinfo_x86
*
cpu
=
&
cpu_data
[
policy
->
cpu
];
struct
cpu_model
*
model
;
for
(
model
=
models
;
model
->
model_name
!=
NULL
;
model
++
)
if
((
strcmp
(
cpu
->
x86_model_id
,
model
->
model_name
)
==
0
)
&&
(
!
centrino_verify_cpu_id
(
cpu
,
model
->
cpu_id
)))
for
(
model
=
models
;
model
->
cpu_id
!=
NULL
;
model
++
)
if
(
centrino_verify_cpu_id
(
cpu
,
model
->
cpu_id
)
&&
(
model
->
model_name
==
NULL
||
strcmp
(
cpu
->
x86_model_id
,
model
->
model_name
)
==
0
))
break
;
if
(
model
->
model_name
==
NULL
)
{
if
(
model
->
cpu_id
==
NULL
)
{
/* No match at all */
printk
(
KERN_INFO
PFX
"no support for CPU model
\"
%s
\"
: "
"send /proc/cpuinfo to "
MAINTAINER
"
\n
"
,
cpu
->
x86_model_id
);
return
-
ENOENT
;
}
if
(
model
->
op_points
==
NULL
)
{
/* Matched a non-match */
printk
(
KERN_INFO
PFX
"no table support for CPU model
\"
%s
\"
:
\n
"
,
cpu
->
x86_model_id
);
printk
(
KERN_INFO
PFX
"try compiling with CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI enabled
\n
"
);
return
-
ENOENT
;
}
centrino_model
=
model
;
printk
(
KERN_INFO
PFX
"found
\"
%s
\"
: max frequency: %dkHz
\n
"
,
...
...
@@ -247,14 +262,14 @@ static int centrino_cpu_init_table(struct cpufreq_policy *policy)
static
inline
int
centrino_cpu_init_table
(
struct
cpufreq_policy
*
policy
)
{
return
-
ENODEV
;
}
#endif
/* CONFIG_X86_SPEEDSTEP_CENTRINO_TABLE */
static
int
centrino_verify_cpu_id
(
struct
cpuinfo_x86
*
c
,
const
struct
cpu_id
*
x
)
static
int
centrino_verify_cpu_id
(
const
struct
cpuinfo_x86
*
c
,
const
struct
cpu_id
*
x
)
{
if
((
c
->
x86
==
x
->
x86
)
&&
(
c
->
x86_vendor
==
x
->
x86_vendor
)
&&
(
c
->
x86_model
==
x
->
x86_model
)
&&
(
c
->
x86_mask
==
x
->
x86_mask
))
return
0
;
return
-
ENODEV
;
return
1
;
return
0
;
}
/* Extract clock in kHz from PERF_CTL value */
...
...
@@ -340,6 +355,12 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
goto
err_unreg
;
}
if
(
p
.
states
[
i
].
core_frequency
>
p
.
states
[
0
].
core_frequency
)
{
printk
(
KERN_DEBUG
"P%u has larger frequency than P0, skipping
\n
"
,
i
);
p
.
states
[
i
].
core_frequency
=
0
;
continue
;
}
if
(
extract_clock
(
p
.
states
[
i
].
control
)
!=
(
p
.
states
[
i
].
core_frequency
*
1000
))
{
printk
(
KERN_DEBUG
"Invalid encoded frequency
\n
"
);
...
...
@@ -371,6 +392,8 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
centrino_model
->
op_points
[
i
].
frequency
=
p
.
states
[
i
].
core_frequency
*
1000
;
if
(
cur_freq
==
centrino_model
->
op_points
[
i
].
frequency
)
p
.
state
=
i
;
if
(
!
p
.
states
[
i
].
core_frequency
)
centrino_model
->
op_points
[
i
].
frequency
=
CPUFREQ_ENTRY_INVALID
;
}
centrino_model
->
op_points
[
p
.
state_count
].
frequency
=
CPUFREQ_TABLE_END
;
...
...
@@ -392,15 +415,20 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
unsigned
freq
;
unsigned
l
,
h
;
int
ret
;
int
i
;
if
(
policy
->
cpu
!=
0
)
return
-
ENODEV
;
if
(
!
cpu_has
(
cpu
,
X86_FEATURE_EST
))
/* Only Intel makes Enhanced Speedstep-capable CPUs */
if
(
cpu
->
x86_vendor
!=
X86_VENDOR_INTEL
||
!
cpu_has
(
cpu
,
X86_FEATURE_EST
))
return
-
ENODEV
;
if
((
centrino_verify_cpu_id
(
cpu
,
&
cpu_id_banias
))
&&
(
centrino_verify_cpu_id
(
cpu
,
&
cpu_id_dothan_a1
)))
{
for
(
i
=
0
;
i
<
N_IDS
;
i
++
)
if
(
centrino_verify_cpu_id
(
cpu
,
&
cpu_ids
[
i
]))
break
;
if
(
i
==
N_IDS
)
{
printk
(
KERN_INFO
PFX
"found unsupported CPU with Enhanced SpeedStep: "
"send /proc/cpuinfo to "
MAINTAINER
"
\n
"
);
return
-
ENODEV
;
...
...
arch/i386/kernel/cpu/cpufreq/speedstep-ich.c
View file @
dfd64ec8
...
...
@@ -247,14 +247,14 @@ static int speedstep_target (struct cpufreq_policy *policy,
if
(
cpufreq_frequency_table_target
(
policy
,
&
speedstep_freqs
[
0
],
target_freq
,
relation
,
&
newstate
))
return
-
EINVAL
;
freqs
.
old
=
speedstep_get
(
policy
->
cpu
);
freqs
.
new
=
speedstep_freqs
[
newstate
].
frequency
;
freqs
.
cpu
=
policy
->
cpu
;
/* no transition necessary */
if
(
freqs
.
old
==
freqs
.
new
)
return
0
;
freqs
.
old
=
speedstep_get_processor_frequency
(
speedstep_processor
);
freqs
.
new
=
speedstep_freqs
[
newstate
].
frequency
;
freqs
.
cpu
=
policy
->
cpu
;
cpus_allowed
=
current
->
cpus_allowed
;
/* only run on CPU to be set, or on its sibling */
...
...
@@ -324,14 +324,13 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
&
speedstep_freqs
[
SPEEDSTEP_LOW
].
frequency
,
&
speedstep_freqs
[
SPEEDSTEP_HIGH
].
frequency
,
&
speedstep_set_state
);
set_cpus_allowed
(
current
,
cpus_allowed
);
if
(
result
)
{
set_cpus_allowed
(
current
,
cpus_allowed
);
return
result
;
}
/* get current speed setting */
speed
=
speedstep_get_processor_frequency
(
speedstep_processor
);
set_cpus_allowed
(
current
,
cpus_allowed
);
speed
=
speedstep_get
(
policy
->
cpu
);
if
(
!
speed
)
return
-
EIO
;
...
...
@@ -362,7 +361,20 @@ static int speedstep_cpu_exit(struct cpufreq_policy *policy)
static
unsigned
int
speedstep_get
(
unsigned
int
cpu
)
{
return
speedstep_get_processor_frequency
(
speedstep_processor
);
unsigned
int
speed
;
cpumask_t
cpus_allowed
,
affected_cpu_map
;
/* only run on CPU to be set, or on its sibling */
cpus_allowed
=
current
->
cpus_allowed
;
#ifdef CONFIG_SMP
affected_cpu_map
=
cpu_sibling_map
[
cpu
];
#else
affected_cpu_map
=
cpumask_of_cpu
(
cpu
);
#endif
set_cpus_allowed
(
current
,
affected_cpu_map
);
speed
=
speedstep_get_processor_frequency
(
speedstep_processor
);
set_cpus_allowed
(
current
,
cpus_allowed
);
return
speed
;
}
static
struct
freq_attr
*
speedstep_attr
[]
=
{
...
...
arch/i386/kernel/cpu/cpufreq/speedstep-smi.c
View file @
dfd64ec8
...
...
@@ -115,6 +115,11 @@ static int speedstep_smi_get_freqs (unsigned int *low, unsigned int *high)
:
"=a"
(
result
),
"=b"
(
high_mhz
),
"=c"
(
low_mhz
),
"=d"
(
state
),
"=D"
(
edi
)
:
"a"
(
command
),
"b"
(
function
),
"c"
(
state
),
"d"
(
smi_port
),
"S"
(
0
)
);
/* abort if results are obviously incorrect... */
if
((
high_mhz
+
low_mhz
)
<
600
)
return
-
EINVAL
;
*
high
=
high_mhz
*
1000
;
*
low
=
low_mhz
*
1000
;
...
...
@@ -180,7 +185,7 @@ static void speedstep_set_state (unsigned int state)
local_irq_restore
(
flags
);
if
(
new_state
==
state
)
{
dprintk
(
KERN_INFO
"cpufreq: change to %u MHz succeeded after %u tries with result %u
\n
"
,
(
freqs
.
new
/
1000
),
retry
,
result
);
dprintk
(
KERN_INFO
"cpufreq: change to %u MHz succeeded after %u tries with result %u
\n
"
,
(
speedstep_freqs
[
new_state
].
frequency
/
1000
),
retry
,
result
);
}
else
{
printk
(
KERN_ERR
"cpufreq: change failed with new_state %u and result %u
\n
"
,
new_state
,
result
);
}
...
...
arch/x86_64/kernel/cpufreq/Kconfig
View file @
dfd64ec8
...
...
@@ -41,4 +41,9 @@ config X86_POWERNOW_K8
If in doubt, say N.
config X86_POWERNOW_K8_ACPI
bool
depends on ((X86_POWERNOW_K8 = "m" && ACPI_PROCESSOR) || (X86_POWERNOW_K8 = "y" && ACPI_PROCESSOR = "y"))
default y
endmenu
drivers/cpufreq/cpufreq.c
View file @
dfd64ec8
...
...
@@ -99,6 +99,86 @@ static void cpufreq_cpu_put(struct cpufreq_policy *data)
module_put
(
cpufreq_driver
->
owner
);
}
/*********************************************************************
* EXTERNALLY AFFECTING FREQUENCY CHANGES *
*********************************************************************/
/**
* adjust_jiffies - adjust the system "loops_per_jiffy"
*
* This function alters the system "loops_per_jiffy" for the clock
* speed change. Note that loops_per_jiffy cannot be updated on SMP
* systems as each CPU might be scaled differently. So, use the arch
* per-CPU loops_per_jiffy value wherever possible.
*/
#ifndef CONFIG_SMP
static
unsigned
long
l_p_j_ref
;
static
unsigned
int
l_p_j_ref_freq
;
static
inline
void
adjust_jiffies
(
unsigned
long
val
,
struct
cpufreq_freqs
*
ci
)
{
if
(
ci
->
flags
&
CPUFREQ_CONST_LOOPS
)
return
;
if
(
!
l_p_j_ref_freq
)
{
l_p_j_ref
=
loops_per_jiffy
;
l_p_j_ref_freq
=
ci
->
old
;
}
if
((
val
==
CPUFREQ_PRECHANGE
&&
ci
->
old
<
ci
->
new
)
||
(
val
==
CPUFREQ_POSTCHANGE
&&
ci
->
old
>
ci
->
new
)
||
(
val
==
CPUFREQ_RESUMECHANGE
))
loops_per_jiffy
=
cpufreq_scale
(
l_p_j_ref
,
l_p_j_ref_freq
,
ci
->
new
);
}
#else
static
inline
void
adjust_jiffies
(
unsigned
long
val
,
struct
cpufreq_freqs
*
ci
)
{
return
;
}
#endif
/**
* cpufreq_notify_transition - call notifier chain and adjust_jiffies on frequency transition
*
* This function calls the transition notifiers and the "adjust_jiffies" function. It is called
* twice on all CPU frequency changes that have external effects.
*/
void
cpufreq_notify_transition
(
struct
cpufreq_freqs
*
freqs
,
unsigned
int
state
)
{
BUG_ON
(
irqs_disabled
());
freqs
->
flags
=
cpufreq_driver
->
flags
;
down_read
(
&
cpufreq_notifier_rwsem
);
switch
(
state
)
{
case
CPUFREQ_PRECHANGE
:
/* detect if the driver reported a value as "old frequency" which
* is not equal to what the cpufreq core thinks is "old frequency".
*/
if
(
!
(
cpufreq_driver
->
flags
&
CPUFREQ_CONST_LOOPS
))
{
if
((
likely
(
cpufreq_cpu_data
[
freqs
->
cpu
]
->
cur
))
&&
(
unlikely
(
freqs
->
old
!=
cpufreq_cpu_data
[
freqs
->
cpu
]
->
cur
)))
{
if
(
cpufreq_driver
->
flags
&
CPUFREQ_PANIC_OUTOFSYNC
)
panic
(
"CPU Frequency is out of sync."
);
printk
(
KERN_WARNING
"Warning: CPU frequency is %u, "
"cpufreq assumed %u kHz.
\n
"
,
freqs
->
old
,
cpufreq_cpu_data
[
freqs
->
cpu
]
->
cur
);
freqs
->
old
=
cpufreq_cpu_data
[
freqs
->
cpu
]
->
cur
;
}
}
notifier_call_chain
(
&
cpufreq_transition_notifier_list
,
CPUFREQ_PRECHANGE
,
freqs
);
adjust_jiffies
(
CPUFREQ_PRECHANGE
,
freqs
);
break
;
case
CPUFREQ_POSTCHANGE
:
adjust_jiffies
(
CPUFREQ_POSTCHANGE
,
freqs
);
notifier_call_chain
(
&
cpufreq_transition_notifier_list
,
CPUFREQ_POSTCHANGE
,
freqs
);
cpufreq_cpu_data
[
freqs
->
cpu
]
->
cur
=
freqs
->
new
;
break
;
}
up_read
(
&
cpufreq_notifier_rwsem
);
}
EXPORT_SYMBOL_GPL
(
cpufreq_notify_transition
);
/*********************************************************************
* SYSFS INTERFACE *
*********************************************************************/
...
...
@@ -617,8 +697,8 @@ static int cpufreq_resume(struct sys_device * sysdev)
if
(
cpufreq_driver
->
flags
&
CPUFREQ_PANIC_RESUME_OUTOFSYNC
)
panic
(
"CPU Frequency is out of sync."
);
printk
(
KERN_WARNING
"Warning: CPU frequency
out of sync: cpufreq and timing
"
"c
ore thinks of %u, is %u kHz.
\n
"
,
cpu_policy
->
cur
,
cur_freq
);
printk
(
KERN_WARNING
"Warning: CPU frequency
is %u,
"
"c
pufreq assumed %u kHz.
\n
"
,
cur_freq
,
cpu_policy
->
cur
);
freqs
.
cpu
=
cpu
;
freqs
.
old
=
cpu_policy
->
cur
;
...
...
@@ -626,6 +706,8 @@ static int cpufreq_resume(struct sys_device * sysdev)
notifier_call_chain
(
&
cpufreq_transition_notifier_list
,
CPUFREQ_RESUMECHANGE
,
&
freqs
);
adjust_jiffies
(
CPUFREQ_RESUMECHANGE
,
&
freqs
);
cpu_policy
->
cur
=
cur_freq
;
}
}
...
...
@@ -1005,87 +1087,6 @@ int cpufreq_update_policy(unsigned int cpu)
EXPORT_SYMBOL
(
cpufreq_update_policy
);
/*********************************************************************
* EXTERNALLY AFFECTING FREQUENCY CHANGES *
*********************************************************************/
/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * Rescale loops_per_jiffy to track the CPU clock across a frequency
 * transition.  Only meaningful on UP: on SMP each CPU might be scaled
 * differently, so the arch per-CPU loops_per_jiffy value must be used
 * instead and the SMP variant below is a no-op.
 */
#ifndef CONFIG_SMP
static unsigned long l_p_j_ref;		/* loops_per_jiffy at reference freq */
static unsigned int l_p_j_ref_freq;	/* reference frequency in kHz */

static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	int rescale;

	/* nothing to do if delay loops are frequency-independent */
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	/* capture the reference point on the very first transition */
	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
	}

	/* Scale up before speeding up and scale down after slowing down,
	 * so loops_per_jiffy is never too small for the current speed;
	 * on resume always resync. */
	rescale = (val == CPUFREQ_RESUMECHANGE) ||
		  (val == CPUFREQ_PRECHANGE && ci->old < ci->new) ||
		  (val == CPUFREQ_POSTCHANGE && ci->old > ci->new);
	if (rescale)
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
						ci->new);
}
#else
/* SMP: see the header comment above — intentionally a no-op */
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	return;
}
#endif
/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies on frequency transition
 *
 * This function calls the transition notifiers and the "adjust_jiffies" function. It is called
 * twice on all CPU frequency changes that have external effects: once with
 * CPUFREQ_PRECHANGE before the driver changes frequency, once with
 * CPUFREQ_POSTCHANGE afterwards.  The notifier chain may sleep, hence the
 * BUG_ON below.
 */
void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
{
	/* notifiers can sleep; calling this with IRQs off is a driver bug */
	BUG_ON(irqs_disabled());

	freqs->flags = cpufreq_driver->flags;

	/* hold off concurrent (un)registration of transition notifiers */
	down_read(&cpufreq_notifier_rwsem);
	switch (state) {
	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency" which
		 * is not equal to what the cpufreq core thinks is "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((likely(cpufreq_cpu_data[freqs->cpu]->cur)) &&
			    (unlikely(freqs->old != cpufreq_cpu_data[freqs->cpu]->cur))) {
				/* drivers may request a panic instead of a warning
				 * when the frequencies disagree */
				if (cpufreq_driver->flags & CPUFREQ_PANIC_OUTOFSYNC)
					panic("CPU Frequency is out of sync.");
				printk(KERN_WARNING "Warning: CPU frequency out of sync: "
				       "cpufreq and timing core thinks of %u, is %u kHz.\n",
				       cpufreq_cpu_data[freqs->cpu]->cur, freqs->old);
				/* trust the core's notion of the old frequency so
				 * notifiers and adjust_jiffies see consistent data */
				freqs->old = cpufreq_cpu_data[freqs->cpu]->cur;
			}
		}
		notifier_call_chain(&cpufreq_transition_notifier_list,
				    CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;
	case CPUFREQ_POSTCHANGE:
		/* note the mirrored order: jiffies first, notifiers second */
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		notifier_call_chain(&cpufreq_transition_notifier_list,
				    CPUFREQ_POSTCHANGE, freqs);
		/* record the new frequency as the core's current value */
		cpufreq_cpu_data[freqs->cpu]->cur = freqs->new;
		break;
	}
	up_read(&cpufreq_notifier_rwsem);
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
/*********************************************************************
* REGISTER / UNREGISTER CPUFREQ DRIVER *
*********************************************************************/
...
...
drivers/cpufreq/cpufreq_userspace.c
View file @
dfd64ec8
...
...
@@ -82,6 +82,13 @@ userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
{
struct
cpufreq_freqs
*
freq
=
data
;
/* Don't update cur_freq if CPU is managed and we're
* waking up: else we won't remember what frequency
* we need to set the CPU to.
*/
if
(
cpu_is_managed
[
freq
->
cpu
]
&&
(
val
==
CPUFREQ_RESUMECHANGE
))
return
0
;
cpu_cur_freq
[
freq
->
cpu
]
=
freq
->
new
;
return
0
;
...
...
@@ -522,6 +529,9 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
else
if
(
policy
->
min
>
cpu_cur_freq
[
cpu
])
__cpufreq_driver_target
(
&
current_policy
[
cpu
],
policy
->
min
,
CPUFREQ_RELATION_L
);
else
__cpufreq_driver_target
(
&
current_policy
[
cpu
],
cpu_cur_freq
[
cpu
],
CPUFREQ_RELATION_L
);
memcpy
(
&
current_policy
[
cpu
],
policy
,
sizeof
(
struct
cpufreq_policy
));
up
(
&
userspace_sem
);
break
;
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment