Commit c30938d5 authored by Linus Torvalds

Merge branch 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq

* 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq:
  [CPUFREQ] cpumask: new cpumask operators for arch/x86/kernel/cpu/cpufreq/powernow-k8.c
  [CPUFREQ] cpumask: avoid playing with cpus_allowed in powernow-k8.c
  [CPUFREQ] cpumask: avoid cpumask games in arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
  [CPUFREQ] cpumask: avoid playing with cpus_allowed in speedstep-ich.c
  [CPUFREQ] powernow-k8: get drv data for correct CPU
  [CPUFREQ] powernow-k8: read P-state from HW
  [CPUFREQ] reduce scope of ACPI_PSS_BIOS_BUG_MSG[]
  [CPUFREQ] Clean up convoluted code in arch/x86/kernel/tsc.c:time_cpufreq_notifier()
  [CPUFREQ] minor correction to cpu-freq documentation
  [CPUFREQ] powernow-k8.c: mess cleanup
  [CPUFREQ] Only set sampling_rate_max deprecated, sampling_rate_min is useful
  [CPUFREQ] powernow-k8: Set transition latency to 1 if ACPI tables export 0
  [CPUFREQ] ondemand: Uncouple minimal sampling rate from HZ in NO_HZ case
parents aa2638a2 8e7c2597
@@ -155,7 +155,7 @@ actual frequency must be determined using the following rules:
 - if relation==CPUFREQ_REL_H, try to select a new_freq lower than or equal
   target_freq. ("H for highest, but no higher than")
-Here again the frequency table helper might assist you - see section 3
+Here again the frequency table helper might assist you - see section 2
 for details.
...
@@ -119,10 +119,6 @@ want the kernel to look at the CPU usage and to make decisions on
 what to do about the frequency. Typically this is set to values of
 around '10000' or more. It's default value is (cmp. with users-guide.txt):
 transition_latency * 1000
-The lowest value you can set is:
-transition_latency * 100 or it may get restricted to a value where it
-makes not sense for the kernel anymore to poll that often which depends
-on your HZ config variable (HZ=1000: max=20000us, HZ=250: max=5000).
 Be aware that transition latency is in ns and sampling_rate is in us, so you
 get the same sysfs value by default.
 Sampling rate should always get adjusted considering the transition latency
@@ -131,14 +127,20 @@ in the bash (as said, 1000 is default), do:
 echo `$(($(cat cpuinfo_transition_latency) * 750 / 1000)) \
     >ondemand/sampling_rate
-show_sampling_rate_(min|max): THIS INTERFACE IS DEPRECATED, DON'T USE IT.
-You can use wider ranges now and the general
-cpuinfo_transition_latency variable (cmp. with user-guide.txt) can be
-used to obtain exactly the same info:
-show_sampling_rate_min = transtition_latency * 500 / 1000
-show_sampling_rate_max = transtition_latency * 500000 / 1000
-(divided by 1000 is to illustrate that sampling rate is in us and
-transition latency is exported ns).
+show_sampling_rate_min:
+The sampling rate is limited by the HW transition latency:
+transition_latency * 100
+Or by kernel restrictions:
+If CONFIG_NO_HZ is set, the limit is 10ms fixed.
+If CONFIG_NO_HZ is not set or no_hz=off boot parameter is used, the
+limits depend on the CONFIG_HZ option:
+HZ=1000: min=20000us (20ms)
+HZ=250: min=80000us (80ms)
+HZ=100: min=200000us (200ms)
+The highest value of kernel and HW latency restrictions is shown and
+used as the minimum sampling rate.
+
+show_sampling_rate_max: THIS INTERFACE IS DEPRECATED, DON'T USE IT.

 up_threshold: defines what the average CPU usage between the samplings
 of 'sampling_rate' needs to be for the kernel to make a decision on
...
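The two floors documented above are easy to misapply, so here is a tiny userspace C sketch of the rule (illustrative only, not kernel code; the latency, HZ and NO_HZ values below are assumed examples):

    #include <stdio.h>

    /* Mirrors the documented rule: the effective minimum sampling rate is
     * the larger of the HW floor (transition latency * 100) and the kernel
     * floor (10 ticks with ratio 2, or a fixed 10ms when NO_HZ is set). */
    static unsigned int min_sampling_rate_us(unsigned int latency_ns,
                                             int no_hz, unsigned int hz)
    {
            unsigned int hw = (latency_ns / 1000) * 100;  /* ns -> us, *100 */
            unsigned int kern = no_hz ? 10000 : 2 * 10 * (1000000 / hz);

            return hw > kern ? hw : kern;
    }

    int main(void)
    {
            /* assumed: 10000 ns latency, HZ=250, no NO_HZ -> 80000us (80ms) */
            printf("%u\n", min_sampling_rate_us(10000, 0, 250));
            return 0;
    }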
@@ -31,7 +31,6 @@ Contents:
 3. How to change the CPU cpufreq policy and/or speed
 3.1 Preferred interface: sysfs
-3.2 Deprecated interfaces
...
 /*
  * (c) 2003-2006 Advanced Micro Devices, Inc.
  * Your use of this code is subject to the terms and conditions of the
@@ -117,20 +118,17 @@ static int query_current_values_with_pending_wait(struct powernow_k8_data *data)
         u32 i = 0;

         if (cpu_family == CPU_HW_PSTATE) {
+                if (data->currpstate == HW_PSTATE_INVALID) {
+                        /* read (initial) hw pstate if not yet set */
                 rdmsr(MSR_PSTATE_STATUS, lo, hi);
                 i = lo & HW_PSTATE_MASK;
+                data->currpstate = i;
                 /*
                  * a workaround for family 11h erratum 311 might cause
                  * an "out-of-range Pstate if the core is in Pstate-0
                  */
-                if (i >= data->numps)
+                if ((boot_cpu_data.x86 == 0x11) && (i >= data->numps))
                         data->currpstate = HW_PSTATE_0;
-                else
-                        data->currpstate = i;
+                }
                 return 0;
         }
         do {
@@ -510,41 +508,34 @@ static int core_voltage_post_transition(struct powernow_k8_data *data,
         return 0;
 }

-static int check_supported_cpu(unsigned int cpu)
+static void check_supported_cpu(void *_rc)
 {
-        cpumask_t oldmask;
         u32 eax, ebx, ecx, edx;
-        unsigned int rc = 0;
+        int *rc = _rc;

-        oldmask = current->cpus_allowed;
-        set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
-
-        if (smp_processor_id() != cpu) {
-                printk(KERN_ERR PFX "limiting to cpu %u failed\n", cpu);
-                goto out;
-        }
+        *rc = -ENODEV;

         if (current_cpu_data.x86_vendor != X86_VENDOR_AMD)
-                goto out;
+                return;

         eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
         if (((eax & CPUID_XFAM) != CPUID_XFAM_K8) &&
             ((eax & CPUID_XFAM) < CPUID_XFAM_10H))
-                goto out;
+                return;

         if ((eax & CPUID_XFAM) == CPUID_XFAM_K8) {
                 if (((eax & CPUID_USE_XFAM_XMOD) != CPUID_USE_XFAM_XMOD) ||
                     ((eax & CPUID_XMOD) > CPUID_XMOD_REV_MASK)) {
                         printk(KERN_INFO PFX
                                "Processor cpuid %x not supported\n", eax);
-                        goto out;
+                        return;
                 }

                 eax = cpuid_eax(CPUID_GET_MAX_CAPABILITIES);
                 if (eax < CPUID_FREQ_VOLT_CAPABILITIES) {
                         printk(KERN_INFO PFX
                                "No frequency change capabilities detected\n");
-                        goto out;
+                        return;
                 }

                 cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
@@ -552,21 +543,17 @@ static int check_supported_cpu(unsigned int cpu)
                     != P_STATE_TRANSITION_CAPABLE) {
                         printk(KERN_INFO PFX
                                "Power state transitions not supported\n");
-                        goto out;
+                        return;
                 }
         } else { /* must be a HW Pstate capable processor */
                 cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
                 if ((edx & USE_HW_PSTATE) == USE_HW_PSTATE)
                         cpu_family = CPU_HW_PSTATE;
                 else
-                        goto out;
+                        return;
         }

-        rc = 1;
-
-out:
-        set_cpus_allowed_ptr(current, &oldmask);
-        return rc;
+        *rc = 0;
 }

 static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst,
@@ -823,13 +810,14 @@ static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data,
         if (!data->acpi_data.state_count || (cpu_family == CPU_HW_PSTATE))
                 return;

-        control = data->acpi_data.states[index].control; data->irt = (control
-        >> IRT_SHIFT) & IRT_MASK; data->rvo = (control >>
-        RVO_SHIFT) & RVO_MASK; data->exttype = (control
-        >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK;
-        data->plllock = (control >> PLL_L_SHIFT) & PLL_L_MASK; data->vidmvs = 1
-        << ((control >> MVS_SHIFT) & MVS_MASK); data->vstable =
-        (control >> VST_SHIFT) & VST_MASK; }
+        control = data->acpi_data.states[index].control;
+        data->irt = (control >> IRT_SHIFT) & IRT_MASK;
+        data->rvo = (control >> RVO_SHIFT) & RVO_MASK;
+        data->exttype = (control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK;
+        data->plllock = (control >> PLL_L_SHIFT) & PLL_L_MASK;
+        data->vidmvs = 1 << ((control >> MVS_SHIFT) & MVS_MASK);
+        data->vstable = (control >> VST_SHIFT) & VST_MASK;
+}

 static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 {
@@ -1046,6 +1034,19 @@ static int get_transition_latency(struct powernow_k8_data *data)
                 if (cur_latency > max_latency)
                         max_latency = cur_latency;
         }
+        if (max_latency == 0) {
+                /*
+                 * Fam 11h always returns 0 as transition latency.
+                 * This is intended and means "very fast". While cpufreq core
+                 * and governors currently can handle that gracefully, better
+                 * set it to 1 to avoid problems in the future.
+                 * For all others it's a BIOS bug.
+                 */
+                if (!boot_cpu_data.x86 == 0x11)
+                        printk(KERN_ERR FW_WARN PFX "Invalid zero transition "
+                                "latency\n");
+                max_latency = 1;
+        }
         /* value in usecs, needs to be in nanoseconds */
         return 1000 * max_latency;
 }
@@ -1093,7 +1094,7 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
         freqs.old = find_khz_freq_from_fid(data->currfid);
         freqs.new = find_khz_freq_from_fid(fid);

-        for_each_cpu_mask_nr(i, *(data->available_cores)) {
+        for_each_cpu(i, data->available_cores) {
                 freqs.cpu = i;
                 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
         }
@@ -1101,7 +1102,7 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
         res = transition_fid_vid(data, fid, vid);
         freqs.new = find_khz_freq_from_fid(data->currfid);

-        for_each_cpu_mask_nr(i, *(data->available_cores)) {
+        for_each_cpu(i, data->available_cores) {
                 freqs.cpu = i;
                 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
         }
@@ -1126,7 +1127,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data,
                         data->currpstate);
         freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);

-        for_each_cpu_mask_nr(i, *(data->available_cores)) {
+        for_each_cpu(i, data->available_cores) {
                 freqs.cpu = i;
                 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
         }
@@ -1134,7 +1135,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data,
         res = transition_pstate(data, pstate);
         freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);

-        for_each_cpu_mask_nr(i, *(data->available_cores)) {
+        for_each_cpu(i, data->available_cores) {
                 freqs.cpu = i;
                 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
         }
@@ -1235,21 +1236,47 @@ static int powernowk8_verify(struct cpufreq_policy *pol)
         return cpufreq_frequency_table_verify(pol, data->powernow_table);
 }

-static const char ACPI_PSS_BIOS_BUG_MSG[] =
-        KERN_ERR FW_BUG PFX "No compatible ACPI _PSS objects found.\n"
-        KERN_ERR FW_BUG PFX "Try again with latest BIOS.\n";
+struct init_on_cpu {
+        struct powernow_k8_data *data;
+        int rc;
+};
+
+static void __cpuinit powernowk8_cpu_init_on_cpu(void *_init_on_cpu)
+{
+        struct init_on_cpu *init_on_cpu = _init_on_cpu;
+
+        if (pending_bit_stuck()) {
+                printk(KERN_ERR PFX "failing init, change pending bit set\n");
+                init_on_cpu->rc = -ENODEV;
+                return;
+        }
+
+        if (query_current_values_with_pending_wait(init_on_cpu->data)) {
+                init_on_cpu->rc = -ENODEV;
+                return;
+        }
+
+        if (cpu_family == CPU_OPTERON)
+                fidvid_msr_init();
+
+        init_on_cpu->rc = 0;
+}

 /* per CPU init entry point to the driver */
 static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 {
+        static const char ACPI_PSS_BIOS_BUG_MSG[] =
+                KERN_ERR FW_BUG PFX "No compatible ACPI _PSS objects found.\n"
+                KERN_ERR FW_BUG PFX "Try again with latest BIOS.\n";
         struct powernow_k8_data *data;
-        cpumask_t oldmask;
+        struct init_on_cpu init_on_cpu;
         int rc;

         if (!cpu_online(pol->cpu))
                 return -ENODEV;

-        if (!check_supported_cpu(pol->cpu))
+        smp_call_function_single(pol->cpu, check_supported_cpu, &rc, 1);
+        if (rc)
                 return -ENODEV;

         data = kzalloc(sizeof(struct powernow_k8_data), GFP_KERNEL);
@@ -1289,27 +1316,12 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
         pol->cpuinfo.transition_latency = get_transition_latency(data);

         /* only run on specific CPU from here on */
-        oldmask = current->cpus_allowed;
-        set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu));
-
-        if (smp_processor_id() != pol->cpu) {
-                printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
-                goto err_out_unmask;
-        }
-
-        if (pending_bit_stuck()) {
-                printk(KERN_ERR PFX "failing init, change pending bit set\n");
-                goto err_out_unmask;
-        }
-
-        if (query_current_values_with_pending_wait(data))
-                goto err_out_unmask;
-
-        if (cpu_family == CPU_OPTERON)
-                fidvid_msr_init();
-
-        /* run on any CPU again */
-        set_cpus_allowed_ptr(current, &oldmask);
+        init_on_cpu.data = data;
+        smp_call_function_single(data->cpu, powernowk8_cpu_init_on_cpu,
+                                 &init_on_cpu, 1);
+        rc = init_on_cpu.rc;
+        if (rc != 0)
+                goto err_out_exit_acpi;

         if (cpu_family == CPU_HW_PSTATE)
                 cpumask_copy(pol->cpus, cpumask_of(pol->cpu));
@@ -1346,8 +1358,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)

         return 0;

-err_out_unmask:
-        set_cpus_allowed_ptr(current, &oldmask);
+err_out_exit_acpi:
         powernow_k8_cpu_exit_acpi(data);

 err_out:
@@ -1372,28 +1383,25 @@ static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol)
         return 0;
 }

+static void query_values_on_cpu(void *_err)
+{
+        int *err = _err;
+        struct powernow_k8_data *data = __get_cpu_var(powernow_data);
+
+        *err = query_current_values_with_pending_wait(data);
+}
+
 static unsigned int powernowk8_get(unsigned int cpu)
 {
-        struct powernow_k8_data *data;
-        cpumask_t oldmask = current->cpus_allowed;
+        struct powernow_k8_data *data = per_cpu(powernow_data, cpu);
         unsigned int khz = 0;
-        unsigned int first;
-
-        first = cpumask_first(cpu_core_mask(cpu));
-        data = per_cpu(powernow_data, first);
+        int err;

         if (!data)
                 return -EINVAL;

-        set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
-        if (smp_processor_id() != cpu) {
-                printk(KERN_ERR PFX
-                        "limiting to CPU %d failed in powernowk8_get\n", cpu);
-                set_cpus_allowed_ptr(current, &oldmask);
-                return 0;
-        }
-
-        if (query_current_values_with_pending_wait(data))
+        smp_call_function_single(cpu, query_values_on_cpu, &err, true);
+        if (err)
                 goto out;

         if (cpu_family == CPU_HW_PSTATE)
@@ -1404,7 +1412,6 @@ static unsigned int powernowk8_get(unsigned int cpu)

 out:
-        set_cpus_allowed_ptr(current, &oldmask);
         return khz;
 }
@@ -1430,7 +1437,9 @@ static int __cpuinit powernowk8_init(void)
         unsigned int i, supported_cpus = 0;

         for_each_online_cpu(i) {
-                if (check_supported_cpu(i))
+                int rc;
+                smp_call_function_single(i, check_supported_cpu, &rc, 1);
+                if (rc == 0)
                         supported_cpus++;
         }
...
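Every powernow-k8 hunk above follows the same conversion: instead of rebinding the current task with set_cpus_allowed_ptr() and checking that it really landed on the target core, per-CPU MSR work is handed to smp_call_function_single(), which runs the callback on that CPU (in IPI context, so it must not sleep) and, with wait=1, returns only after it has finished. A minimal sketch of the pattern; read_hw_state_on_this_cpu() is a hypothetical stand-in for the driver's actual MSR accesses:

    #include <linux/smp.h>

    /* Runs on the target CPU in IPI context: no sleeping allowed.
     * Results travel back through the void * argument. */
    static void do_on_cpu(void *_err)
    {
            int *err = _err;

            *err = read_hw_state_on_this_cpu();     /* hypothetical helper */
    }

    static int query_cpu(unsigned int cpu)
    {
            int err;

            /* wait=1: only return once do_on_cpu() has run on 'cpu' */
            smp_call_function_single(cpu, do_on_cpu, &err, 1);
            return err;
    }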
@@ -223,14 +223,3 @@ static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned
 static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table);
 static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table);
-
-#ifdef CONFIG_SMP
-static inline void define_siblings(int cpu, cpumask_t cpu_sharedcore_mask[])
-{
-}
-#else
-static inline void define_siblings(int cpu, cpumask_t cpu_sharedcore_mask[])
-{
-        cpu_set(0, cpu_sharedcore_mask[0]);
-}
-#endif
...
@@ -323,14 +323,8 @@ static unsigned int get_cur_freq(unsigned int cpu)
 {
         unsigned l, h;
         unsigned clock_freq;
-        cpumask_t saved_mask;

-        saved_mask = current->cpus_allowed;
-        set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
-        if (smp_processor_id() != cpu)
-                return 0;
-
-        rdmsr(MSR_IA32_PERF_STATUS, l, h);
+        rdmsr_on_cpu(cpu, MSR_IA32_PERF_STATUS, &l, &h);
         clock_freq = extract_clock(l, cpu, 0);

         if (unlikely(clock_freq == 0)) {
@@ -340,11 +334,9 @@ static unsigned int get_cur_freq(unsigned int cpu)
                  * P-state transition (like TM2). Get the last freq set
                  * in PERF_CTL.
                  */
-                rdmsr(MSR_IA32_PERF_CTL, l, h);
+                rdmsr_on_cpu(cpu, MSR_IA32_PERF_CTL, &l, &h);
                 clock_freq = extract_clock(l, cpu, 1);
         }
-
-        set_cpus_allowed_ptr(current, &saved_mask);
         return clock_freq;
 }
@@ -467,15 +459,10 @@ static int centrino_target (struct cpufreq_policy *policy,
         struct cpufreq_freqs freqs;
         int retval = 0;
         unsigned int j, k, first_cpu, tmp;
-        cpumask_var_t saved_mask, covered_cpus;
+        cpumask_var_t covered_cpus;

-        if (unlikely(!alloc_cpumask_var(&saved_mask, GFP_KERNEL)))
+        if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL)))
                 return -ENOMEM;
-        if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))) {
-                free_cpumask_var(saved_mask);
-                return -ENOMEM;
-        }
-        cpumask_copy(saved_mask, &current->cpus_allowed);

         if (unlikely(per_cpu(centrino_model, cpu) == NULL)) {
                 retval = -ENODEV;
@@ -493,7 +480,7 @@ static int centrino_target (struct cpufreq_policy *policy,
         first_cpu = 1;
         for_each_cpu(j, policy->cpus) {
-                const struct cpumask *mask;
+                int good_cpu;

                 /* cpufreq holds the hotplug lock, so we are safe here */
                 if (!cpu_online(j))
@@ -504,32 +491,30 @@ static int centrino_target (struct cpufreq_policy *policy,
                  * Make sure we are running on CPU that wants to change freq
                  */
                 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
-                        mask = policy->cpus;
+                        good_cpu = cpumask_any_and(policy->cpus,
+                                                   cpu_online_mask);
                 else
-                        mask = cpumask_of(j);
+                        good_cpu = j;

-                set_cpus_allowed_ptr(current, mask);
-                preempt_disable();
-                if (unlikely(!cpu_isset(smp_processor_id(), *mask))) {
+                if (good_cpu >= nr_cpu_ids) {
                         dprintk("couldn't limit to CPUs in this domain\n");
                         retval = -EAGAIN;
                         if (first_cpu) {
                                 /* We haven't started the transition yet. */
-                                goto migrate_end;
+                                goto out;
                         }
-                        preempt_enable();
                         break;
                 }
                 msr = per_cpu(centrino_model, cpu)->op_points[newstate].index;

                 if (first_cpu) {
-                        rdmsr(MSR_IA32_PERF_CTL, oldmsr, h);
+                        rdmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, &oldmsr, &h);
                         if (msr == (oldmsr & 0xffff)) {
                                 dprintk("no change needed - msr was and needs "
                                         "to be %x\n", oldmsr);
                                 retval = 0;
-                                goto migrate_end;
+                                goto out;
                         }

                         freqs.old = extract_clock(oldmsr, cpu, 0);
@@ -553,14 +538,11 @@ static int centrino_target (struct cpufreq_policy *policy,
                         oldmsr |= msr;
                 }

-                wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
-                if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
-                        preempt_enable();
+                wrmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, oldmsr, h);
+                if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
                         break;
-                }

-                cpu_set(j, *covered_cpus);
-                preempt_enable();
+                cpumask_set_cpu(j, covered_cpus);
         }

         for_each_cpu(k, policy->cpus) {
@@ -578,10 +560,8 @@ static int centrino_target (struct cpufreq_policy *policy,
                  * Best effort undo..
                  */

-                for_each_cpu_mask_nr(j, *covered_cpus) {
-                        set_cpus_allowed_ptr(current, &cpumask_of_cpu(j));
-                        wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
-                }
+                for_each_cpu(j, covered_cpus)
+                        wrmsr_on_cpu(j, MSR_IA32_PERF_CTL, oldmsr, h);

                 tmp = freqs.new;
                 freqs.new = freqs.old;
@@ -593,15 +573,9 @@ static int centrino_target (struct cpufreq_policy *policy,
                         cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
                 }
         }
-        set_cpus_allowed_ptr(current, saved_mask);
         retval = 0;
-        goto out;

-migrate_end:
-        preempt_enable();
-        set_cpus_allowed_ptr(current, saved_mask);
 out:
-        free_cpumask_var(saved_mask);
         free_cpumask_var(covered_cpus);
         return retval;
 }
...
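The centrino conversion leans on the msr-on-cpu helpers instead: rdmsr_on_cpu() and wrmsr_on_cpu() issue the MSR access on the named CPU, so neither task migration nor preempt_disable() games are needed. A rough read-modify-write sketch with those helpers; the low 16 bits carry the P-state control value as in the code above, but treat the whole function as illustrative, not as the driver's code:

    #include <linux/types.h>
    #include <asm/msr.h>

    static int set_perf_ctl_bits(unsigned int cpu, u32 new_bits)
    {
            u32 lo, hi;
            int err;

            /* read IA32_PERF_CTL on 'cpu', not on whatever CPU we run on */
            err = rdmsr_on_cpu(cpu, MSR_IA32_PERF_CTL, &lo, &hi);
            if (err)
                    return err;

            lo = (lo & ~0xffffU) | (new_bits & 0xffffU);
            return wrmsr_on_cpu(cpu, MSR_IA32_PERF_CTL, lo, hi);
    }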
@@ -89,7 +89,8 @@ static int speedstep_find_register(void)
  * speedstep_set_state - set the SpeedStep state
  * @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH)
  *
- * Tries to change the SpeedStep state.
+ * Tries to change the SpeedStep state. Can be called from
+ * smp_call_function_single.
  */
 static void speedstep_set_state(unsigned int state)
 {
@@ -143,6 +144,11 @@ static void speedstep_set_state(unsigned int state)
         return;
 }

+/* Wrapper for smp_call_function_single. */
+static void _speedstep_set_state(void *_state)
+{
+        speedstep_set_state(*(unsigned int *)_state);
+}

 /**
  * speedstep_activate - activate SpeedStep control in the chipset
@@ -226,22 +232,28 @@ static unsigned int speedstep_detect_chipset(void)
         return 0;
 }

-static unsigned int _speedstep_get(const struct cpumask *cpus)
-{
+struct get_freq_data {
         unsigned int speed;
-        cpumask_t cpus_allowed;
-
-        cpus_allowed = current->cpus_allowed;
-        set_cpus_allowed_ptr(current, cpus);
-        speed = speedstep_get_frequency(speedstep_processor);
-        set_cpus_allowed_ptr(current, &cpus_allowed);
-        dprintk("detected %u kHz as current frequency\n", speed);
-        return speed;
+        unsigned int processor;
+};
+
+static void get_freq_data(void *_data)
+{
+        struct get_freq_data *data = _data;
+
+        data->speed = speedstep_get_frequency(data->processor);
 }

 static unsigned int speedstep_get(unsigned int cpu)
 {
-        return _speedstep_get(cpumask_of(cpu));
+        struct get_freq_data data = { .processor = cpu };
+
+        /* You're supposed to ensure CPU is online. */
+        if (smp_call_function_single(cpu, get_freq_data, &data, 1) != 0)
+                BUG();
+
+        dprintk("detected %u kHz as current frequency\n", data.speed);
+        return data.speed;
 }

 /**
@@ -257,16 +269,16 @@ static int speedstep_target(struct cpufreq_policy *policy,
                             unsigned int target_freq,
                             unsigned int relation)
 {
-        unsigned int newstate = 0;
+        unsigned int newstate = 0, policy_cpu;
         struct cpufreq_freqs freqs;
-        cpumask_t cpus_allowed;
         int i;

         if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0],
                                 target_freq, relation, &newstate))
                 return -EINVAL;

-        freqs.old = _speedstep_get(policy->cpus);
+        policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask);
+        freqs.old = speedstep_get(policy_cpu);
         freqs.new = speedstep_freqs[newstate].frequency;
         freqs.cpu = policy->cpu;
@@ -276,20 +288,13 @@ static int speedstep_target(struct cpufreq_policy *policy,
         if (freqs.old == freqs.new)
                 return 0;

-        cpus_allowed = current->cpus_allowed;
-
         for_each_cpu(i, policy->cpus) {
                 freqs.cpu = i;
                 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
         }

-        /* switch to physical CPU where state is to be changed */
-        set_cpus_allowed_ptr(current, policy->cpus);
-
-        speedstep_set_state(newstate);
-
-        /* allow to be run on all CPUs */
-        set_cpus_allowed_ptr(current, &cpus_allowed);
+        smp_call_function_single(policy_cpu, _speedstep_set_state, &newstate,
+                                 true);

         for_each_cpu(i, policy->cpus) {
                 freqs.cpu = i;
@@ -312,33 +317,43 @@ static int speedstep_verify(struct cpufreq_policy *policy)
         return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]);
 }

+struct get_freqs {
+        struct cpufreq_policy *policy;
+        int ret;
+};
+
+static void get_freqs_on_cpu(void *_get_freqs)
+{
+        struct get_freqs *get_freqs = _get_freqs;
+
+        get_freqs->ret =
+                speedstep_get_freqs(speedstep_processor,
+                        &speedstep_freqs[SPEEDSTEP_LOW].frequency,
+                        &speedstep_freqs[SPEEDSTEP_HIGH].frequency,
+                        &get_freqs->policy->cpuinfo.transition_latency,
+                        &speedstep_set_state);
+}
+
 static int speedstep_cpu_init(struct cpufreq_policy *policy)
 {
-        int result = 0;
-        unsigned int speed;
-        cpumask_t cpus_allowed;
+        int result;
+        unsigned int policy_cpu, speed;
+        struct get_freqs gf;

         /* only run on CPU to be set, or on its sibling */
 #ifdef CONFIG_SMP
         cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
 #endif
+        policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask);

-        cpus_allowed = current->cpus_allowed;
-        set_cpus_allowed_ptr(current, policy->cpus);
-
         /* detect low and high frequency and transition latency */
-        result = speedstep_get_freqs(speedstep_processor,
-                                &speedstep_freqs[SPEEDSTEP_LOW].frequency,
-                                &speedstep_freqs[SPEEDSTEP_HIGH].frequency,
-                                &policy->cpuinfo.transition_latency,
-                                &speedstep_set_state);
-        set_cpus_allowed_ptr(current, &cpus_allowed);
-        if (result)
-                return result;
+        gf.policy = policy;
+        smp_call_function_single(policy_cpu, get_freqs_on_cpu, &gf, 1);
+        if (gf.ret)
+                return gf.ret;

         /* get current speed setting */
-        speed = _speedstep_get(policy->cpus);
+        speed = speedstep_get(policy_cpu);
         if (!speed)
                 return -EIO;
...
@@ -226,6 +226,7 @@ static unsigned int pentium4_get_frequency(void)
 }

+/* Warning: may get called from smp_call_function_single. */
 unsigned int speedstep_get_frequency(unsigned int processor)
 {
         switch (processor) {
...
@@ -632,17 +632,15 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
                                 void *data)
 {
         struct cpufreq_freqs *freq = data;
-        unsigned long *lpj, dummy;
+        unsigned long *lpj;

         if (cpu_has(&cpu_data(freq->cpu), X86_FEATURE_CONSTANT_TSC))
                 return 0;

-        lpj = &dummy;
-        if (!(freq->flags & CPUFREQ_CONST_LOOPS))
+        lpj = &boot_cpu_data.loops_per_jiffy;
 #ifdef CONFIG_SMP
+        if (!(freq->flags & CPUFREQ_CONST_LOOPS))
                 lpj = &cpu_data(freq->cpu).loops_per_jiffy;
-#else
-        lpj = &boot_cpu_data.loops_per_jiffy;
 #endif

         if (!ref_freq) {
...
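The tsc.c cleanup only changes which loops_per_jiffy variable the notifier points at (boot_cpu_data on UP, the per-CPU copy on SMP); the rescaling itself, done further down in the function via cpufreq_scale(), is plain proportionality: lpj scales linearly with CPU frequency. A quick userspace check of that arithmetic, with assumed example numbers:

    #include <stdio.h>

    /* new_lpj = ref_lpj * new_freq / ref_freq, matching what
     * cpufreq_scale(old, div, mult) computes in the kernel */
    static unsigned long scale_lpj(unsigned long ref_lpj,
                                   unsigned int ref_khz, unsigned int new_khz)
    {
            return (unsigned long)((unsigned long long)ref_lpj *
                                   new_khz / ref_khz);
    }

    int main(void)
    {
            /* assumed: lpj 4000000 at 2.0 GHz -> 2000000 at 1.0 GHz */
            printf("%lu\n", scale_lpj(4000000UL, 2000000, 1000000));
            return 0;
    }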
@@ -42,27 +42,12 @@
  * this governor will not work.
  * All times here are in uS.
  */
-static unsigned int def_sampling_rate;
 #define MIN_SAMPLING_RATE_RATIO         (2)
-/* for correct statistics, we need at least 10 ticks between each measure */
-#define MIN_STAT_SAMPLING_RATE          \
-                (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
-#define MIN_SAMPLING_RATE               \
-                (def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
-/* Above MIN_SAMPLING_RATE will vanish with its sysfs file soon
- * Define the minimal settable sampling rate to the greater of:
- *   - "HW transition latency" * 100 (same as default sampling / 10)
- *   - MIN_STAT_SAMPLING_RATE
- * To avoid that userspace shoots itself.
- */
-static unsigned int minimum_sampling_rate(void)
-{
-        return max(def_sampling_rate / 10, MIN_STAT_SAMPLING_RATE);
-}

-/* This will also vanish soon with removing sampling_rate_max */
-#define MAX_SAMPLING_RATE               (500 * def_sampling_rate)
+static unsigned int min_sampling_rate;
+
 #define LATENCY_MULTIPLIER              (1000)
+#define MIN_LATENCY_MULTIPLIER          (100)
 #define DEF_SAMPLING_DOWN_FACTOR        (1)
 #define MAX_SAMPLING_DOWN_FACTOR        (10)
 #define TRANSITION_LATENCY_LIMIT        (10 * 1000 * 1000)
@@ -182,27 +167,14 @@ static struct notifier_block dbs_cpufreq_notifier_block = {
 /************************** sysfs interface ************************/
 static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
 {
-        static int print_once;
-
-        if (!print_once) {
-                printk(KERN_INFO "CPUFREQ: conservative sampling_rate_max "
-                       "sysfs file is deprecated - used by: %s\n",
-                       current->comm);
-                print_once = 1;
-        }
-        return sprintf(buf, "%u\n", MAX_SAMPLING_RATE);
+        printk_once(KERN_INFO "CPUFREQ: conservative sampling_rate_max "
+                    "sysfs file is deprecated - used by: %s\n", current->comm);
+        return sprintf(buf, "%u\n", -1U);
 }

 static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
 {
-        static int print_once;
-
-        if (!print_once) {
-                printk(KERN_INFO "CPUFREQ: conservative sampling_rate_max "
-                       "sysfs file is deprecated - used by: %s\n", current->comm);
-                print_once = 1;
-        }
-        return sprintf(buf, "%u\n", MIN_SAMPLING_RATE);
+        return sprintf(buf, "%u\n", min_sampling_rate);
 }

 #define define_one_ro(_name)            \
@@ -254,7 +226,7 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
                 return -EINVAL;

         mutex_lock(&dbs_mutex);
-        dbs_tuners_ins.sampling_rate = max(input, minimum_sampling_rate());
+        dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
         mutex_unlock(&dbs_mutex);

         return count;
@@ -601,11 +573,18 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                         if (latency == 0)
                                 latency = 1;

-                        def_sampling_rate =
-                                max(latency * LATENCY_MULTIPLIER,
-                                    MIN_STAT_SAMPLING_RATE);
-
-                        dbs_tuners_ins.sampling_rate = def_sampling_rate;
+                        /*
+                         * conservative does not implement micro like ondemand
+                         * governor, thus we are bound to jiffes/HZ
+                         */
+                        min_sampling_rate =
+                                MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
+                        /* Bring kernel and HW constraints together */
+                        min_sampling_rate = max(min_sampling_rate,
+                                        MIN_LATENCY_MULTIPLIER * latency);
+                        dbs_tuners_ins.sampling_rate =
+                                max(min_sampling_rate,
+                                    latency * LATENCY_MULTIPLIER);

                         cpufreq_register_notifier(
                                         &dbs_cpufreq_notifier_block,
...
@@ -32,6 +32,7 @@
 #define DEF_FREQUENCY_UP_THRESHOLD              (80)
 #define MICRO_FREQUENCY_DOWN_DIFFERENTIAL       (3)
 #define MICRO_FREQUENCY_UP_THRESHOLD            (95)
+#define MICRO_FREQUENCY_MIN_SAMPLE_RATE         (10000)
 #define MIN_FREQUENCY_UP_THRESHOLD              (11)
 #define MAX_FREQUENCY_UP_THRESHOLD              (100)
@@ -45,27 +46,12 @@
  * this governor will not work.
  * All times here are in uS.
  */
-static unsigned int def_sampling_rate;
 #define MIN_SAMPLING_RATE_RATIO         (2)
-/* for correct statistics, we need at least 10 ticks between each measure */
-#define MIN_STAT_SAMPLING_RATE          \
-                (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10))
-#define MIN_SAMPLING_RATE               \
-                (def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
-/* Above MIN_SAMPLING_RATE will vanish with its sysfs file soon
- * Define the minimal settable sampling rate to the greater of:
- *   - "HW transition latency" * 100 (same as default sampling / 10)
- *   - MIN_STAT_SAMPLING_RATE
- * To avoid that userspace shoots itself.
- */
-static unsigned int minimum_sampling_rate(void)
-{
-        return max(def_sampling_rate / 10, MIN_STAT_SAMPLING_RATE);
-}

-/* This will also vanish soon with removing sampling_rate_max */
-#define MAX_SAMPLING_RATE               (500 * def_sampling_rate)
+static unsigned int min_sampling_rate;
+
 #define LATENCY_MULTIPLIER              (1000)
+#define MIN_LATENCY_MULTIPLIER          (100)
 #define TRANSITION_LATENCY_LIMIT        (10 * 1000 * 1000)

 static void do_dbs_timer(struct work_struct *work);
@@ -219,28 +205,14 @@ static void ondemand_powersave_bias_init(void)
 /************************** sysfs interface ************************/
 static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
 {
-        static int print_once;
-
-        if (!print_once) {
-                printk(KERN_INFO "CPUFREQ: ondemand sampling_rate_max "
-                       "sysfs file is deprecated - used by: %s\n",
-                       current->comm);
-                print_once = 1;
-        }
-        return sprintf(buf, "%u\n", MAX_SAMPLING_RATE);
+        printk_once(KERN_INFO "CPUFREQ: ondemand sampling_rate_max "
+                    "sysfs file is deprecated - used by: %s\n", current->comm);
+        return sprintf(buf, "%u\n", -1U);
 }

 static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
 {
-        static int print_once;
-
-        if (!print_once) {
-                printk(KERN_INFO "CPUFREQ: ondemand sampling_rate_min "
-                       "sysfs file is deprecated - used by: %s\n",
-                       current->comm);
-                print_once = 1;
-        }
-        return sprintf(buf, "%u\n", MIN_SAMPLING_RATE);
+        return sprintf(buf, "%u\n", min_sampling_rate);
 }

 #define define_one_ro(_name)            \
@@ -274,7 +246,7 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
                 mutex_unlock(&dbs_mutex);
                 return -EINVAL;
         }
-        dbs_tuners_ins.sampling_rate = max(input, minimum_sampling_rate());
+        dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
         mutex_unlock(&dbs_mutex);

         return count;
@@ -619,12 +591,12 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                         latency = policy->cpuinfo.transition_latency / 1000;
                         if (latency == 0)
                                 latency = 1;
-
-                        def_sampling_rate =
-                                max(latency * LATENCY_MULTIPLIER,
-                                    MIN_STAT_SAMPLING_RATE);
-
-                        dbs_tuners_ins.sampling_rate = def_sampling_rate;
+                        /* Bring kernel and HW constraints together */
+                        min_sampling_rate = max(min_sampling_rate,
+                                        MIN_LATENCY_MULTIPLIER * latency);
+                        dbs_tuners_ins.sampling_rate =
+                                max(min_sampling_rate,
+                                    latency * LATENCY_MULTIPLIER);
                 }

                 dbs_timer_init(this_dbs_info);
@@ -678,6 +650,16 @@ static int __init cpufreq_gov_dbs_init(void)
                 dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
                 dbs_tuners_ins.down_differential =
                                         MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
+                /*
+                 * In no_hz/micro accounting case we set the minimum frequency
+                 * not depending on HZ, but fixed (very low). The deferred
+                 * timer might skip some samples if idle/sleeping as needed.
+                 */
+                min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
+        } else {
+                /* For correct statistics, we need 10 ticks for each measure */
+                min_sampling_rate =
+                        MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
         }

         kondemand_wq = create_workqueue("kondemand");
...