Commit 8c14577d authored by Rafael J. Wysocki

Merge branches 'pm-cpufreq', 'pm-cpuidle', 'pm-opp' and 'powercap'

* pm-cpufreq:
  cpufreq: schedutil: Don't skip freq update if need_freq_update is set

* pm-cpuidle:
  Documentation: PM: cpuidle: correct path name
  Documentation: PM: cpuidle: correct typo

* pm-opp:
  opp: Reduce the size of critical section in _opp_table_kref_release()
  opp: Fix early exit from dev_pm_opp_register_set_opp_helper()
  opp: Don't always remove static OPPs in _of_add_opp_table_v1()

* powercap:
  powercap/intel_rapl: remove unneeded semicolon
...@@ -478,7 +478,7 @@ order to ask the hardware to enter that state. Also, for each ...@@ -478,7 +478,7 @@ order to ask the hardware to enter that state. Also, for each
statistics of the given idle state. That information is exposed by the kernel statistics of the given idle state. That information is exposed by the kernel
via ``sysfs``. via ``sysfs``.
For each CPU in the system, there is a :file:`/sys/devices/system/cpu<N>/cpuidle/` For each CPU in the system, there is a :file:`/sys/devices/system/cpu/cpu<N>/cpuidle/`
directory in ``sysfs``, where the number ``<N>`` is assigned to the given directory in ``sysfs``, where the number ``<N>`` is assigned to the given
CPU at the initialization time. That directory contains a set of subdirectories CPU at the initialization time. That directory contains a set of subdirectories
called :file:`state0`, :file:`state1` and so on, up to the number of idle state called :file:`state0`, :file:`state1` and so on, up to the number of idle state
...@@ -494,7 +494,7 @@ object corresponding to it, as follows: ...@@ -494,7 +494,7 @@ object corresponding to it, as follows:
residency. residency.
``below`` ``below``
Total number of times this idle state had been asked for, but cerainly Total number of times this idle state had been asked for, but certainly
a deeper idle state would have been a better match for the observed idle a deeper idle state would have been a better match for the observed idle
duration. duration.
......
...@@ -1181,6 +1181,10 @@ static void _opp_table_kref_release(struct kref *kref) ...@@ -1181,6 +1181,10 @@ static void _opp_table_kref_release(struct kref *kref)
struct opp_device *opp_dev, *temp; struct opp_device *opp_dev, *temp;
int i; int i;
/* Drop the lock as soon as we can */
list_del(&opp_table->node);
mutex_unlock(&opp_table_lock);
_of_clear_opp_table(opp_table); _of_clear_opp_table(opp_table);
/* Release clk */ /* Release clk */
...@@ -1208,10 +1212,7 @@ static void _opp_table_kref_release(struct kref *kref) ...@@ -1208,10 +1212,7 @@ static void _opp_table_kref_release(struct kref *kref)
mutex_destroy(&opp_table->genpd_virt_dev_lock); mutex_destroy(&opp_table->genpd_virt_dev_lock);
mutex_destroy(&opp_table->lock); mutex_destroy(&opp_table->lock);
list_del(&opp_table->node);
kfree(opp_table); kfree(opp_table);
mutex_unlock(&opp_table_lock);
} }
void dev_pm_opp_put_opp_table(struct opp_table *opp_table) void dev_pm_opp_put_opp_table(struct opp_table *opp_table)
...@@ -1930,7 +1931,7 @@ struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, ...@@ -1930,7 +1931,7 @@ struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev,
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
opp_table = dev_pm_opp_get_opp_table(dev); opp_table = dev_pm_opp_get_opp_table(dev);
if (!IS_ERR(opp_table)) if (IS_ERR(opp_table))
return opp_table; return opp_table;
/* This should be called before OPPs are initialized */ /* This should be called before OPPs are initialized */
......
...@@ -944,6 +944,8 @@ static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table) ...@@ -944,6 +944,8 @@ static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table)
nr -= 2; nr -= 2;
} }
return 0;
remove_static_opp: remove_static_opp:
_opp_remove_all_static(opp_table); _opp_remove_all_static(opp_table);
......
...@@ -620,7 +620,7 @@ static u64 rapl_unit_xlate(struct rapl_domain *rd, enum unit_type type, ...@@ -620,7 +620,7 @@ static u64 rapl_unit_xlate(struct rapl_domain *rd, enum unit_type type,
case ARBITRARY_UNIT: case ARBITRARY_UNIT:
default: default:
return value; return value;
}; }
if (to_raw) if (to_raw)
return div64_u64(value, units) * scale; return div64_u64(value, units) * scale;
......
...@@ -102,9 +102,12 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time) ...@@ -102,9 +102,12 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time, static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
unsigned int next_freq) unsigned int next_freq)
{ {
if (sg_policy->next_freq == next_freq && if (!sg_policy->need_freq_update) {
!cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS)) if (sg_policy->next_freq == next_freq)
return false; return false;
} else {
sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
}
sg_policy->next_freq = next_freq; sg_policy->next_freq = next_freq;
sg_policy->last_freq_update_time = time; sg_policy->last_freq_update_time = time;
...@@ -162,11 +165,9 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy, ...@@ -162,11 +165,9 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
freq = map_util_freq(util, freq, max); freq = map_util_freq(util, freq, max);
if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update && if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
!cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS))
return sg_policy->next_freq; return sg_policy->next_freq;
sg_policy->need_freq_update = false;
sg_policy->cached_raw_freq = freq; sg_policy->cached_raw_freq = freq;
return cpufreq_driver_resolve_freq(policy, freq); return cpufreq_driver_resolve_freq(policy, freq);
} }
...@@ -442,7 +443,6 @@ static void sugov_update_single(struct update_util_data *hook, u64 time, ...@@ -442,7 +443,6 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
struct sugov_policy *sg_policy = sg_cpu->sg_policy; struct sugov_policy *sg_policy = sg_cpu->sg_policy;
unsigned long util, max; unsigned long util, max;
unsigned int next_f; unsigned int next_f;
bool busy;
unsigned int cached_freq = sg_policy->cached_raw_freq; unsigned int cached_freq = sg_policy->cached_raw_freq;
sugov_iowait_boost(sg_cpu, time, flags); sugov_iowait_boost(sg_cpu, time, flags);
...@@ -453,9 +453,6 @@ static void sugov_update_single(struct update_util_data *hook, u64 time, ...@@ -453,9 +453,6 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
if (!sugov_should_update_freq(sg_policy, time)) if (!sugov_should_update_freq(sg_policy, time))
return; return;
/* Limits may have changed, don't skip frequency update */
busy = !sg_policy->need_freq_update && sugov_cpu_is_busy(sg_cpu);
util = sugov_get_util(sg_cpu); util = sugov_get_util(sg_cpu);
max = sg_cpu->max; max = sg_cpu->max;
util = sugov_iowait_apply(sg_cpu, time, util, max); util = sugov_iowait_apply(sg_cpu, time, util, max);
...@@ -464,7 +461,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time, ...@@ -464,7 +461,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
* Do not reduce the frequency if the CPU has not been idle * Do not reduce the frequency if the CPU has not been idle
* recently, as the reduction is likely to be premature then. * recently, as the reduction is likely to be premature then.
*/ */
if (busy && next_f < sg_policy->next_freq) { if (sugov_cpu_is_busy(sg_cpu) && next_f < sg_policy->next_freq) {
next_f = sg_policy->next_freq; next_f = sg_policy->next_freq;
/* Restore cached freq as next_freq has changed */ /* Restore cached freq as next_freq has changed */
...@@ -829,9 +826,10 @@ static int sugov_start(struct cpufreq_policy *policy) ...@@ -829,9 +826,10 @@ static int sugov_start(struct cpufreq_policy *policy)
sg_policy->next_freq = 0; sg_policy->next_freq = 0;
sg_policy->work_in_progress = false; sg_policy->work_in_progress = false;
sg_policy->limits_changed = false; sg_policy->limits_changed = false;
sg_policy->need_freq_update = false;
sg_policy->cached_raw_freq = 0; sg_policy->cached_raw_freq = 0;
sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
for_each_cpu(cpu, policy->cpus) { for_each_cpu(cpu, policy->cpus) {
struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu); struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment