Commit fbd88dd0 authored by Linus Torvalds

Merge tag 'pm-6.9-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull more power management updates from Rafael Wysocki:
 "These update the Energy Model to make it prevent errors due to power
  unit mismatches, fix a typo in power management documentation, convert
  one driver to using a platform remove callback returning void, address
  two cpufreq issues (one in the core and one in the DT driver), and
  enable boost support in the SCMI cpufreq driver.

  Specifics:

   - Modify the Energy Model code to bail out and complain if the unit
     of power is not uW to prevent errors due to unit mismatches (Lukasz
     Luba)

   - Make the intel_rapl platform driver use a remove callback returning
     void (Uwe Kleine-König)

   - Fix typo in the suspend and interrupts document (Saravana Kannan)

   - Make per-policy boost flags actually take effect on platforms using
     cpufreq_boost_set_sw() (Sibi Sankar)

   - Enable boost support in the SCMI cpufreq driver (Sibi Sankar)

   - Make the DT cpufreq driver use zalloc_cpumask_var() for allocating
     cpumasks to avoid using uninitialized memory (Marek Szyprowski)"

* tag 'pm-6.9-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  cpufreq: scmi: Enable boost support
  firmware: arm_scmi: Add support for marking certain frequencies as turbo
  cpufreq: dt: always allocate zeroed cpumask
  cpufreq: Fix per-policy boost behavior on SoCs using cpufreq_boost_set_sw()
  Documentation: power: Fix typo in suspend and interrupts doc
  PM: EM: Force device drivers to provide power in uW
  powercap: intel_rapl: Convert to platform remove callback returning void
parents 6d37f7e7 a6d65909
@@ -78,7 +78,7 @@ handling the given IRQ as a system wakeup interrupt line and disable_irq_wake()
 turns that logic off.
 
 Calling enable_irq_wake() causes suspend_device_irqs() to treat the given IRQ
-in a special way. Namely, the IRQ remains enabled, by on the first interrupt
+in a special way. Namely, the IRQ remains enabled, but on the first interrupt
 it will be disabled, marked as pending and "suspended" so that it will be
 re-enabled by resume_device_irqs() during the subsequent system resume. Also
 the PM core is notified about the event which causes the system suspend in
...
@@ -208,7 +208,7 @@ static int dt_cpufreq_early_init(struct device *dev, int cpu)
 	if (!priv)
 		return -ENOMEM;
 
-	if (!alloc_cpumask_var(&priv->cpus, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&priv->cpus, GFP_KERNEL))
 		return -ENOMEM;
 
 	cpumask_set_cpu(cpu, priv->cpus);
...
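For context on the change above: with CONFIG_CPUMASK_OFFSTACK=y, alloc_cpumask_var() returns uninitialized storage, so any bit that is not explicitly set is undefined, while zalloc_cpumask_var() clears the mask first. A minimal sketch of the safe pattern; the helper name and variables are illustrative, not taken from the driver:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Illustrative only: allocate a zeroed cpumask, mark one CPU, then free it. */
static int example_use_cpumask(int cpu)
{
	cpumask_var_t mask;

	/* zalloc_cpumask_var() zeroes the mask; alloc_cpumask_var() may not. */
	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_set_cpu(cpu, mask);	/* only this bit is guaranteed to be set */

	/* ... use the mask ... */

	free_cpumask_var(mask);
	return 0;
}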
@@ -653,14 +653,16 @@ static ssize_t store_local_boost(struct cpufreq_policy *policy,
 	if (policy->boost_enabled == enable)
 		return count;
 
+	policy->boost_enabled = enable;
+
 	cpus_read_lock();
 	ret = cpufreq_driver->set_boost(policy, enable);
 	cpus_read_unlock();
 
-	if (ret)
+	if (ret) {
+		policy->boost_enabled = !policy->boost_enabled;
 		return ret;
-
-	policy->boost_enabled = enable;
+	}
 
 	return count;
 }
@@ -1428,6 +1430,9 @@ static int cpufreq_online(unsigned int cpu)
 			goto out_free_policy;
 		}
 
+		/* Let the per-policy boost flag mirror the cpufreq_driver boost during init */
+		policy->boost_enabled = cpufreq_boost_enabled() && policy_has_boost_freq(policy);
+
 		/*
 		 * The initialization has succeeded and the policy is online.
 		 * If there is a problem with its frequency table, take it
@@ -2769,11 +2774,12 @@ int cpufreq_boost_trigger_state(int state)
 
 	cpus_read_lock();
 	for_each_active_policy(policy) {
+		policy->boost_enabled = state;
 		ret = cpufreq_driver->set_boost(policy, state);
-		if (ret)
+		if (ret) {
+			policy->boost_enabled = !policy->boost_enabled;
 			goto err_reset_state;
-
-		policy->boost_enabled = state;
+		}
 	}
 	cpus_read_unlock();
 
...
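A note on why the ordering in the store_local_boost() and cpufreq_boost_trigger_state() hunks changed (my reading of the fix, not text from the changelog): on SoCs that fall back to cpufreq_boost_set_sw(), the set_boost() callback re-evaluates the frequency table via cpufreq_frequency_table_cpuinfo(), and after the freq_table change below that re-evaluation consults policy->boost_enabled. The flag therefore has to be set before set_boost() is called and rolled back if the call fails. A generic, hypothetical sketch of that flip-then-revert pattern:

#include <linux/types.h>

/* Hypothetical context; stands in for a cpufreq policy in this sketch. */
struct example_ctx {
	bool flag;
};

/*
 * Flip the flag first so that code invoked by try_apply() can observe it,
 * then revert the flag if applying the change fails.
 */
static int example_apply_flag(struct example_ctx *ctx, bool enable,
			      int (*try_apply)(struct example_ctx *ctx, bool enable))
{
	int ret;

	ctx->flag = enable;

	ret = try_apply(ctx, enable);
	if (ret)
		ctx->flag = !ctx->flag;	/* roll back on failure */

	return ret;
}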
@@ -40,7 +40,7 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
 	cpufreq_for_each_valid_entry(pos, table) {
 		freq = pos->frequency;
 
-		if (!cpufreq_boost_enabled()
+		if ((!cpufreq_boost_enabled() || !policy->boost_enabled)
 		    && (pos->flags & CPUFREQ_BOOST_FREQ))
 			continue;
 
...
@@ -30,6 +30,7 @@ struct scmi_data {
 
 static struct scmi_protocol_handle *ph;
 static const struct scmi_perf_proto_ops *perf_ops;
+static struct cpufreq_driver scmi_cpufreq_driver;
 
 static unsigned int scmi_cpufreq_get_rate(unsigned int cpu)
 {
@@ -167,6 +168,12 @@ scmi_get_rate_limit(u32 domain, bool has_fast_switch)
 	return rate_limit;
 }
 
+static struct freq_attr *scmi_cpufreq_hw_attr[] = {
+	&cpufreq_freq_attr_scaling_available_freqs,
+	NULL,
+	NULL,
+};
+
 static int scmi_cpufreq_init(struct cpufreq_policy *policy)
 {
 	int ret, nr_opp, domain;
@@ -276,6 +283,17 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
 	policy->transition_delay_us =
 		scmi_get_rate_limit(domain, policy->fast_switch_possible);
 
+	if (policy_has_boost_freq(policy)) {
+		ret = cpufreq_enable_boost_support();
+		if (ret) {
+			dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
+			goto out_free_opp;
+		} else {
+			scmi_cpufreq_hw_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
+			scmi_cpufreq_driver.boost_enabled = true;
+		}
+	}
+
 	return 0;
 
 out_free_opp:
@@ -334,7 +352,7 @@ static struct cpufreq_driver scmi_cpufreq_driver = {
 			CPUFREQ_NEED_INITIAL_FREQ_CHECK |
 			CPUFREQ_IS_COOLING_DEV,
 	.verify	= cpufreq_generic_frequency_table_verify,
-	.attr	= cpufreq_generic_attr,
+	.attr	= scmi_cpufreq_hw_attr,
 	.target_index	= scmi_cpufreq_set_target,
 	.fast_switch	= scmi_cpufreq_fast_switch,
 	.get	= scmi_cpufreq_get_rate,
...
@@ -871,6 +871,9 @@ static int scmi_dvfs_device_opps_add(const struct scmi_protocol_handle *ph,
 		else
 			freq = dom->opp[idx].indicative_freq * dom->mult_factor;
 
+		/* All OPPs above the sustained frequency are treated as turbo */
+		data.turbo = freq > dom->sustained_freq_khz * 1000;
+
 		data.level = dom->opp[idx].perf;
 		data.freq = freq;
 
...
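A worked example of the threshold added above (numbers invented for illustration): sustained_freq_khz is in kHz while freq is in Hz, hence the multiplication by 1000. With a sustained frequency of 2,000,000 kHz (2 GHz), a 2.5 GHz OPP is marked turbo and a 1.8 GHz OPP is not:

#include <linux/types.h>

/* Illustrative only: mirrors data.turbo = freq > dom->sustained_freq_khz * 1000. */
static bool example_is_turbo(u64 freq_hz, u32 sustained_freq_khz)
{
	return freq_hz > (u64)sustained_freq_khz * 1000;
}

/*
 * With sustained_freq_khz = 2000000 (2 GHz):
 *   example_is_turbo(2500000000ULL, 2000000) -> true  (2.5 GHz OPP is turbo)
 *   example_is_turbo(1800000000ULL, 2000000) -> false (1.8 GHz OPP is not)
 */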
@@ -197,11 +197,10 @@ static int rapl_msr_probe(struct platform_device *pdev)
 	return ret;
 }
 
-static int rapl_msr_remove(struct platform_device *pdev)
+static void rapl_msr_remove(struct platform_device *pdev)
 {
 	cpuhp_remove_state(rapl_msr_priv->pcap_rapl_online);
 	powercap_unregister_control_type(rapl_msr_priv->control_type);
-	return 0;
 }
 
 static const struct platform_device_id rapl_msr_ids[] = {
@@ -212,7 +211,7 @@ MODULE_DEVICE_TABLE(platform, rapl_msr_ids);
 
 static struct platform_driver intel_rapl_msr_driver = {
 	.probe = rapl_msr_probe,
-	.remove = rapl_msr_remove,
+	.remove_new = rapl_msr_remove,
 	.id_table = rapl_msr_ids,
 	.driver = {
 		.name = "intel_rapl_msr",
...
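Background on the .remove_new conversion above (general kernel convention, summarized here rather than taken from the changelog): the driver core ignores the value returned by a platform driver's remove callback, so the void-returning .remove_new variant makes that explicit and removes the temptation to return errors that would go nowhere. A hypothetical minimal driver using the same shape:

#include <linux/module.h>
#include <linux/platform_device.h>

/* Hypothetical skeleton driver showing the void remove callback. */
static int example_probe(struct platform_device *pdev)
{
	return 0;
}

static void example_remove(struct platform_device *pdev)
{
	/* Release resources here; there is no meaningful error to return. */
}

static struct platform_driver example_driver = {
	.probe = example_probe,
	.remove_new = example_remove,
	.driver = {
		.name = "example-driver",
	},
};
module_platform_driver(example_driver);

MODULE_DESCRIPTION("Example platform driver skeleton");
MODULE_LICENSE("GPL");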
@@ -612,6 +612,17 @@ int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
 	else if (cb->get_cost)
 		flags |= EM_PERF_DOMAIN_ARTIFICIAL;
 
+	/*
+	 * EM only supports uW (exception is artificial EM).
+	 * Therefore, check and force the drivers to provide
+	 * power in uW.
+	 */
+	if (!microwatts && !(flags & EM_PERF_DOMAIN_ARTIFICIAL)) {
+		dev_err(dev, "EM: only supports uW power values\n");
+		ret = -EINVAL;
+		goto unlock;
+	}
+
 	ret = em_create_pd(dev, nr_states, cb, cpus, flags);
 	if (ret)
 		goto unlock;
...
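To illustrate what the new check enforces, here is a hedged sketch of a driver registering an Energy Model performance domain with power reported in uW. The device, state count, frequencies, and power numbers are invented, and the .active_power() argument order shown follows recent kernels; treat the exact signatures as assumptions to verify against include/linux/energy_model.h:

#include <linux/device.h>
#include <linux/energy_model.h>
#include <linux/errno.h>
#include <linux/kernel.h>

/* Invented performance states for illustration only. */
static const unsigned long example_freq_khz[] = { 500000, 1000000, 1500000, 2000000 };
static const unsigned long example_power_uw[] = {  90000,  250000,  480000,  900000 };

/*
 * Hypothetical .active_power() callback: pick the next performance state at
 * or above *freq (kHz) and report its power draw in microwatts.
 */
static int example_active_power(struct device *dev, unsigned long *power,
				unsigned long *freq)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(example_freq_khz); i++) {
		if (example_freq_khz[i] >= *freq) {
			*freq = example_freq_khz[i];
			*power = example_power_uw[i];	/* microwatts, not milliwatts */
			return 0;
		}
	}

	return -EINVAL;
}

static int example_register_em(struct device *dev)
{
	struct em_data_callback cb = EM_DATA_CB(example_active_power);

	/*
	 * The final argument declares that the callback reports microwatts.
	 * Passing false here (without an artificial EM) now fails with
	 * -EINVAL, per the check added above. cpus is NULL because this
	 * sketch registers a non-CPU device.
	 */
	return em_dev_register_perf_domain(dev, ARRAY_SIZE(example_freq_khz),
					   &cb, NULL, true);
}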