Commit d422555f authored by Linus Torvalds

Merge tag 'pm-5.16-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull more power management updates from Rafael Wysocki:
 "These fix three intel_pstate driver regressions, fix locking in the
  core code suspending and resuming devices during system PM
  transitions, fix the handling of cpuidle drivers based on runtime PM
  during system-wide suspend, fix two issues in the operating
  performance points (OPP) framework and resource-managed helpers to it.

  Specifics:

   - Fix two intel_pstate driver regressions related to the HWP
     interrupt handling added recently (Srinivas Pandruvada).

   - Fix an intel_pstate driver regression introduced during the 5.11
     cycle that causes HWP desired performance to be mishandled in some
     cases when switching driver modes and during system suspend and
     shutdown (Rafael Wysocki).

   - Fix system-wide device suspend and resume locking to avoid
     deadlocks when device objects are deleted during a system-wide PM
     transition (Rafael Wysocki); see the sketch after this list.

   - Modify system-wide suspend of devices to prevent cpuidle drivers
     based on runtime PM from misbehaving during the "no IRQ" phase of
     it (Ulf Hansson).

   - Fix return value of _opp_add_static_v2() helper (YueHaibing).

   - Fix the required-opps phandle array count check (Pavankumar Kondeti).

   - Add resource-managed OPP helpers, update dev_pm_opp_attach_genpd(),
     update their devfreq users, and make a minor DT binding change
     (Dmitry Osipenko)"
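
A simplified sketch of the reworked dpm_list_mtx vs. put_device() ordering
mentioned above, modeled on the dpm_suspend_late() loop changed in the diff
below (tracing, async handling and error reporting omitted; not the exact
kernel code). The point of the fix is that put_device() may drop the last
reference and run deletion code that itself takes dpm_list_mtx, so the
reference is dropped only while the mutex is released:

    mutex_lock(&dpm_list_mtx);
    while (!list_empty(&dpm_suspended_list)) {
        struct device *dev = to_device(dpm_suspended_list.prev);
        int error;

        get_device(dev);
        mutex_unlock(&dpm_list_mtx);

        error = device_suspend_late(dev);   /* PM callback runs unlocked */

        mutex_lock(&dpm_list_mtx);
        if (!list_empty(&dev->power.entry))
            list_move(&dev->power.entry, &dpm_late_early_list);
        mutex_unlock(&dpm_list_mtx);

        /* Drop the reference only while dpm_list_mtx is not held. */
        put_device(dev);

        mutex_lock(&dpm_list_mtx);
        if (error || async_error)
            break;
    }
    mutex_unlock(&dpm_list_mtx);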

* tag 'pm-5.16-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  PM: sleep: Avoid calling put_device() under dpm_list_mtx
  cpufreq: intel_pstate: Clear HWP Status during HWP Interrupt enable
  cpufreq: intel_pstate: Fix unchecked MSR 0x773 access
  cpufreq: intel_pstate: Clear HWP desired on suspend/shutdown and offline
  PM: sleep: Fix runtime PM based cpuidle support
  dt-bindings: opp: Allow multi-worded OPP entry name
  opp: Fix return in _opp_add_static_v2()
  PM / devfreq: tegra30: Check whether clk_round_rate() returns zero rate
  PM / devfreq: tegra30: Use resource-managed helpers
  PM / devfreq: Add devm_devfreq_add_governor()
  opp: Add more resource-managed variants of dev_pm_opp_of_add_table()
  opp: Change type of dev_pm_opp_attach_genpd(names) argument
  opp: Fix required-opps phandle array count check
parents 285fc3db dcc0b6f2
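
The OPP and devfreq hunks below repeatedly apply the same conversion: a
one-shot registration is wrapped so that its teardown is scheduled with
devm_add_action_or_reset() and runs automatically when the device is
unbound. A minimal illustrative sketch of that pattern, using hypothetical
foo_register()/foo_unregister() helpers rather than any function added by
this series:

    #include <linux/device.h>

    struct foo;
    int foo_register(struct foo *foo);      /* hypothetical registration */
    void foo_unregister(struct foo *foo);   /* hypothetical teardown */

    static void devm_foo_release(void *data)
    {
        foo_unregister(data);
    }

    static int devm_foo_register(struct device *dev, struct foo *foo)
    {
        int err;

        err = foo_register(foo);
        if (err)
            return err;

        /*
         * Undo the registration on driver unbind; if the action cannot
         * be recorded, devm_add_action_or_reset() runs it immediately.
         */
        return devm_add_action_or_reset(dev, devm_foo_release, foo);
    }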
@@ -33,7 +33,7 @@ properties:
type: boolean
patternProperties:
'^opp-?[0-9]+$':
'^opp(-?[0-9]+)*$':
type: object
description:
One or more OPP nodes describing voltage-current-frequency combinations.
......
@@ -710,6 +710,7 @@ static void dpm_noirq_resume_devices(pm_message_t state)
dev = to_device(dpm_noirq_list.next);
get_device(dev);
list_move_tail(&dev->power.entry, &dpm_late_early_list);
mutex_unlock(&dpm_list_mtx);
if (!is_async(dev)) {
@@ -724,8 +725,9 @@ static void dpm_noirq_resume_devices(pm_message_t state)
}
}
mutex_lock(&dpm_list_mtx);
put_device(dev);
mutex_lock(&dpm_list_mtx);
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
@@ -849,6 +851,7 @@ void dpm_resume_early(pm_message_t state)
dev = to_device(dpm_late_early_list.next);
get_device(dev);
list_move_tail(&dev->power.entry, &dpm_suspended_list);
mutex_unlock(&dpm_list_mtx);
if (!is_async(dev)) {
@@ -862,8 +865,10 @@ void dpm_resume_early(pm_message_t state)
pm_dev_err(dev, state, " early", error);
}
}
mutex_lock(&dpm_list_mtx);
put_device(dev);
mutex_lock(&dpm_list_mtx);
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
@@ -1026,7 +1031,12 @@ void dpm_resume(pm_message_t state)
}
if (!list_empty(&dev->power.entry))
list_move_tail(&dev->power.entry, &dpm_prepared_list);
mutex_unlock(&dpm_list_mtx);
put_device(dev);
mutex_lock(&dpm_list_mtx);
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
@@ -1104,14 +1114,16 @@ void dpm_complete(pm_message_t state)
get_device(dev);
dev->power.is_prepared = false;
list_move(&dev->power.entry, &list);
mutex_unlock(&dpm_list_mtx);
trace_device_pm_callback_start(dev, "", state.event);
device_complete(dev, state);
trace_device_pm_callback_end(dev, 0);
mutex_lock(&dpm_list_mtx);
put_device(dev);
mutex_lock(&dpm_list_mtx);
}
list_splice(&list, &dpm_list);
mutex_unlock(&dpm_list_mtx);
@@ -1296,17 +1308,21 @@ static int dpm_noirq_suspend_devices(pm_message_t state)
error = device_suspend_noirq(dev);
mutex_lock(&dpm_list_mtx);
if (error) {
pm_dev_err(dev, state, " noirq", error);
dpm_save_failed_dev(dev_name(dev));
put_device(dev);
break;
}
if (!list_empty(&dev->power.entry))
} else if (!list_empty(&dev->power.entry)) {
list_move(&dev->power.entry, &dpm_noirq_list);
}
mutex_unlock(&dpm_list_mtx);
put_device(dev);
if (async_error)
mutex_lock(&dpm_list_mtx);
if (error || async_error)
break;
}
mutex_unlock(&dpm_list_mtx);
@@ -1463,6 +1479,7 @@ int dpm_suspend_late(pm_message_t state)
int error = 0;
trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
wake_up_all_idle_cpus();
mutex_lock(&dpm_list_mtx);
pm_transition = state;
async_error = 0;
@@ -1471,23 +1488,28 @@ int dpm_suspend_late(pm_message_t state)
struct device *dev = to_device(dpm_suspended_list.prev);
get_device(dev);
mutex_unlock(&dpm_list_mtx);
error = device_suspend_late(dev);
mutex_lock(&dpm_list_mtx);
if (!list_empty(&dev->power.entry))
list_move(&dev->power.entry, &dpm_late_early_list);
if (error) {
pm_dev_err(dev, state, " late", error);
dpm_save_failed_dev(dev_name(dev));
put_device(dev);
break;
}
mutex_unlock(&dpm_list_mtx);
put_device(dev);
if (async_error)
mutex_lock(&dpm_list_mtx);
if (error || async_error)
break;
}
mutex_unlock(&dpm_list_mtx);
@@ -1747,21 +1769,27 @@ int dpm_suspend(pm_message_t state)
struct device *dev = to_device(dpm_prepared_list.prev);
get_device(dev);
mutex_unlock(&dpm_list_mtx);
error = device_suspend(dev);
mutex_lock(&dpm_list_mtx);
if (error) {
pm_dev_err(dev, state, "", error);
dpm_save_failed_dev(dev_name(dev));
put_device(dev);
break;
}
if (!list_empty(&dev->power.entry))
} else if (!list_empty(&dev->power.entry)) {
list_move(&dev->power.entry, &dpm_suspended_list);
}
mutex_unlock(&dpm_list_mtx);
put_device(dev);
if (async_error)
mutex_lock(&dpm_list_mtx);
if (error || async_error)
break;
}
mutex_unlock(&dpm_list_mtx);
@@ -1878,6 +1906,7 @@ int dpm_prepare(pm_message_t state)
struct device *dev = to_device(dpm_list.next);
get_device(dev);
mutex_unlock(&dpm_list_mtx);
trace_device_pm_callback_start(dev, "", state.event);
@@ -1885,21 +1914,23 @@ int dpm_prepare(pm_message_t state)
trace_device_pm_callback_end(dev, error);
mutex_lock(&dpm_list_mtx);
if (error) {
if (error == -EAGAIN) {
put_device(dev);
error = 0;
continue;
}
if (!error) {
dev->power.is_prepared = true;
if (!list_empty(&dev->power.entry))
list_move_tail(&dev->power.entry, &dpm_prepared_list);
} else if (error == -EAGAIN) {
error = 0;
} else {
dev_info(dev, "not prepared for power transition: code %d\n",
error);
put_device(dev);
break;
}
dev->power.is_prepared = true;
if (!list_empty(&dev->power.entry))
list_move_tail(&dev->power.entry, &dpm_prepared_list);
mutex_unlock(&dpm_list_mtx);
put_device(dev);
mutex_lock(&dpm_list_mtx);
}
mutex_unlock(&dpm_list_mtx);
trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
......
@@ -1006,9 +1006,16 @@ static void intel_pstate_hwp_offline(struct cpudata *cpu)
*/
value &= ~GENMASK_ULL(31, 24);
value |= HWP_ENERGY_PERF_PREFERENCE(cpu->epp_cached);
WRITE_ONCE(cpu->hwp_req_cached, value);
}
/*
* Clear the desired perf field in the cached HWP request value to
* prevent nonzero desired values from being leaked into the active
* mode.
*/
value &= ~HWP_DESIRED_PERF(~0L);
WRITE_ONCE(cpu->hwp_req_cached, value);
value &= ~GENMASK_ULL(31, 0);
min_perf = HWP_LOWEST_PERF(READ_ONCE(cpu->hwp_cap_cached));
@@ -1620,6 +1627,9 @@ static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
{
unsigned long flags;
if (!boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
return;
/* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
@@ -1642,6 +1652,7 @@ static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
/* wrmsrl_on_cpu has to be outside spinlock as this can result in IPC */
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x01);
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
}
}
@@ -3003,6 +3014,27 @@ static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy)
return intel_pstate_cpu_exit(policy);
}
static int intel_cpufreq_suspend(struct cpufreq_policy *policy)
{
intel_pstate_suspend(policy);
if (hwp_active) {
struct cpudata *cpu = all_cpu_data[policy->cpu];
u64 value = READ_ONCE(cpu->hwp_req_cached);
/*
* Clear the desired perf field in MSR_HWP_REQUEST in case
* intel_cpufreq_adjust_perf() is in use and the last value
* written by it may not be suitable.
*/
value &= ~HWP_DESIRED_PERF(~0L);
wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
WRITE_ONCE(cpu->hwp_req_cached, value);
}
return 0;
}
static struct cpufreq_driver intel_cpufreq = {
.flags = CPUFREQ_CONST_LOOPS,
.verify = intel_cpufreq_verify_policy,
@@ -3012,7 +3044,7 @@ static struct cpufreq_driver intel_cpufreq = {
.exit = intel_cpufreq_cpu_exit,
.offline = intel_cpufreq_cpu_offline,
.online = intel_pstate_cpu_online,
.suspend = intel_pstate_suspend,
.suspend = intel_cpufreq_suspend,
.resume = intel_pstate_resume,
.update_limits = intel_pstate_update_limits,
.name = "intel_cpufreq",
......
@@ -1301,6 +1301,32 @@ int devfreq_add_governor(struct devfreq_governor *governor)
}
EXPORT_SYMBOL(devfreq_add_governor);
static void devm_devfreq_remove_governor(void *governor)
{
WARN_ON(devfreq_remove_governor(governor));
}
/**
* devm_devfreq_add_governor() - Add devfreq governor
* @dev: device which adds devfreq governor
* @governor: the devfreq governor to be added
*
* This is a resource-managed variant of devfreq_add_governor().
*/
int devm_devfreq_add_governor(struct device *dev,
struct devfreq_governor *governor)
{
int err;
err = devfreq_add_governor(governor);
if (err)
return err;
return devm_add_action_or_reset(dev, devm_devfreq_remove_governor,
governor);
}
EXPORT_SYMBOL(devm_devfreq_add_governor);
/**
* devfreq_remove_governor() - Remove devfreq feature from a device.
* @governor: the devfreq governor to be removed
......
@@ -84,6 +84,9 @@ void devfreq_update_interval(struct devfreq *devfreq, unsigned int *delay);
int devfreq_add_governor(struct devfreq_governor *governor);
int devfreq_remove_governor(struct devfreq_governor *governor);
int devm_devfreq_add_governor(struct device *dev,
struct devfreq_governor *governor);
int devfreq_update_status(struct devfreq *devfreq, unsigned long freq);
int devfreq_update_target(struct devfreq *devfreq, unsigned long freq);
......
@@ -178,7 +178,6 @@ struct tegra_devfreq_soc_data {
struct tegra_devfreq {
struct devfreq *devfreq;
struct opp_table *opp_table;
struct reset_control *reset;
struct clk *clock;
@@ -789,6 +788,39 @@ static struct devfreq_governor tegra_devfreq_governor = {
.event_handler = tegra_governor_event_handler,
};
static void devm_tegra_devfreq_deinit_hw(void *data)
{
struct tegra_devfreq *tegra = data;
reset_control_reset(tegra->reset);
clk_disable_unprepare(tegra->clock);
}
static int devm_tegra_devfreq_init_hw(struct device *dev,
struct tegra_devfreq *tegra)
{
int err;
err = clk_prepare_enable(tegra->clock);
if (err) {
dev_err(dev, "Failed to prepare and enable ACTMON clock\n");
return err;
}
err = devm_add_action_or_reset(dev, devm_tegra_devfreq_deinit_hw,
tegra);
if (err)
return err;
err = reset_control_reset(tegra->reset);
if (err) {
dev_err(dev, "Failed to reset hardware: %d\n", err);
return err;
}
return err;
}
static int tegra_devfreq_probe(struct platform_device *pdev)
{
u32 hw_version = BIT(tegra_sku_info.soc_speedo_id);
@@ -842,38 +874,26 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
return err;
}
tegra->opp_table = dev_pm_opp_set_supported_hw(&pdev->dev,
&hw_version, 1);
err = PTR_ERR_OR_ZERO(tegra->opp_table);
err = devm_pm_opp_set_supported_hw(&pdev->dev, &hw_version, 1);
if (err) {
dev_err(&pdev->dev, "Failed to set supported HW: %d\n", err);
return err;
}
err = dev_pm_opp_of_add_table_noclk(&pdev->dev, 0);
err = devm_pm_opp_of_add_table_noclk(&pdev->dev, 0);
if (err) {
dev_err(&pdev->dev, "Failed to add OPP table: %d\n", err);
goto put_hw;
}
err = clk_prepare_enable(tegra->clock);
if (err) {
dev_err(&pdev->dev,
"Failed to prepare and enable ACTMON clock\n");
goto remove_table;
return err;
}
err = reset_control_reset(tegra->reset);
if (err) {
dev_err(&pdev->dev, "Failed to reset hardware: %d\n", err);
goto disable_clk;
}
err = devm_tegra_devfreq_init_hw(&pdev->dev, tegra);
if (err)
return err;
rate = clk_round_rate(tegra->emc_clock, ULONG_MAX);
if (rate < 0) {
if (rate <= 0) {
dev_err(&pdev->dev, "Failed to round clock rate: %ld\n", rate);
err = rate;
goto disable_clk;
return rate ?: -EINVAL;
}
tegra->max_freq = rate / KHZ;
@@ -892,52 +912,18 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
INIT_DELAYED_WORK(&tegra->cpufreq_update_work,
tegra_actmon_delayed_update);
err = devfreq_add_governor(&tegra_devfreq_governor);
err = devm_devfreq_add_governor(&pdev->dev, &tegra_devfreq_governor);
if (err) {
dev_err(&pdev->dev, "Failed to add governor: %d\n", err);
goto remove_opps;
return err;
}
tegra_devfreq_profile.initial_freq = clk_get_rate(tegra->emc_clock);
devfreq = devfreq_add_device(&pdev->dev, &tegra_devfreq_profile,
"tegra_actmon", NULL);
if (IS_ERR(devfreq)) {
err = PTR_ERR(devfreq);
goto remove_governor;
}
return 0;
remove_governor:
devfreq_remove_governor(&tegra_devfreq_governor);
remove_opps:
dev_pm_opp_remove_all_dynamic(&pdev->dev);
reset_control_reset(tegra->reset);
disable_clk:
clk_disable_unprepare(tegra->clock);
remove_table:
dev_pm_opp_of_remove_table(&pdev->dev);
put_hw:
dev_pm_opp_put_supported_hw(tegra->opp_table);
return err;
}
static int tegra_devfreq_remove(struct platform_device *pdev)
{
struct tegra_devfreq *tegra = platform_get_drvdata(pdev);
devfreq_remove_device(tegra->devfreq);
devfreq_remove_governor(&tegra_devfreq_governor);
reset_control_reset(tegra->reset);
clk_disable_unprepare(tegra->clock);
dev_pm_opp_of_remove_table(&pdev->dev);
dev_pm_opp_put_supported_hw(tegra->opp_table);
devfreq = devm_devfreq_add_device(&pdev->dev, &tegra_devfreq_profile,
"tegra_actmon", NULL);
if (IS_ERR(devfreq))
return PTR_ERR(devfreq);
return 0;
}
@@ -967,7 +953,6 @@ MODULE_DEVICE_TABLE(of, tegra_devfreq_of_match);
static struct platform_driver tegra_devfreq_driver = {
.probe = tegra_devfreq_probe,
.remove = tegra_devfreq_remove,
.driver = {
.name = "tegra-devfreq",
.of_match_table = tegra_devfreq_of_match,
......
@@ -2348,12 +2348,12 @@ static void _opp_detach_genpd(struct opp_table *opp_table)
* "required-opps" are added in DT.
*/
struct opp_table *dev_pm_opp_attach_genpd(struct device *dev,
const char **names, struct device ***virt_devs)
const char * const *names, struct device ***virt_devs)
{
struct opp_table *opp_table;
struct device *virt_dev;
int index = 0, ret = -EINVAL;
const char **name = names;
const char * const *name = names;
opp_table = _add_opp_table(dev, false);
if (IS_ERR(opp_table))
@@ -2457,7 +2457,7 @@ static void devm_pm_opp_detach_genpd(void *data)
*
* Return: 0 on success and errorno otherwise.
*/
int devm_pm_opp_attach_genpd(struct device *dev, const char **names,
int devm_pm_opp_attach_genpd(struct device *dev, const char * const *names,
struct device ***virt_devs)
{
struct opp_table *opp_table;
......
@@ -170,7 +170,7 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table,
}
count = of_count_phandle_with_args(np, "required-opps", NULL);
if (!count)
if (count <= 0)
goto put_np;
required_opp_tables = kcalloc(count, sizeof(*required_opp_tables),
@@ -921,7 +921,7 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
free_opp:
_opp_free(new_opp);
return ERR_PTR(ret);
return ret ? ERR_PTR(ret) : NULL;
}
/* Initializes OPP tables based on new bindings */
@@ -1081,6 +1081,17 @@ static void devm_pm_opp_of_table_release(void *data)
dev_pm_opp_of_remove_table(data);
}
static int _devm_of_add_table_indexed(struct device *dev, int index, bool getclk)
{
int ret;
ret = _of_add_table_indexed(dev, index, getclk);
if (ret)
return ret;
return devm_add_action_or_reset(dev, devm_pm_opp_of_table_release, dev);
}
/**
* devm_pm_opp_of_add_table() - Initialize opp table from device tree
* @dev: device pointer used to lookup OPP table.
@@ -1102,13 +1113,7 @@ static void devm_pm_opp_of_table_release(void *data)
*/
int devm_pm_opp_of_add_table(struct device *dev)
{
int ret;
ret = dev_pm_opp_of_add_table(dev);
if (ret)
return ret;
return devm_add_action_or_reset(dev, devm_pm_opp_of_table_release, dev);
return _devm_of_add_table_indexed(dev, 0, true);
}
EXPORT_SYMBOL_GPL(devm_pm_opp_of_add_table);
@@ -1151,6 +1156,19 @@ int dev_pm_opp_of_add_table_indexed(struct device *dev, int index)
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table_indexed);
/**
* devm_pm_opp_of_add_table_indexed() - Initialize indexed opp table from device tree
* @dev: device pointer used to lookup OPP table.
* @index: Index number.
*
* This is a resource-managed variant of dev_pm_opp_of_add_table_indexed().
*/
int devm_pm_opp_of_add_table_indexed(struct device *dev, int index)
{
return _devm_of_add_table_indexed(dev, index, true);
}
EXPORT_SYMBOL_GPL(devm_pm_opp_of_add_table_indexed);
/**
* dev_pm_opp_of_add_table_noclk() - Initialize indexed opp table from device
* tree without getting clk for device.
@@ -1169,6 +1187,20 @@ int dev_pm_opp_of_add_table_noclk(struct device *dev, int index)
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table_noclk);
/**
* devm_pm_opp_of_add_table_noclk() - Initialize indexed opp table from device
* tree without getting clk for device.
* @dev: device pointer used to lookup OPP table.
* @index: Index number.
*
* This is a resource-managed variant of dev_pm_opp_of_add_table_noclk().
*/
int devm_pm_opp_of_add_table_noclk(struct device *dev, int index)
{
return _devm_of_add_table_indexed(dev, index, false);
}
EXPORT_SYMBOL_GPL(devm_pm_opp_of_add_table_noclk);
/* CPU device specific helpers */
/**
......
@@ -156,9 +156,9 @@ int devm_pm_opp_set_clkname(struct device *dev, const char *name);
struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data));
void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table);
int devm_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data));
struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs);
struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char * const *names, struct device ***virt_devs);
void dev_pm_opp_detach_genpd(struct opp_table *opp_table);
int devm_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs);
int devm_pm_opp_attach_genpd(struct device *dev, const char * const *names, struct device ***virt_devs);
struct dev_pm_opp *dev_pm_opp_xlate_required_opp(struct opp_table *src_table, struct opp_table *dst_table, struct dev_pm_opp *src_opp);
int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate);
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq);
@@ -376,7 +376,7 @@ static inline int devm_pm_opp_set_clkname(struct device *dev, const char *name)
return -EOPNOTSUPP;
}
static inline struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs)
static inline struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char * const *names, struct device ***virt_devs)
{
return ERR_PTR(-EOPNOTSUPP);
}
@@ -384,7 +384,7 @@ static inline struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, cons
static inline void dev_pm_opp_detach_genpd(struct opp_table *opp_table) {}
static inline int devm_pm_opp_attach_genpd(struct device *dev,
const char **names,
const char * const *names,
struct device ***virt_devs)
{
return -EOPNOTSUPP;
@@ -439,7 +439,9 @@ static inline int dev_pm_opp_sync_regulators(struct device *dev)
#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
int dev_pm_opp_of_add_table(struct device *dev);
int dev_pm_opp_of_add_table_indexed(struct device *dev, int index);
int devm_pm_opp_of_add_table_indexed(struct device *dev, int index);
int dev_pm_opp_of_add_table_noclk(struct device *dev, int index);
int devm_pm_opp_of_add_table_noclk(struct device *dev, int index);
void dev_pm_opp_of_remove_table(struct device *dev);
int devm_pm_opp_of_add_table(struct device *dev);
int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask);
@@ -465,11 +467,21 @@ static inline int dev_pm_opp_of_add_table_indexed(struct device *dev, int index)
return -EOPNOTSUPP;
}
static inline int devm_pm_opp_of_add_table_indexed(struct device *dev, int index)
{
return -EOPNOTSUPP;
}
static inline int dev_pm_opp_of_add_table_noclk(struct device *dev, int index)
{
return -EOPNOTSUPP;
}
static inline int devm_pm_opp_of_add_table_noclk(struct device *dev, int index)
{
return -EOPNOTSUPP;
}
static inline void dev_pm_opp_of_remove_table(struct device *dev)
{
}
......