Commit 45a7953c authored by Rafael J. Wysocki

Merge branches 'pm-core', 'pm-opp', 'pm-domains', 'pm-cpu' and 'pm-avs'

* pm-core:
  PM / wakeup: Set power.can_wakeup if wakeup_sysfs_add() fails

* pm-opp:
  PM / OPP: Fix get sharing CPUs when hotplug is used
  PM / OPP: OF: Use pr_debug() instead of pr_err() while adding OPP table

* pm-domains:
  PM / Domains: Convert to using %pOF instead of full_name
  PM / Domains: Extend generic power domain debugfs
  PM / Domains: Add time accounting to various genpd states

* pm-cpu:
  PM / CPU: replace raw_notifier with atomic_notifier

* pm-avs:
  PM / AVS: rockchip-io: add io selectors and supplies for RV1108
@@ -39,6 +39,8 @@ Required properties:
- "rockchip,rk3368-pmu-io-voltage-domain" for rk3368 pmu-domains
- "rockchip,rk3399-io-voltage-domain" for rk3399
- "rockchip,rk3399-pmu-io-voltage-domain" for rk3399 pmu-domains
- "rockchip,rv1108-io-voltage-domain" for rv1108
- "rockchip,rv1108-pmu-io-voltage-domain" for rv1108 pmu-domains
Deprecated properties:
- rockchip,grf: phandle to the syscon managing the "general register files"
...
@@ -209,6 +209,34 @@ static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
smp_mb__after_atomic();
}
#ifdef CONFIG_DEBUG_FS
static void genpd_update_accounting(struct generic_pm_domain *genpd)
{
ktime_t delta, now;
now = ktime_get();
delta = ktime_sub(now, genpd->accounting_time);
/*
* If genpd->status is active, it means we are just
* out of off and so update the idle time and vice
* versa.
*/
if (genpd->status == GPD_STATE_ACTIVE) {
int state_idx = genpd->state_idx;
genpd->states[state_idx].idle_time =
ktime_add(genpd->states[state_idx].idle_time, delta);
} else {
genpd->on_time = ktime_add(genpd->on_time, delta);
}
genpd->accounting_time = now;
}
#else
static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
#endif
static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
unsigned int state_idx = genpd->state_idx;
@@ -361,6 +389,7 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
}
genpd->status = GPD_STATE_POWER_OFF;
genpd_update_accounting(genpd);
list_for_each_entry(link, &genpd->slave_links, slave_node) {
genpd_sd_counter_dec(link->master);
@@ -413,6 +442,8 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
goto err;
genpd->status = GPD_STATE_ACTIVE;
genpd_update_accounting(genpd);
return 0;
err:
@@ -1540,6 +1571,7 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
genpd->max_off_time_changed = true;
genpd->provider = NULL;
genpd->has_provider = false;
genpd->accounting_time = ktime_get();
genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
genpd->domain.ops.runtime_resume = genpd_runtime_resume;
genpd->domain.ops.prepare = pm_genpd_prepare;
@@ -1743,7 +1775,7 @@ static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
mutex_lock(&of_genpd_mutex);
list_add(&cp->link, &of_genpd_providers);
mutex_unlock(&of_genpd_mutex);
-pr_debug("Added domain provider from %s\n", np->full_name);
pr_debug("Added domain provider from %pOF\n", np);
return 0;
}
@@ -2149,16 +2181,16 @@ static int genpd_parse_state(struct genpd_power_state *genpd_state,
err = of_property_read_u32(state_node, "entry-latency-us",
&entry_latency);
if (err) {
-pr_debug(" * %s missing entry-latency-us property\n",
-state_node->full_name);
pr_debug(" * %pOF missing entry-latency-us property\n",
state_node);
return -EINVAL;
}
err = of_property_read_u32(state_node, "exit-latency-us",
&exit_latency);
if (err) {
-pr_debug(" * %s missing exit-latency-us property\n",
-state_node->full_name);
pr_debug(" * %pOF missing exit-latency-us property\n",
state_node);
return -EINVAL;
}
@@ -2212,8 +2244,8 @@ int of_genpd_parse_idle_states(struct device_node *dn,
ret = genpd_parse_state(&st[i++], np);
if (ret) {
pr_err
-("Parsing idle state node %s failed with err %d\n",
-np->full_name, ret);
("Parsing idle state node %pOF failed with err %d\n",
np, ret);
of_node_put(np);
kfree(st);
return ret;
@@ -2327,7 +2359,7 @@ static int pm_genpd_summary_one(struct seq_file *s,
return 0;
}
-static int pm_genpd_summary_show(struct seq_file *s, void *data)
static int genpd_summary_show(struct seq_file *s, void *data)
{
struct generic_pm_domain *genpd;
int ret = 0;
@@ -2350,21 +2382,187 @@ static int pm_genpd_summary_show(struct seq_file *s, void *data)
return ret;
}
-static int pm_genpd_summary_open(struct inode *inode, struct file *file)
static int genpd_status_show(struct seq_file *s, void *data)
{
-return single_open(file, pm_genpd_summary_show, NULL);
static const char * const status_lookup[] = {
[GPD_STATE_ACTIVE] = "on",
[GPD_STATE_POWER_OFF] = "off"
};
struct generic_pm_domain *genpd = s->private;
int ret = 0;
ret = genpd_lock_interruptible(genpd);
if (ret)
return -ERESTARTSYS;
if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
goto exit;
if (genpd->status == GPD_STATE_POWER_OFF)
seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
genpd->state_idx);
else
seq_printf(s, "%s\n", status_lookup[genpd->status]);
exit:
genpd_unlock(genpd);
return ret;
}
-static const struct file_operations pm_genpd_summary_fops = {
-.open = pm_genpd_summary_open,
-.read = seq_read,
-.llseek = seq_lseek,
-.release = single_release,
-};
static int genpd_sub_domains_show(struct seq_file *s, void *data)
{
struct generic_pm_domain *genpd = s->private;
struct gpd_link *link;
int ret = 0;
ret = genpd_lock_interruptible(genpd);
if (ret)
return -ERESTARTSYS;
list_for_each_entry(link, &genpd->master_links, master_node)
seq_printf(s, "%s\n", link->slave->name);
genpd_unlock(genpd);
return ret;
}
static int genpd_idle_states_show(struct seq_file *s, void *data)
{
struct generic_pm_domain *genpd = s->private;
unsigned int i;
int ret = 0;
ret = genpd_lock_interruptible(genpd);
if (ret)
return -ERESTARTSYS;
seq_puts(s, "State Time Spent(ms)\n");
for (i = 0; i < genpd->state_count; i++) {
ktime_t delta = 0;
s64 msecs;
if ((genpd->status == GPD_STATE_POWER_OFF) &&
(genpd->state_idx == i))
delta = ktime_sub(ktime_get(), genpd->accounting_time);
msecs = ktime_to_ms(
ktime_add(genpd->states[i].idle_time, delta));
seq_printf(s, "S%-13i %lld\n", i, msecs);
}
genpd_unlock(genpd);
return ret;
}
static int genpd_active_time_show(struct seq_file *s, void *data)
{
struct generic_pm_domain *genpd = s->private;
ktime_t delta = 0;
int ret = 0;
ret = genpd_lock_interruptible(genpd);
if (ret)
return -ERESTARTSYS;
if (genpd->status == GPD_STATE_ACTIVE)
delta = ktime_sub(ktime_get(), genpd->accounting_time);
seq_printf(s, "%lld ms\n", ktime_to_ms(
ktime_add(genpd->on_time, delta)));
genpd_unlock(genpd);
return ret;
}
static int genpd_total_idle_time_show(struct seq_file *s, void *data)
{
struct generic_pm_domain *genpd = s->private;
ktime_t delta = 0, total = 0;
unsigned int i;
int ret = 0;
ret = genpd_lock_interruptible(genpd);
if (ret)
return -ERESTARTSYS;
for (i = 0; i < genpd->state_count; i++) {
if ((genpd->status == GPD_STATE_POWER_OFF) &&
(genpd->state_idx == i))
delta = ktime_sub(ktime_get(), genpd->accounting_time);
total = ktime_add(total, genpd->states[i].idle_time);
}
total = ktime_add(total, delta);
seq_printf(s, "%lld ms\n", ktime_to_ms(total));
genpd_unlock(genpd);
return ret;
}
static int genpd_devices_show(struct seq_file *s, void *data)
{
struct generic_pm_domain *genpd = s->private;
struct pm_domain_data *pm_data;
const char *kobj_path;
int ret = 0;
ret = genpd_lock_interruptible(genpd);
if (ret)
return -ERESTARTSYS;
list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
kobj_path = kobject_get_path(&pm_data->dev->kobj,
genpd_is_irq_safe(genpd) ?
GFP_ATOMIC : GFP_KERNEL);
if (kobj_path == NULL)
continue;
seq_printf(s, "%s\n", kobj_path);
kfree(kobj_path);
}
genpd_unlock(genpd);
return ret;
}
#define define_genpd_open_function(name) \
static int genpd_##name##_open(struct inode *inode, struct file *file) \
{ \
return single_open(file, genpd_##name##_show, inode->i_private); \
}
define_genpd_open_function(summary);
define_genpd_open_function(status);
define_genpd_open_function(sub_domains);
define_genpd_open_function(idle_states);
define_genpd_open_function(active_time);
define_genpd_open_function(total_idle_time);
define_genpd_open_function(devices);
#define define_genpd_debugfs_fops(name) \
static const struct file_operations genpd_##name##_fops = { \
.open = genpd_##name##_open, \
.read = seq_read, \
.llseek = seq_lseek, \
.release = single_release, \
}
define_genpd_debugfs_fops(summary);
define_genpd_debugfs_fops(status);
define_genpd_debugfs_fops(sub_domains);
define_genpd_debugfs_fops(idle_states);
define_genpd_debugfs_fops(active_time);
define_genpd_debugfs_fops(total_idle_time);
define_genpd_debugfs_fops(devices);
static int __init pm_genpd_debug_init(void)
{
struct dentry *d;
struct generic_pm_domain *genpd;
pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
@@ -2372,10 +2570,29 @@ static int __init pm_genpd_debug_init(void)
return -ENOMEM;
d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
-pm_genpd_debugfs_dir, NULL, &pm_genpd_summary_fops);
pm_genpd_debugfs_dir, NULL, &genpd_summary_fops);
if (!d)
return -ENOMEM;
list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
d = debugfs_create_dir(genpd->name, pm_genpd_debugfs_dir);
if (!d)
return -ENOMEM;
debugfs_create_file("current_state", 0444,
d, genpd, &genpd_status_fops);
debugfs_create_file("sub_domains", 0444,
d, genpd, &genpd_sub_domains_fops);
debugfs_create_file("idle_states", 0444,
d, genpd, &genpd_idle_states_fops);
debugfs_create_file("active_time", 0444,
d, genpd, &genpd_active_time_fops);
debugfs_create_file("total_idle_time", 0444,
d, genpd, &genpd_total_idle_time_fops);
debugfs_create_file("devices", 0444,
d, genpd, &genpd_devices_fops);
}
return 0;
}
late_initcall(pm_genpd_debug_init);
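The result is one directory per registered domain under the existing pm_genpd debugfs root. As an illustration only (the domain name "power-domain-a" below is hypothetical; real entries are named after genpd->name), the layout looks roughly like:

/sys/kernel/debug/pm_genpd/
    pm_genpd_summary
    power-domain-a/
        current_state
        sub_domains
        idle_states
        active_time
        total_idle_time
        devices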
......
@@ -248,15 +248,22 @@ void dev_pm_opp_of_remove_table(struct device *dev)
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
-/* Returns opp descriptor node for a device, caller must do of_node_put() */
-struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev)
/* Returns opp descriptor node for a device node, caller must
 * do of_node_put() */
static struct device_node *_opp_of_get_opp_desc_node(struct device_node *np)
{
/*
* There should be only ONE phandle present in "operating-points-v2"
* property.
*/
-return of_parse_phandle(dev->of_node, "operating-points-v2", 0);
return of_parse_phandle(np, "operating-points-v2", 0);
}
/* Returns opp descriptor node for a device, caller must do of_node_put() */
struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev)
{
return _opp_of_get_opp_desc_node(dev->of_node);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_opp_desc_node);
@@ -539,7 +546,11 @@ int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask)
ret = dev_pm_opp_of_add_table(cpu_dev);
if (ret) {
-pr_err("%s: couldn't find opp table for cpu:%d, %d\n",
/*
* OPP may get registered dynamically, don't print error
* message here.
*/
pr_debug("%s: couldn't find opp table for cpu:%d, %d\n",
__func__, cpu, ret);
/* Free all other OPPs */
@@ -572,8 +583,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);
int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
struct cpumask *cpumask)
{
-struct device_node *np, *tmp_np;
-struct device *tcpu_dev;
struct device_node *np, *tmp_np, *cpu_np;
int cpu, ret = 0;
/* Get OPP descriptor node */
@@ -593,19 +603,18 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
if (cpu == cpu_dev->id)
continue;
-tcpu_dev = get_cpu_device(cpu);
-if (!tcpu_dev) {
-dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
cpu_np = of_get_cpu_node(cpu, NULL);
if (!cpu_np) {
dev_err(cpu_dev, "%s: failed to get cpu%d node\n",
__func__, cpu);
-ret = -ENODEV;
ret = -ENOENT;
goto put_cpu_node;
}
/* Get OPP descriptor node */
-tmp_np = dev_pm_opp_of_get_opp_desc_node(tcpu_dev);
tmp_np = _opp_of_get_opp_desc_node(cpu_np);
if (!tmp_np) {
-dev_err(tcpu_dev, "%s: Couldn't find opp node.\n",
-__func__);
pr_err("%pOF: Couldn't find opp node\n", cpu_np);
ret = -ENOENT;
goto put_cpu_node;
}
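For context, dev_pm_opp_of_get_sharing_cpus() remains the public entry point after this rework. A minimal sketch of how a cpufreq driver might call it (the function name below is hypothetical and error handling is trimmed):

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/pm_opp.h>

static int example_cpufreq_init(struct cpufreq_policy *policy)
{
	struct device *cpu_dev = get_cpu_device(policy->cpu);
	int ret;

	if (!cpu_dev)
		return -ENODEV;

	/* Mark every CPU that points at the same shared OPP table. */
	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, policy->cpus);
	if (ret)
		dev_dbg(cpu_dev, "no shared OPP table: %d\n", ret);

	return ret;
}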
...
@@ -412,15 +412,17 @@ void device_set_wakeup_capable(struct device *dev, bool capable)
if (!!dev->power.can_wakeup == !!capable)
return;
dev->power.can_wakeup = capable;
if (device_is_registered(dev) && !list_empty(&dev->power.entry)) {
if (capable) {
-if (wakeup_sysfs_add(dev))
-return;
int ret = wakeup_sysfs_add(dev);
if (ret)
dev_info(dev, "Wakeup sysfs attributes not added\n");
} else {
wakeup_sysfs_remove(dev);
}
}
-dev->power.can_wakeup = capable;
}
EXPORT_SYMBOL_GPL(device_set_wakeup_capable);
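A minimal sketch of the caller side, assuming a hypothetical driver probe path; with this change the capability flag is recorded even when the sysfs attributes cannot be created:

#include <linux/device.h>
#include <linux/pm_wakeup.h>

static int example_probe(struct device *dev)
{
	/* Declare the device wakeup-capable; sysfs attrs are best effort. */
	device_set_wakeup_capable(dev, true);

	/* Optionally arm it for wakeup by default. */
	device_wakeup_enable(dev);

	return 0;
}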
...
@@ -349,6 +349,36 @@ static const struct rockchip_iodomain_soc_data soc_data_rk3399_pmu = {
.init = rk3399_pmu_iodomain_init,
};
static const struct rockchip_iodomain_soc_data soc_data_rv1108 = {
.grf_offset = 0x404,
.supply_names = {
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
"vccio1",
"vccio2",
"vccio3",
"vccio5",
"vccio6",
},
};
static const struct rockchip_iodomain_soc_data soc_data_rv1108_pmu = {
.grf_offset = 0x104,
.supply_names = {
"pmu",
},
};
static const struct of_device_id rockchip_iodomain_match[] = {
{
.compatible = "rockchip,rk3188-io-voltage-domain",
@@ -382,6 +412,14 @@ static const struct of_device_id rockchip_iodomain_match[] = {
.compatible = "rockchip,rk3399-pmu-io-voltage-domain",
.data = (void *)&soc_data_rk3399_pmu
},
{
.compatible = "rockchip,rv1108-io-voltage-domain",
.data = (void *)&soc_data_rv1108
},
{
.compatible = "rockchip,rv1108-pmu-io-voltage-domain",
.data = (void *)&soc_data_rv1108_pmu
},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, rockchip_iodomain_match);
...
@@ -43,6 +43,7 @@ struct genpd_power_state {
s64 power_on_latency_ns;
s64 residency_ns;
struct fwnode_handle *fwnode;
ktime_t idle_time;
};
struct genpd_lock_ops;
@@ -78,6 +79,8 @@ struct generic_pm_domain {
unsigned int state_count; /* number of states */
unsigned int state_idx; /* state that genpd will go to when off */
void *free; /* Free the state that was allocated for default */
ktime_t on_time;
ktime_t accounting_time;
const struct genpd_lock_ops *lock_ops;
union {
struct mutex mlock;
...
@@ -22,15 +22,21 @@
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>
-static DEFINE_RWLOCK(cpu_pm_notifier_lock);
-static RAW_NOTIFIER_HEAD(cpu_pm_notifier_chain);
static ATOMIC_NOTIFIER_HEAD(cpu_pm_notifier_chain);
static int cpu_pm_notify(enum cpu_pm_event event, int nr_to_call, int *nr_calls)
{
int ret;
-ret = __raw_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL,
/*
* __atomic_notifier_call_chain has a RCU read critical section, which
* could be disfunctional in cpu idle. Copy RCU_NONIDLE code to let
* RCU know this.
*/
rcu_irq_enter_irqson();
ret = __atomic_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL,
nr_to_call, nr_calls);
rcu_irq_exit_irqson();
return notifier_to_errno(ret);
}
@@ -47,14 +53,7 @@ static int cpu_pm_notify(enum cpu_pm_event event, int nr_to_call, int *nr_calls)
*/
int cpu_pm_register_notifier(struct notifier_block *nb)
{
-unsigned long flags;
-int ret;
-write_lock_irqsave(&cpu_pm_notifier_lock, flags);
-ret = raw_notifier_chain_register(&cpu_pm_notifier_chain, nb);
-write_unlock_irqrestore(&cpu_pm_notifier_lock, flags);
-return ret;
return atomic_notifier_chain_register(&cpu_pm_notifier_chain, nb);
}
EXPORT_SYMBOL_GPL(cpu_pm_register_notifier);
@@ -69,14 +68,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_register_notifier);
*/
int cpu_pm_unregister_notifier(struct notifier_block *nb)
{
-unsigned long flags;
-int ret;
-write_lock_irqsave(&cpu_pm_notifier_lock, flags);
-ret = raw_notifier_chain_unregister(&cpu_pm_notifier_chain, nb);
-write_unlock_irqrestore(&cpu_pm_notifier_lock, flags);
-return ret;
return atomic_notifier_chain_unregister(&cpu_pm_notifier_chain, nb);
}
EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier);
@@ -100,7 +92,6 @@ int cpu_pm_enter(void)
int nr_calls;
int ret = 0;
-read_lock(&cpu_pm_notifier_lock);
ret = cpu_pm_notify(CPU_PM_ENTER, -1, &nr_calls);
if (ret)
/*
@@ -108,7 +99,6 @@ int cpu_pm_enter(void)
* PM entry who are notified earlier to prepare for it.
*/
cpu_pm_notify(CPU_PM_ENTER_FAILED, nr_calls - 1, NULL);
-read_unlock(&cpu_pm_notifier_lock);
return ret;
}
@@ -128,13 +118,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_enter);
*/
int cpu_pm_exit(void)
{
-int ret;
-read_lock(&cpu_pm_notifier_lock);
-ret = cpu_pm_notify(CPU_PM_EXIT, -1, NULL);
-read_unlock(&cpu_pm_notifier_lock);
-return ret;
return cpu_pm_notify(CPU_PM_EXIT, -1, NULL);
}
EXPORT_SYMBOL_GPL(cpu_pm_exit);
@@ -159,7 +143,6 @@ int cpu_cluster_pm_enter(void)
int nr_calls;
int ret = 0;
-read_lock(&cpu_pm_notifier_lock);
ret = cpu_pm_notify(CPU_CLUSTER_PM_ENTER, -1, &nr_calls);
if (ret)
/*
@@ -167,7 +150,6 @@ int cpu_cluster_pm_enter(void)
* PM entry who are notified earlier to prepare for it.
*/
cpu_pm_notify(CPU_CLUSTER_PM_ENTER_FAILED, nr_calls - 1, NULL);
-read_unlock(&cpu_pm_notifier_lock);
return ret;
}
@@ -190,13 +172,7 @@ EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter);
*/
int cpu_cluster_pm_exit(void)
{
-int ret;
-read_lock(&cpu_pm_notifier_lock);
-ret = cpu_pm_notify(CPU_CLUSTER_PM_EXIT, -1, NULL);
-read_unlock(&cpu_pm_notifier_lock);
-return ret;
return cpu_pm_notify(CPU_CLUSTER_PM_EXIT, -1, NULL);
}
EXPORT_SYMBOL_GPL(cpu_cluster_pm_exit);
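Callers are unchanged by the conversion; a minimal sketch (hypothetical names) of a CPU PM notifier, which now runs on the atomic chain instead of a raw chain guarded by a rwlock:

#include <linux/cpu_pm.h>
#include <linux/init.h>
#include <linux/notifier.h>

static int example_cpu_pm_callback(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	switch (action) {
	case CPU_PM_ENTER:
		/* Save per-CPU hardware state before a low-power state. */
		break;
	case CPU_PM_EXIT:
	case CPU_PM_ENTER_FAILED:
		/* Restore per-CPU hardware state. */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_pm_nb = {
	.notifier_call = example_cpu_pm_callback,
};

static int __init example_init(void)
{
	return cpu_pm_register_notifier(&example_cpu_pm_nb);
}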
...