Commit 0883ac03 authored by Ulf Hansson, committed by Rafael J. Wysocki

PM / Domains: Fix asynchronous execution of *noirq() callbacks

As the PM core may invoke the *noirq() callbacks asynchronously, the
current lock-less approach in genpd doesn't work. The consequence is that
we may find concurrent operations racing to power on/off the PM domain.

As of now, no immediate errors have been reported, but it's probably only a
matter of time. Therefore, let's fix the problem now before this becomes a real
issue, by deploying the locking scheme in the relevant functions.
Reported-by: Brian Norris <briannorris@chromium.org>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
parent f3c826ac
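
To illustrate the race described in the commit message, here is a minimal, standalone user-space sketch in C with pthreads. It is not kernel code and not part of this commit; the fake_domain structure and the fake_sync_power_off() and suspend_noirq() names are invented for the illustration. It models how the patched pm_genpd_suspend_noirq() serializes concurrent *noirq() callbacks for devices in the same PM domain by taking the per-domain lock around the suspended_count update and the synchronous power-off, mirroring the genpd_lock()/genpd_unlock() calls added in the diff below.

/*
 * Standalone user-space model of the locking added to the *noirq() path.
 * Not kernel code: fake_domain stands in for struct generic_pm_domain and
 * the mutex stands in for genpd_lock()/genpd_unlock().
 */
#include <pthread.h>
#include <stdio.h>

struct fake_domain {
	pthread_mutex_t lock;		/* models the genpd lock */
	unsigned int suspended_count;
	int powered_on;
};

/* models genpd_sync_power_off(): must not run twice in parallel */
static void fake_sync_power_off(struct fake_domain *d)
{
	if (d->powered_on) {
		d->powered_on = 0;
		printf("domain powered off\n");
	}
}

/* models the patched pm_genpd_suspend_noirq() body */
static void *suspend_noirq(void *arg)
{
	struct fake_domain *d = arg;

	pthread_mutex_lock(&d->lock);	/* genpd_lock(genpd) */
	d->suspended_count++;
	fake_sync_power_off(d);		/* genpd_sync_power_off(genpd, true, 0) */
	pthread_mutex_unlock(&d->lock);	/* genpd_unlock(genpd) */
	return NULL;
}

int main(void)
{
	struct fake_domain d = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.suspended_count = 0,
		.powered_on = 1,
	};
	pthread_t t1, t2;

	/* two devices of the same domain suspended asynchronously */
	pthread_create(&t1, NULL, suspend_noirq, &d);
	pthread_create(&t2, NULL, suspend_noirq, &d);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);

	printf("suspended_count = %u\n", d.suspended_count);
	return 0;
}

Build with cc -pthread. Without the mutex, both threads could observe powered_on == 1 and run the power-off path concurrently while racing on suspended_count, which is the kind of situation the patch prevents.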
@@ -729,16 +729,18 @@ static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
 /**
  * genpd_sync_power_off - Synchronously power off a PM domain and its masters.
  * @genpd: PM domain to power off, if possible.
+ * @use_lock: use the lock.
+ * @depth: nesting count for lockdep.
  *
  * Check if the given PM domain can be powered off (during system suspend or
  * hibernation) and do that if so. Also, in that case propagate to its masters.
  *
  * This function is only called in "noirq" and "syscore" stages of system power
- * transitions, so it need not acquire locks (all of the "noirq" callbacks are
- * executed sequentially, so it is guaranteed that it will never run twice in
- * parallel).
+ * transitions. The "noirq" callbacks may be executed asynchronously, thus in
+ * these cases the lock must be held.
  */
-static void genpd_sync_power_off(struct generic_pm_domain *genpd)
+static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
+				 unsigned int depth)
 {
 	struct gpd_link *link;
 
@@ -757,20 +759,29 @@ static void genpd_sync_power_off(struct generic_pm_domain *genpd)
 
 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
 		genpd_sd_counter_dec(link->master);
-		genpd_sync_power_off(link->master);
+
+		if (use_lock)
+			genpd_lock_nested(link->master, depth + 1);
+
+		genpd_sync_power_off(link->master, use_lock, depth + 1);
+
+		if (use_lock)
+			genpd_unlock(link->master);
 	}
 }
 
 /**
  * genpd_sync_power_on - Synchronously power on a PM domain and its masters.
  * @genpd: PM domain to power on.
+ * @use_lock: use the lock.
+ * @depth: nesting count for lockdep.
  *
  * This function is only called in "noirq" and "syscore" stages of system power
- * transitions, so it need not acquire locks (all of the "noirq" callbacks are
- * executed sequentially, so it is guaranteed that it will never run twice in
- * parallel).
+ * transitions. The "noirq" callbacks may be executed asynchronously, thus in
+ * these cases the lock must be held.
  */
-static void genpd_sync_power_on(struct generic_pm_domain *genpd)
+static void genpd_sync_power_on(struct generic_pm_domain *genpd, bool use_lock,
+				unsigned int depth)
 {
 	struct gpd_link *link;
 
@@ -778,8 +789,15 @@ static void genpd_sync_power_on(struct generic_pm_domain *genpd)
 		return;
 
 	list_for_each_entry(link, &genpd->slave_links, slave_node) {
-		genpd_sync_power_on(link->master);
 		genpd_sd_counter_inc(link->master);
+
+		if (use_lock)
+			genpd_lock_nested(link->master, depth + 1);
+
+		genpd_sync_power_on(link->master, use_lock, depth + 1);
+
+		if (use_lock)
+			genpd_unlock(link->master);
 	}
 
 	_genpd_power_on(genpd, false);
@@ -888,13 +906,10 @@ static int pm_genpd_suspend_noirq(struct device *dev)
 			return ret;
 	}
 
-	/*
-	 * Since all of the "noirq" callbacks are executed sequentially, it is
-	 * guaranteed that this function will never run twice in parallel for
-	 * the same PM domain, so it is not necessary to use locking here.
-	 */
+	genpd_lock(genpd);
 	genpd->suspended_count++;
-	genpd_sync_power_off(genpd);
+	genpd_sync_power_off(genpd, true, 0);
+	genpd_unlock(genpd);
 
 	return 0;
 }
@@ -919,13 +934,10 @@ static int pm_genpd_resume_noirq(struct device *dev)
 	if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
 		return 0;
 
-	/*
-	 * Since all of the "noirq" callbacks are executed sequentially, it is
-	 * guaranteed that this function will never run twice in parallel for
-	 * the same PM domain, so it is not necessary to use locking here.
-	 */
-	genpd_sync_power_on(genpd);
+	genpd_lock(genpd);
+	genpd_sync_power_on(genpd, true, 0);
 	genpd->suspended_count--;
+	genpd_unlock(genpd);
 
 	if (genpd->dev_ops.stop && genpd->dev_ops.start)
 		ret = pm_runtime_force_resume(dev);
@@ -1002,13 +1014,10 @@ static int pm_genpd_restore_noirq(struct device *dev)
 		return -EINVAL;
 
 	/*
-	 * Since all of the "noirq" callbacks are executed sequentially, it is
-	 * guaranteed that this function will never run twice in parallel for
-	 * the same PM domain, so it is not necessary to use locking here.
-	 *
 	 * At this point suspended_count == 0 means we are being run for the
 	 * first time for the given domain in the present cycle.
 	 */
+	genpd_lock(genpd);
 	if (genpd->suspended_count++ == 0)
 		/*
 		 * The boot kernel might put the domain into arbitrary state,
@@ -1017,7 +1026,8 @@ static int pm_genpd_restore_noirq(struct device *dev)
 		 */
 		genpd->status = GPD_STATE_POWER_OFF;
 
-	genpd_sync_power_on(genpd);
+	genpd_sync_power_on(genpd, true, 0);
+	genpd_unlock(genpd);
 
 	if (genpd->dev_ops.stop && genpd->dev_ops.start)
 		ret = pm_runtime_force_resume(dev);
@@ -1072,9 +1082,9 @@ static void genpd_syscore_switch(struct device *dev, bool suspend)
 
 	if (suspend) {
 		genpd->suspended_count++;
-		genpd_sync_power_off(genpd);
+		genpd_sync_power_off(genpd, false, 0);
 	} else {
-		genpd_sync_power_on(genpd);
+		genpd_sync_power_on(genpd, false, 0);
 		genpd->suspended_count--;
 	}
 }
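
For readers unfamiliar with the use_lock/depth pattern introduced above, here is a hedged, standalone sketch in plain C with pthreads (again not kernel code; every fake_* name is invented for the illustration). It models how genpd_sync_power_off() now propagates to a master domain: the master's lock is taken only when use_lock is true, as in the noirq path, while the syscore path above passes use_lock = false and keeps the lock-less behaviour; depth mirrors the nesting count handed to genpd_lock_nested() for lockdep and is unused in this user-space model.

/*
 * Standalone model of the conditional nested locking used when a powered-off
 * domain propagates to its master. Not kernel code.
 */
#include <pthread.h>
#include <stdio.h>

struct fake_domain {
	const char *name;
	pthread_mutex_t lock;
	int sd_count;			/* models the subdomain counter */
	int powered_on;
	struct fake_domain *master;	/* single master, for simplicity */
};

static void fake_sync_power_off(struct fake_domain *d, int use_lock,
				unsigned int depth)
{
	/* power off only when no active subdomains remain */
	if (!d->powered_on || d->sd_count > 0)
		return;

	d->powered_on = 0;
	printf("%s powered off (depth %u)\n", d->name, depth);

	if (!d->master)
		return;

	d->master->sd_count--;		/* models genpd_sd_counter_dec() */

	if (use_lock)			/* noirq path: take the master's lock */
		pthread_mutex_lock(&d->master->lock);

	fake_sync_power_off(d->master, use_lock, depth + 1);

	if (use_lock)
		pthread_mutex_unlock(&d->master->lock);
}

int main(void)
{
	struct fake_domain parent = {
		.name = "parent", .lock = PTHREAD_MUTEX_INITIALIZER,
		.sd_count = 1, .powered_on = 1, .master = NULL,
	};
	struct fake_domain child = {
		.name = "child", .lock = PTHREAD_MUTEX_INITIALIZER,
		.sd_count = 0, .powered_on = 1, .master = &parent,
	};

	/* noirq-style caller: hold the child's lock, then recurse with locking */
	pthread_mutex_lock(&child.lock);
	fake_sync_power_off(&child, 1, 0);
	pthread_mutex_unlock(&child.lock);

	/* a syscore-style caller would pass use_lock = 0 instead */
	return 0;
}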