PM / Domains: Allow callbacks to execute all runtime PM helpers

A deadlock may occur if one of the PM domains' .start_device() or
.stop_device() callbacks or a device driver's .runtime_suspend() or
.runtime_resume() callback executed by the core generic PM domain
code uses a "wrong" runtime PM helper function.  This happens, for
example, if .runtime_resume() from one device's driver calls
pm_runtime_resume() for another device in the same PM domain.
A similar situation may take place if a device's parent is in the
same PM domain, in which case the runtime PM framework may execute
pm_genpd_runtime_resume() automatically for the parent (if it is
suspended at the moment).  This, of course, is undesirable, so
the generic PM domains code should be modified to prevent it from
happening.

The runtime PM framework guarantees that pm_genpd_runtime_suspend()
and pm_genpd_runtime_resume() won't be executed in parallel for
the same device, so the generic PM domains code need not worry
about those cases.  Still, it needs to prevent the other possible
race conditions between pm_genpd_runtime_suspend(),
pm_genpd_runtime_resume(), pm_genpd_poweron() and pm_genpd_poweroff()
from happening and it needs to avoid deadlocks at the same time.
To this end, modify the generic PM domains code to relax
synchronization rules so that:

* pm_genpd_poweron() doesn't wait for the PM domain status to
  change from GPD_STATE_BUSY.  If it finds that the status is
  not GPD_STATE_POWER_OFF, it returns without powering the domain on
  (it may modify the status depending on the circumstances).

* pm_genpd_poweroff() returns as soon as it finds that the PM
  domain's status changed from GPD_STATE_BUSY after it's released
  the PM domain's lock.

* pm_genpd_runtime_suspend() doesn't wait for the PM domain status
  to change from GPD_STATE_BUSY after executing the domain's
  .stop_device() callback and executes pm_genpd_poweroff() only
  if pm_genpd_runtime_resume() is not executed in parallel.

* pm_genpd_runtime_resume() doesn't wait for the PM domain status
  to change from GPD_STATE_BUSY after executing pm_genpd_poweron()
  and sets the domain's status to GPD_STATE_BUSY and increments its
  counter of resuming devices (introduced by this change) immediately
  after acquiring the lock.  The counter of resuming devices is then
  decremented after executing __pm_genpd_runtime_resume() for the
  device and the domain's status is reset to GPD_STATE_ACTIVE (unless
  there are more resuming devices in the domain, in which case the
  status remains GPD_STATE_BUSY).

This way, for example, if a device driver's .runtime_resume()
callback executes pm_runtime_resume() for another device in the same
PM domain, pm_genpd_poweron() called by pm_genpd_runtime_resume()
invoked by the runtime PM framework will not block and it will see
that there's nothing to do for it.  Next, the PM domain's lock will
be acquired without waiting for its status to change from
GPD_STATE_BUSY and the device driver's .runtime_resume() callback
will be executed.  In turn, if pm_runtime_suspend() is executed by
one device driver's .runtime_resume() callback for another device in
the same PM domain, pm_genpd_poweroff() executed by
pm_genpd_runtime_suspend() invoked by the runtime PM framework as a
result will notice that one of the devices in the domain is being
resumed, so it will return immediately.

Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
This commit is contained in:
Rafael J. Wysocki 2011-07-12 00:39:36 +02:00
parent 17b75eca76
commit c6d22b3726
2 changed files with 102 additions and 45 deletions

View File

@ -44,7 +44,8 @@ static void genpd_acquire_lock(struct generic_pm_domain *genpd)
for (;;) { for (;;) {
prepare_to_wait(&genpd->status_wait_queue, &wait, prepare_to_wait(&genpd->status_wait_queue, &wait,
TASK_UNINTERRUPTIBLE); TASK_UNINTERRUPTIBLE);
if (genpd->status != GPD_STATE_BUSY) if (genpd->status == GPD_STATE_ACTIVE
|| genpd->status == GPD_STATE_POWER_OFF)
break; break;
mutex_unlock(&genpd->lock); mutex_unlock(&genpd->lock);
@ -60,6 +61,12 @@ static void genpd_release_lock(struct generic_pm_domain *genpd)
mutex_unlock(&genpd->lock); mutex_unlock(&genpd->lock);
} }
static void genpd_set_active(struct generic_pm_domain *genpd)
{
if (genpd->resume_count == 0)
genpd->status = GPD_STATE_ACTIVE;
}
/** /**
* pm_genpd_poweron - Restore power to a given PM domain and its parents. * pm_genpd_poweron - Restore power to a given PM domain and its parents.
* @genpd: PM domain to power up. * @genpd: PM domain to power up.
@ -75,42 +82,24 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd)
start: start:
if (parent) { if (parent) {
mutex_lock(&parent->lock); genpd_acquire_lock(parent);
mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING); mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
} else { } else {
mutex_lock(&genpd->lock); mutex_lock(&genpd->lock);
} }
/*
* Wait for the domain to transition into either the active,
* or the power off state.
*/
for (;;) {
prepare_to_wait(&genpd->status_wait_queue, &wait,
TASK_UNINTERRUPTIBLE);
if (genpd->status != GPD_STATE_BUSY)
break;
mutex_unlock(&genpd->lock);
if (parent)
mutex_unlock(&parent->lock);
schedule();
if (parent) {
mutex_lock(&parent->lock);
mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
} else {
mutex_lock(&genpd->lock);
}
}
finish_wait(&genpd->status_wait_queue, &wait);
if (genpd->status == GPD_STATE_ACTIVE if (genpd->status == GPD_STATE_ACTIVE
|| (genpd->prepared_count > 0 && genpd->suspend_power_off)) || (genpd->prepared_count > 0 && genpd->suspend_power_off))
goto out; goto out;
if (genpd->status != GPD_STATE_POWER_OFF) {
genpd_set_active(genpd);
goto out;
}
if (parent && parent->status != GPD_STATE_ACTIVE) { if (parent && parent->status != GPD_STATE_ACTIVE) {
mutex_unlock(&genpd->lock); mutex_unlock(&genpd->lock);
mutex_unlock(&parent->lock); genpd_release_lock(parent);
ret = pm_genpd_poweron(parent); ret = pm_genpd_poweron(parent);
if (ret) if (ret)
@ -125,14 +114,14 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd)
goto out; goto out;
} }
genpd->status = GPD_STATE_ACTIVE; genpd_set_active(genpd);
if (parent) if (parent)
parent->sd_count++; parent->sd_count++;
out: out:
mutex_unlock(&genpd->lock); mutex_unlock(&genpd->lock);
if (parent) if (parent)
mutex_unlock(&parent->lock); genpd_release_lock(parent);
return ret; return ret;
} }
@ -209,6 +198,20 @@ static void __pm_genpd_restore_device(struct dev_list_entry *dle,
dle->need_restore = false; dle->need_restore = false;
} }
/**
* genpd_abort_poweroff - Check if a PM domain power off should be aborted.
* @genpd: PM domain to check.
*
* Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
* a "power off" operation, which means that a "power on" has occurred in the
* meantime, or if its resume_count field is different from zero, which means
* that one of its devices has been resumed in the meantime.
*/
static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
{
return genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
}
/** /**
* pm_genpd_poweroff - Remove power from a given PM domain. * pm_genpd_poweroff - Remove power from a given PM domain.
* @genpd: PM domain to power down. * @genpd: PM domain to power down.
@ -223,9 +226,17 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
struct generic_pm_domain *parent; struct generic_pm_domain *parent;
struct dev_list_entry *dle; struct dev_list_entry *dle;
unsigned int not_suspended; unsigned int not_suspended;
int ret; int ret = 0;
if (genpd->status == GPD_STATE_POWER_OFF || genpd->prepared_count > 0) start:
/*
* Do not try to power off the domain in the following situations:
* (1) The domain is already in the "power off" state.
* (2) System suspend is in progress.
* (3) One of the domain's devices is being resumed right now.
*/
if (genpd->status == GPD_STATE_POWER_OFF || genpd->prepared_count > 0
|| genpd->resume_count > 0)
return 0; return 0;
if (genpd->sd_count > 0) if (genpd->sd_count > 0)
@ -239,34 +250,54 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
if (not_suspended > genpd->in_progress) if (not_suspended > genpd->in_progress)
return -EBUSY; return -EBUSY;
if (genpd->poweroff_task) {
/*
* Another instance of pm_genpd_poweroff() is executing
* callbacks, so tell it to start over and return.
*/
genpd->status = GPD_STATE_REPEAT;
return 0;
}
if (genpd->gov && genpd->gov->power_down_ok) { if (genpd->gov && genpd->gov->power_down_ok) {
if (!genpd->gov->power_down_ok(&genpd->domain)) if (!genpd->gov->power_down_ok(&genpd->domain))
return -EAGAIN; return -EAGAIN;
} }
genpd->status = GPD_STATE_BUSY; genpd->status = GPD_STATE_BUSY;
genpd->poweroff_task = current;
list_for_each_entry_reverse(dle, &genpd->dev_list, node) { list_for_each_entry_reverse(dle, &genpd->dev_list, node) {
ret = __pm_genpd_save_device(dle, genpd); ret = __pm_genpd_save_device(dle, genpd);
if (ret) if (ret)
goto err_dev; goto err_dev;
}
mutex_unlock(&genpd->lock); if (genpd_abort_poweroff(genpd))
goto out;
if (genpd->status == GPD_STATE_REPEAT) {
genpd->poweroff_task = NULL;
goto start;
}
}
parent = genpd->parent; parent = genpd->parent;
if (parent) { if (parent) {
mutex_unlock(&genpd->lock);
genpd_acquire_lock(parent); genpd_acquire_lock(parent);
mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING); mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
} else {
mutex_lock(&genpd->lock); if (genpd_abort_poweroff(genpd)) {
genpd_release_lock(parent);
goto out;
}
} }
if (genpd->power_off) if (genpd->power_off)
genpd->power_off(genpd); genpd->power_off(genpd);
genpd->status = GPD_STATE_POWER_OFF; genpd->status = GPD_STATE_POWER_OFF;
wake_up_all(&genpd->status_wait_queue);
if (parent) { if (parent) {
genpd_sd_counter_dec(parent); genpd_sd_counter_dec(parent);
@ -276,16 +307,17 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
genpd_release_lock(parent); genpd_release_lock(parent);
} }
return 0; out:
genpd->poweroff_task = NULL;
wake_up_all(&genpd->status_wait_queue);
return ret;
err_dev: err_dev:
list_for_each_entry_continue(dle, &genpd->dev_list, node) list_for_each_entry_continue(dle, &genpd->dev_list, node)
__pm_genpd_restore_device(dle, genpd); __pm_genpd_restore_device(dle, genpd);
genpd->status = GPD_STATE_ACTIVE; genpd_set_active(genpd);
wake_up_all(&genpd->status_wait_queue); goto out;
return ret;
} }
/** /**
@ -327,11 +359,11 @@ static int pm_genpd_runtime_suspend(struct device *dev)
return ret; return ret;
} }
genpd_acquire_lock(genpd); mutex_lock(&genpd->lock);
genpd->in_progress++; genpd->in_progress++;
pm_genpd_poweroff(genpd); pm_genpd_poweroff(genpd);
genpd->in_progress--; genpd->in_progress--;
genpd_release_lock(genpd); mutex_unlock(&genpd->lock);
return 0; return 0;
} }
@ -365,6 +397,7 @@ static void __pm_genpd_runtime_resume(struct device *dev,
static int pm_genpd_runtime_resume(struct device *dev) static int pm_genpd_runtime_resume(struct device *dev)
{ {
struct generic_pm_domain *genpd; struct generic_pm_domain *genpd;
DEFINE_WAIT(wait);
int ret; int ret;
dev_dbg(dev, "%s()\n", __func__); dev_dbg(dev, "%s()\n", __func__);
@ -377,12 +410,31 @@ static int pm_genpd_runtime_resume(struct device *dev)
if (ret) if (ret)
return ret; return ret;
genpd_acquire_lock(genpd); mutex_lock(&genpd->lock);
genpd->status = GPD_STATE_BUSY; genpd->status = GPD_STATE_BUSY;
genpd->resume_count++;
for (;;) {
prepare_to_wait(&genpd->status_wait_queue, &wait,
TASK_UNINTERRUPTIBLE);
/*
* If current is the powering off task, we have been called
* reentrantly from one of the device callbacks, so we should
* not wait.
*/
if (!genpd->poweroff_task || genpd->poweroff_task == current)
break;
mutex_unlock(&genpd->lock);
schedule();
mutex_lock(&genpd->lock);
}
finish_wait(&genpd->status_wait_queue, &wait);
__pm_genpd_runtime_resume(dev, genpd); __pm_genpd_runtime_resume(dev, genpd);
genpd->status = GPD_STATE_ACTIVE; genpd->resume_count--;
genpd_set_active(genpd);
wake_up_all(&genpd->status_wait_queue); wake_up_all(&genpd->status_wait_queue);
genpd_release_lock(genpd); mutex_unlock(&genpd->lock);
if (genpd->start_device) if (genpd->start_device)
genpd->start_device(dev); genpd->start_device(dev);
@ -1130,6 +1182,8 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
genpd->sd_count = 0; genpd->sd_count = 0;
genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE; genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
init_waitqueue_head(&genpd->status_wait_queue); init_waitqueue_head(&genpd->status_wait_queue);
genpd->poweroff_task = NULL;
genpd->resume_count = 0;
genpd->device_count = 0; genpd->device_count = 0;
genpd->suspended_count = 0; genpd->suspended_count = 0;
genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend; genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;

View File

@ -14,6 +14,7 @@
enum gpd_status { enum gpd_status {
GPD_STATE_ACTIVE = 0, /* PM domain is active */ GPD_STATE_ACTIVE = 0, /* PM domain is active */
GPD_STATE_BUSY, /* Something is happening to the PM domain */ GPD_STATE_BUSY, /* Something is happening to the PM domain */
GPD_STATE_REPEAT, /* Power off in progress, to be repeated */
GPD_STATE_POWER_OFF, /* PM domain is off */ GPD_STATE_POWER_OFF, /* PM domain is off */
}; };
@ -34,6 +35,8 @@ struct generic_pm_domain {
unsigned int sd_count; /* Number of subdomains with power "on" */ unsigned int sd_count; /* Number of subdomains with power "on" */
enum gpd_status status; /* Current state of the domain */ enum gpd_status status; /* Current state of the domain */
wait_queue_head_t status_wait_queue; wait_queue_head_t status_wait_queue;
struct task_struct *poweroff_task; /* Powering off task */
unsigned int resume_count; /* Number of devices being resumed */
unsigned int device_count; /* Number of devices */ unsigned int device_count; /* Number of devices */
unsigned int suspended_count; /* System suspend device counter */ unsigned int suspended_count; /* System suspend device counter */
unsigned int prepared_count; /* Suspend counter of prepared devices */ unsigned int prepared_count; /* Suspend counter of prepared devices */