Merge branches 'pm-core', 'pm-clk', 'pm-domains' and 'pm-pci'

* pm-core:
  PM / runtime: Asynchronous "idle" in pm_runtime_allow()
  PM / runtime: print error when activating a child to unactive parent

* pm-clk:
  PM / clk: Add support for adding a specific clock from device-tree
  PM / clk: export symbols for existing pm_clk_<...> API fcns

* pm-domains:
  PM / Domains: Convert pm_genpd_init() to return an error code
  PM / Domains: Stop/start devices during system PM suspend/resume in genpd
  PM / Domains: Allow runtime PM during system PM phases
  PM / Runtime: Avoid resuming devices again in pm_runtime_force_resume()
  PM / Domains: Remove redundant pm_request_idle() call in genpd
  PM / Domains: Remove redundant wrapper functions for system PM
  PM / Domains: Allow genpd to power on during system PM phases

* pm-pci:
  PCI / PM: check all fields in pci_set_platform_pm()
Rafael J. Wysocki 2016-07-25 13:45:27 +02:00
7 changed files with 117 additions and 280 deletions

drivers/base/power/clock_ops.c

@@ -121,6 +121,7 @@ int pm_clk_add(struct device *dev, const char *con_id)
 {
         return __pm_clk_add(dev, con_id, NULL);
 }
+EXPORT_SYMBOL_GPL(pm_clk_add);
 
 /**
  * pm_clk_add_clk - Start using a device clock for power management.
@@ -136,8 +137,41 @@ int pm_clk_add_clk(struct device *dev, struct clk *clk)
 {
         return __pm_clk_add(dev, NULL, clk);
 }
+EXPORT_SYMBOL_GPL(pm_clk_add_clk);
+
+/**
+ * of_pm_clk_add_clk - Start using a device clock for power management.
+ * @dev: Device whose clock is going to be used for power management.
+ * @name: Name of clock that is going to be used for power management.
+ *
+ * Add the clock described in the 'clocks' device-tree node that matches
+ * with the 'name' provided, to the list of clocks used for the power
+ * management of @dev. On success, returns 0. Returns a negative error
+ * code if the clock is not found or cannot be added.
+ */
+int of_pm_clk_add_clk(struct device *dev, const char *name)
+{
+        struct clk *clk;
+        int ret;
+
+        if (!dev || !dev->of_node || !name)
+                return -EINVAL;
+
+        clk = of_clk_get_by_name(dev->of_node, name);
+        if (IS_ERR(clk))
+                return PTR_ERR(clk);
+
+        ret = pm_clk_add_clk(dev, clk);
+        if (ret) {
+                clk_put(clk);
+                return ret;
+        }
+
+        return 0;
+}
+EXPORT_SYMBOL_GPL(of_pm_clk_add_clk);
 
 /**
  * of_pm_clk_add_clks - Start using device clock(s) for power management.
  * @dev: Device whose clock(s) is going to be used for power management.
@@ -192,6 +226,7 @@ error:
 
         return ret;
 }
+EXPORT_SYMBOL_GPL(of_pm_clk_add_clks);
 
 /**
  * __pm_clk_remove - Destroy PM clock entry.
@@ -252,6 +287,7 @@ void pm_clk_remove(struct device *dev, const char *con_id)
 
         __pm_clk_remove(ce);
 }
+EXPORT_SYMBOL_GPL(pm_clk_remove);
 
 /**
  * pm_clk_remove_clk - Stop using a device clock for power management.
@@ -285,6 +321,7 @@ void pm_clk_remove_clk(struct device *dev, struct clk *clk)
 
         __pm_clk_remove(ce);
 }
+EXPORT_SYMBOL_GPL(pm_clk_remove_clk);
 
 /**
  * pm_clk_init - Initialize a device's list of power management clocks.
@@ -299,6 +336,7 @@ void pm_clk_init(struct device *dev)
         if (psd)
                 INIT_LIST_HEAD(&psd->clock_list);
 }
+EXPORT_SYMBOL_GPL(pm_clk_init);
 
 /**
  * pm_clk_create - Create and initialize a device's list of PM clocks.
@@ -311,6 +349,7 @@ int pm_clk_create(struct device *dev)
 {
         return dev_pm_get_subsys_data(dev);
 }
+EXPORT_SYMBOL_GPL(pm_clk_create);
 
 /**
  * pm_clk_destroy - Destroy a device's list of power management clocks.
@@ -345,6 +384,7 @@ void pm_clk_destroy(struct device *dev)
                 __pm_clk_remove(ce);
         }
 }
+EXPORT_SYMBOL_GPL(pm_clk_destroy);
 
 /**
  * pm_clk_suspend - Disable clocks in a device's PM clock list.
@@ -375,6 +415,7 @@ int pm_clk_suspend(struct device *dev)
 
         return 0;
 }
+EXPORT_SYMBOL_GPL(pm_clk_suspend);
 
 /**
  * pm_clk_resume - Enable clocks in a device's PM clock list.
@@ -400,6 +441,7 @@ int pm_clk_resume(struct device *dev)
 
         return 0;
 }
+EXPORT_SYMBOL_GPL(pm_clk_resume);
 
 /**
  * pm_clk_notify - Notify routine for device addition and removal.
@@ -480,6 +522,7 @@ int pm_clk_runtime_suspend(struct device *dev)
 
         return 0;
 }
+EXPORT_SYMBOL_GPL(pm_clk_runtime_suspend);
 
 int pm_clk_runtime_resume(struct device *dev)
 {
@@ -495,6 +538,7 @@ int pm_clk_runtime_resume(struct device *dev)
 
         return pm_generic_runtime_resume(dev);
 }
+EXPORT_SYMBOL_GPL(pm_clk_runtime_resume);
 
 #else /* !CONFIG_PM_CLK */
 
@@ -598,3 +642,4 @@ void pm_clk_add_notifier(struct bus_type *bus,
         clknb->nb.notifier_call = pm_clk_notify;
         bus_register_notifier(bus, &clknb->nb);
 }
+EXPORT_SYMBOL_GPL(pm_clk_add_notifier);
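
With these symbols exported and of_pm_clk_add_clk() available, a loadable platform driver can hand a named clock from its device-tree node over to the PM clock framework. The following is a minimal sketch, not code from this commit: the foo_* names and the "bus" clock-names entry are assumptions made purely for illustration.

#include <linux/platform_device.h>
#include <linux/pm_clk.h>

/* Hypothetical probe: build the device's PM clock list and hand the
 * clock named "bus" in the DT 'clock-names' property to the PM core. */
static int foo_probe(struct platform_device *pdev)
{
        int ret;

        ret = pm_clk_create(&pdev->dev);
        if (ret)
                return ret;

        /* "bus" is an assumed clock-names entry, for illustration only. */
        ret = of_pm_clk_add_clk(&pdev->dev, "bus");
        if (ret) {
                pm_clk_destroy(&pdev->dev);
                return ret;
        }

        return 0;
}

static int foo_remove(struct platform_device *pdev)
{
        /* Drops and releases every clock on the device's PM clock list. */
        pm_clk_destroy(&pdev->dev);
        return 0;
}

Once the clocks are on the list, pm_clk_suspend() and pm_clk_resume() (typically invoked from a bus type's or genpd's runtime PM callbacks) gate and ungate them without further driver code.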

drivers/base/power/domain.c

@@ -187,8 +187,7 @@ static int genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth)
         struct gpd_link *link;
         int ret = 0;
 
-        if (genpd->status == GPD_STATE_ACTIVE
-            || (genpd->prepared_count > 0 && genpd->suspend_power_off))
+        if (genpd->status == GPD_STATE_ACTIVE)
                 return 0;
 
         /*
@@ -735,81 +734,23 @@ static int pm_genpd_prepare(struct device *dev)
 
         mutex_lock(&genpd->lock);
 
-        if (genpd->prepared_count++ == 0) {
+        if (genpd->prepared_count++ == 0)
                 genpd->suspended_count = 0;
-                genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
-        }
 
         mutex_unlock(&genpd->lock);
 
-        if (genpd->suspend_power_off)
-                return 0;
-
-        /*
-         * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
-         * so genpd_poweron() will return immediately, but if the device
-         * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
-         * to make it operational.
-         */
-        pm_runtime_resume(dev);
-        __pm_runtime_disable(dev, false);
-
         ret = pm_generic_prepare(dev);
         if (ret) {
                 mutex_lock(&genpd->lock);
 
-                if (--genpd->prepared_count == 0)
-                        genpd->suspend_power_off = false;
+                genpd->prepared_count--;
 
                 mutex_unlock(&genpd->lock);
-
-                pm_runtime_enable(dev);
         }
 
         return ret;
 }
 
-/**
- * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
- * @dev: Device to suspend.
- *
- * Suspend a device under the assumption that its pm_domain field points to the
- * domain member of an object of type struct generic_pm_domain representing
- * a PM domain consisting of I/O devices.
- */
-static int pm_genpd_suspend(struct device *dev)
-{
-        struct generic_pm_domain *genpd;
-
-        dev_dbg(dev, "%s()\n", __func__);
-
-        genpd = dev_to_genpd(dev);
-        if (IS_ERR(genpd))
-                return -EINVAL;
-
-        return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
-}
-
-/**
- * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
- * @dev: Device to suspend.
- *
- * Carry out a late suspend of a device under the assumption that its
- * pm_domain field points to the domain member of an object of type
- * struct generic_pm_domain representing a PM domain consisting of I/O devices.
- */
-static int pm_genpd_suspend_late(struct device *dev)
-{
-        struct generic_pm_domain *genpd;
-
-        dev_dbg(dev, "%s()\n", __func__);
-
-        genpd = dev_to_genpd(dev);
-        if (IS_ERR(genpd))
-                return -EINVAL;
-
-        return genpd->suspend_power_off ? 0 : pm_generic_suspend_late(dev);
-}
-
 /**
  * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
  * @dev: Device to suspend.
@@ -820,6 +761,7 @@ static int pm_genpd_suspend_late(struct device *dev)
 static int pm_genpd_suspend_noirq(struct device *dev)
 {
         struct generic_pm_domain *genpd;
+        int ret;
 
         dev_dbg(dev, "%s()\n", __func__);
 
@@ -827,11 +769,14 @@ static int pm_genpd_suspend_noirq(struct device *dev)
         if (IS_ERR(genpd))
                 return -EINVAL;
 
-        if (genpd->suspend_power_off
-            || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
+        if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
                 return 0;
 
-        genpd_stop_dev(genpd, dev);
+        if (genpd->dev_ops.stop && genpd->dev_ops.start) {
+                ret = pm_runtime_force_suspend(dev);
+                if (ret)
+                        return ret;
+        }
 
         /*
          * Since all of the "noirq" callbacks are executed sequentially, it is
@@ -853,6 +798,7 @@ static int pm_genpd_suspend_noirq(struct device *dev)
 static int pm_genpd_resume_noirq(struct device *dev)
 {
         struct generic_pm_domain *genpd;
+        int ret = 0;
 
         dev_dbg(dev, "%s()\n", __func__);
 
@@ -860,8 +806,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
         if (IS_ERR(genpd))
                 return -EINVAL;
 
-        if (genpd->suspend_power_off
-            || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
+        if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
                 return 0;
 
         /*
@@ -872,93 +817,10 @@ static int pm_genpd_resume_noirq(struct device *dev)
         pm_genpd_sync_poweron(genpd, true);
         genpd->suspended_count--;
 
-        return genpd_start_dev(genpd, dev);
-}
+        if (genpd->dev_ops.stop && genpd->dev_ops.start)
+                ret = pm_runtime_force_resume(dev);
 
-/**
- * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
- * @dev: Device to resume.
- *
- * Carry out an early resume of a device under the assumption that its
- * pm_domain field points to the domain member of an object of type
- * struct generic_pm_domain representing a power domain consisting of I/O
- * devices.
- */
-static int pm_genpd_resume_early(struct device *dev)
-{
-        struct generic_pm_domain *genpd;
-
-        dev_dbg(dev, "%s()\n", __func__);
-
-        genpd = dev_to_genpd(dev);
-        if (IS_ERR(genpd))
-                return -EINVAL;
-
-        return genpd->suspend_power_off ? 0 : pm_generic_resume_early(dev);
-}
-
-/**
- * pm_genpd_resume - Resume of device in an I/O PM domain.
- * @dev: Device to resume.
- *
- * Resume a device under the assumption that its pm_domain field points to the
- * domain member of an object of type struct generic_pm_domain representing
- * a power domain consisting of I/O devices.
- */
-static int pm_genpd_resume(struct device *dev)
-{
-        struct generic_pm_domain *genpd;
-
-        dev_dbg(dev, "%s()\n", __func__);
-
-        genpd = dev_to_genpd(dev);
-        if (IS_ERR(genpd))
-                return -EINVAL;
-
-        return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
-}
-
-/**
- * pm_genpd_freeze - Freezing a device in an I/O PM domain.
- * @dev: Device to freeze.
- *
- * Freeze a device under the assumption that its pm_domain field points to the
- * domain member of an object of type struct generic_pm_domain representing
- * a power domain consisting of I/O devices.
- */
-static int pm_genpd_freeze(struct device *dev)
-{
-        struct generic_pm_domain *genpd;
-
-        dev_dbg(dev, "%s()\n", __func__);
-
-        genpd = dev_to_genpd(dev);
-        if (IS_ERR(genpd))
-                return -EINVAL;
-
-        return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
-}
-
-/**
- * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
- * @dev: Device to freeze.
- *
- * Carry out a late freeze of a device under the assumption that its
- * pm_domain field points to the domain member of an object of type
- * struct generic_pm_domain representing a power domain consisting of I/O
- * devices.
- */
-static int pm_genpd_freeze_late(struct device *dev)
-{
-        struct generic_pm_domain *genpd;
-
-        dev_dbg(dev, "%s()\n", __func__);
-
-        genpd = dev_to_genpd(dev);
-        if (IS_ERR(genpd))
-                return -EINVAL;
-
-        return genpd->suspend_power_off ? 0 : pm_generic_freeze_late(dev);
+        return ret;
 }
 
 /**
@@ -973,6 +835,7 @@ static int pm_genpd_freeze_late(struct device *dev)
 static int pm_genpd_freeze_noirq(struct device *dev)
 {
         struct generic_pm_domain *genpd;
+        int ret = 0;
 
         dev_dbg(dev, "%s()\n", __func__);
 
@@ -980,7 +843,10 @@ static int pm_genpd_freeze_noirq(struct device *dev)
         if (IS_ERR(genpd))
                 return -EINVAL;
 
-        return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev);
+        if (genpd->dev_ops.stop && genpd->dev_ops.start)
+                ret = pm_runtime_force_suspend(dev);
+
+        return ret;
 }
 
 /**
@@ -993,6 +859,7 @@ static int pm_genpd_freeze_noirq(struct device *dev)
 static int pm_genpd_thaw_noirq(struct device *dev)
 {
         struct generic_pm_domain *genpd;
+        int ret = 0;
 
         dev_dbg(dev, "%s()\n", __func__);
 
@@ -1000,51 +867,10 @@ static int pm_genpd_thaw_noirq(struct device *dev)
         if (IS_ERR(genpd))
                 return -EINVAL;
 
-        return genpd->suspend_power_off ?
-                0 : genpd_start_dev(genpd, dev);
-}
+        if (genpd->dev_ops.stop && genpd->dev_ops.start)
+                ret = pm_runtime_force_resume(dev);
 
-/**
- * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
- * @dev: Device to thaw.
- *
- * Carry out an early thaw of a device under the assumption that its
- * pm_domain field points to the domain member of an object of type
- * struct generic_pm_domain representing a power domain consisting of I/O
- * devices.
- */
-static int pm_genpd_thaw_early(struct device *dev)
-{
-        struct generic_pm_domain *genpd;
-
-        dev_dbg(dev, "%s()\n", __func__);
-
-        genpd = dev_to_genpd(dev);
-        if (IS_ERR(genpd))
-                return -EINVAL;
-
-        return genpd->suspend_power_off ? 0 : pm_generic_thaw_early(dev);
-}
-
-/**
- * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
- * @dev: Device to thaw.
- *
- * Thaw a device under the assumption that its pm_domain field points to the
- * domain member of an object of type struct generic_pm_domain representing
- * a power domain consisting of I/O devices.
- */
-static int pm_genpd_thaw(struct device *dev)
-{
-        struct generic_pm_domain *genpd;
-
-        dev_dbg(dev, "%s()\n", __func__);
-
-        genpd = dev_to_genpd(dev);
-        if (IS_ERR(genpd))
-                return -EINVAL;
-
-        return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
+        return ret;
 }
 
 /**
@@ -1057,6 +883,7 @@ static int pm_genpd_thaw(struct device *dev)
 static int pm_genpd_restore_noirq(struct device *dev)
 {
         struct generic_pm_domain *genpd;
+        int ret = 0;
 
         dev_dbg(dev, "%s()\n", __func__);
 
@@ -1072,30 +899,20 @@ static int pm_genpd_restore_noirq(struct device *dev)
          * At this point suspended_count == 0 means we are being run for the
          * first time for the given domain in the present cycle.
          */
-        if (genpd->suspended_count++ == 0) {
+        if (genpd->suspended_count++ == 0)
                 /*
                  * The boot kernel might put the domain into arbitrary state,
                  * so make it appear as powered off to pm_genpd_sync_poweron(),
                  * so that it tries to power it on in case it was really off.
                  */
                 genpd->status = GPD_STATE_POWER_OFF;
-                if (genpd->suspend_power_off) {
-                        /*
-                         * If the domain was off before the hibernation, make
-                         * sure it will be off going forward.
-                         */
-                        genpd_power_off(genpd, true);
-
-                        return 0;
-                }
-        }
-
-        if (genpd->suspend_power_off)
-                return 0;
 
         pm_genpd_sync_poweron(genpd, true);
 
-        return genpd_start_dev(genpd, dev);
+        if (genpd->dev_ops.stop && genpd->dev_ops.start)
+                ret = pm_runtime_force_resume(dev);
+
+        return ret;
 }
 
 /**
@@ -1110,7 +927,6 @@ static int pm_genpd_restore_noirq(struct device *dev)
 static void pm_genpd_complete(struct device *dev)
 {
         struct generic_pm_domain *genpd;
-        bool run_complete;
 
         dev_dbg(dev, "%s()\n", __func__);
 
@@ -1118,20 +934,15 @@ static void pm_genpd_complete(struct device *dev)
         if (IS_ERR(genpd))
                 return;
 
+        pm_generic_complete(dev);
+
         mutex_lock(&genpd->lock);
 
-        run_complete = !genpd->suspend_power_off;
-        if (--genpd->prepared_count == 0)
-                genpd->suspend_power_off = false;
+        genpd->prepared_count--;
+        if (!genpd->prepared_count)
+                genpd_queue_power_off_work(genpd);
 
         mutex_unlock(&genpd->lock);
-
-        if (run_complete) {
-                pm_generic_complete(dev);
-                pm_runtime_set_active(dev);
-                pm_runtime_enable(dev);
-                pm_request_idle(dev);
-        }
 }
 
 /**
@@ -1173,18 +984,10 @@ EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
 #else /* !CONFIG_PM_SLEEP */
 
 #define pm_genpd_prepare        NULL
-#define pm_genpd_suspend        NULL
-#define pm_genpd_suspend_late   NULL
 #define pm_genpd_suspend_noirq  NULL
-#define pm_genpd_resume_early   NULL
 #define pm_genpd_resume_noirq   NULL
-#define pm_genpd_resume         NULL
-#define pm_genpd_freeze         NULL
-#define pm_genpd_freeze_late    NULL
 #define pm_genpd_freeze_noirq   NULL
-#define pm_genpd_thaw_early     NULL
 #define pm_genpd_thaw_noirq     NULL
-#define pm_genpd_thaw           NULL
 #define pm_genpd_restore_noirq  NULL
 #define pm_genpd_complete       NULL
 
@@ -1455,12 +1258,14 @@ EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);
  * @genpd: PM domain object to initialize.
  * @gov: PM domain governor to associate with the domain (may be NULL).
  * @is_off: Initial value of the domain's power_is_off field.
+ *
+ * Returns 0 on successful initialization, else a negative error code.
  */
-void pm_genpd_init(struct generic_pm_domain *genpd,
-                   struct dev_power_governor *gov, bool is_off)
+int pm_genpd_init(struct generic_pm_domain *genpd,
+                  struct dev_power_governor *gov, bool is_off)
 {
         if (IS_ERR_OR_NULL(genpd))
-                return;
+                return -EINVAL;
 
         INIT_LIST_HEAD(&genpd->master_links);
         INIT_LIST_HEAD(&genpd->slave_links);
@@ -1476,24 +1281,24 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
         genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
         genpd->domain.ops.runtime_resume = genpd_runtime_resume;
         genpd->domain.ops.prepare = pm_genpd_prepare;
-        genpd->domain.ops.suspend = pm_genpd_suspend;
-        genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
+        genpd->domain.ops.suspend = pm_generic_suspend;
+        genpd->domain.ops.suspend_late = pm_generic_suspend_late;
         genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
         genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
-        genpd->domain.ops.resume_early = pm_genpd_resume_early;
-        genpd->domain.ops.resume = pm_genpd_resume;
-        genpd->domain.ops.freeze = pm_genpd_freeze;
-        genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
+        genpd->domain.ops.resume_early = pm_generic_resume_early;
+        genpd->domain.ops.resume = pm_generic_resume;
+        genpd->domain.ops.freeze = pm_generic_freeze;
+        genpd->domain.ops.freeze_late = pm_generic_freeze_late;
         genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
         genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
-        genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
-        genpd->domain.ops.thaw = pm_genpd_thaw;
-        genpd->domain.ops.poweroff = pm_genpd_suspend;
-        genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
+        genpd->domain.ops.thaw_early = pm_generic_thaw_early;
+        genpd->domain.ops.thaw = pm_generic_thaw;
+        genpd->domain.ops.poweroff = pm_generic_poweroff;
+        genpd->domain.ops.poweroff_late = pm_generic_poweroff_late;
         genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
         genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
-        genpd->domain.ops.restore_early = pm_genpd_resume_early;
-        genpd->domain.ops.restore = pm_genpd_resume;
+        genpd->domain.ops.restore_early = pm_generic_restore_early;
+        genpd->domain.ops.restore = pm_generic_restore;
         genpd->domain.ops.complete = pm_genpd_complete;
 
         if (genpd->flags & GENPD_FLAG_PM_CLK) {
@@ -1518,6 +1323,8 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
         mutex_lock(&gpd_list_lock);
         list_add(&genpd->gpd_list_node, &gpd_list);
         mutex_unlock(&gpd_list_lock);
+
+        return 0;
 }
 EXPORT_SYMBOL_GPL(pm_genpd_init);
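
Since pm_genpd_init() can now fail, domain providers should start checking its result instead of assuming initialization always succeeds. A minimal sketch of provider-side setup follows; the "foo-pd" name and the choice of a NULL governor are illustrative assumptions, not taken from this commit.

#include <linux/pm_domain.h>
#include <linux/slab.h>

/* Hypothetical setup for a single PM domain that starts powered on. */
static int foo_pd_setup(void)
{
        struct generic_pm_domain *pd;
        int ret;

        pd = kzalloc(sizeof(*pd), GFP_KERNEL);
        if (!pd)
                return -ENOMEM;

        pd->name = "foo-pd";    /* placeholder name */

        /* NULL governor, is_off = false: the domain begins in the on state. */
        ret = pm_genpd_init(pd, NULL, false);
        if (ret) {
                kfree(pd);
                return ret;
        }

        return 0;
}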

drivers/base/power/runtime.c

@@ -1045,10 +1045,14 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
                 */
                if (!parent->power.disable_depth
                    && !parent->power.ignore_children
-                   && parent->power.runtime_status != RPM_ACTIVE)
+                   && parent->power.runtime_status != RPM_ACTIVE) {
+                        dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
+                                dev_name(dev),
+                                dev_name(parent));
                        error = -EBUSY;
-                else if (dev->power.runtime_status == RPM_SUSPENDED)
+                } else if (dev->power.runtime_status == RPM_SUSPENDED) {
                        atomic_inc(&parent->power.child_count);
+                }
 
                spin_unlock(&parent->power.lock);
 
@@ -1256,7 +1260,7 @@ void pm_runtime_allow(struct device *dev)
 
         dev->power.runtime_auto = true;
         if (atomic_dec_and_test(&dev->power.usage_count))
-                rpm_idle(dev, RPM_AUTO);
+                rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
 
  out:
         spin_unlock_irq(&dev->power.lock);
@@ -1506,6 +1510,9 @@ int pm_runtime_force_resume(struct device *dev)
                 goto out;
         }
 
+        if (!pm_runtime_status_suspended(dev))
+                goto out;
+
         ret = pm_runtime_set_active(dev);
         if (ret)
                 goto out;
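
The genpd noirq paths above now delegate device stop/start to pm_runtime_force_suspend() and pm_runtime_force_resume(), and the added pm_runtime_status_suspended() check keeps force-resume from touching a device that was never force-suspended. Drivers outside genpd can reuse the same helpers as their system sleep callbacks; the ops table below is a hedged sketch, with the foo_* runtime PM callbacks standing in for real ones.

#include <linux/pm.h>
#include <linux/pm_runtime.h>

/* Hypothetical runtime PM callbacks for an imaginary "foo" device. */
static int foo_runtime_suspend(struct device *dev)
{
        /* gate clocks, assert resets, etc. */
        return 0;
}

static int foo_runtime_resume(struct device *dev)
{
        /* ungate clocks, restore context, etc. */
        return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
        SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
        /* Reuse the runtime PM path for system suspend/resume. */
        SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                pm_runtime_force_resume)
};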

drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c

@@ -421,29 +421,6 @@ static int acp_suspend(void *handle)
 
 static int acp_resume(void *handle)
 {
-        int i, ret;
-        struct acp_pm_domain *apd;
-        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-        /* return early if no ACP */
-        if (!adev->acp.acp_genpd)
-                return 0;
-
-        /* SMU block will power on ACP irrespective of ACP runtime status.
-         * Power off explicitly based on genpd ACP runtime status so that ACP
-         * hw and ACP-genpd status are in sync.
-         * 'suspend_power_off' represents "Power status before system suspend"
-         */
-        if (adev->acp.acp_genpd->gpd.suspend_power_off == true) {
-                apd = container_of(&adev->acp.acp_genpd->gpd,
-                                   struct acp_pm_domain, gpd);
-                for (i = 4; i >= 0 ; i--) {
-                        ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_P1 + i);
-                        if (ret)
-                                pr_err("ACP tile %d tile suspend failed\n", i);
-                }
-        }
-
         return 0;
 }
 

drivers/pci/pci.c

@@ -530,8 +530,8 @@ static const struct pci_platform_pm_ops *pci_platform_pm;
 
 int pci_set_platform_pm(const struct pci_platform_pm_ops *ops)
 {
-        if (!ops->is_manageable || !ops->set_state || !ops->choose_state
-            || !ops->sleep_wake)
+        if (!ops->is_manageable || !ops->set_state || !ops->choose_state ||
+            !ops->sleep_wake || !ops->run_wake || !ops->need_resume)
                 return -EINVAL;
         pci_platform_pm = ops;
         return 0;
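
pci_set_platform_pm() now refuses an ops structure unless every callback, including the previously unchecked run_wake and need_resume, is populated. Below is a hedged sketch of a complete registration; the foo_* stubs are placeholders, and the callback signatures are assumed from struct pci_platform_pm_ops as declared in drivers/pci/pci.h around this kernel version.

#include <linux/pci.h>
#include "pci.h"        /* drivers/pci internal header declaring pci_platform_pm_ops (assumed build context) */

static bool foo_is_manageable(struct pci_dev *dev) { return true; }
static int foo_set_state(struct pci_dev *dev, pci_power_t state) { return 0; }
static pci_power_t foo_choose_state(struct pci_dev *dev) { return PCI_D3hot; }
static int foo_sleep_wake(struct pci_dev *dev, bool enable) { return 0; }
static int foo_run_wake(struct pci_dev *dev, bool enable) { return 0; }
static bool foo_need_resume(struct pci_dev *dev) { return false; }

static const struct pci_platform_pm_ops foo_platform_pm = {
        /* All six callbacks must be non-NULL, or registration fails with -EINVAL. */
        .is_manageable  = foo_is_manageable,
        .set_state      = foo_set_state,
        .choose_state   = foo_choose_state,
        .sleep_wake     = foo_sleep_wake,
        .run_wake       = foo_run_wake,
        .need_resume    = foo_need_resume,
};

/* Somewhere in platform init code:
 *      ret = pci_set_platform_pm(&foo_platform_pm);
 */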

include/linux/pm_clk.h

@@ -42,6 +42,7 @@ extern int pm_clk_create(struct device *dev);
 extern void pm_clk_destroy(struct device *dev);
 extern int pm_clk_add(struct device *dev, const char *con_id);
 extern int pm_clk_add_clk(struct device *dev, struct clk *clk);
+extern int of_pm_clk_add_clk(struct device *dev, const char *name);
 extern int of_pm_clk_add_clks(struct device *dev);
 extern void pm_clk_remove(struct device *dev, const char *con_id);
 extern void pm_clk_remove_clk(struct device *dev, struct clk *clk);

include/linux/pm_domain.h

@@ -57,7 +57,6 @@ struct generic_pm_domain {
         unsigned int device_count;      /* Number of devices */
         unsigned int suspended_count;   /* System suspend device counter */
         unsigned int prepared_count;    /* Suspend counter of prepared devices */
-        bool suspend_power_off;         /* Power status before system suspend */
         int (*power_off)(struct generic_pm_domain *domain);
         int (*power_on)(struct generic_pm_domain *domain);
         struct gpd_dev_ops dev_ops;
@@ -128,8 +127,8 @@ extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
                                   struct generic_pm_domain *new_subdomain);
 extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
                                      struct generic_pm_domain *target);
-extern void pm_genpd_init(struct generic_pm_domain *genpd,
-                          struct dev_power_governor *gov, bool is_off);
+extern int pm_genpd_init(struct generic_pm_domain *genpd,
+                         struct dev_power_governor *gov, bool is_off);
 
 extern struct dev_power_governor simple_qos_governor;
 extern struct dev_power_governor pm_domain_always_on_gov;
@@ -164,9 +163,10 @@ static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
 {
         return -ENOSYS;
 }
-static inline void pm_genpd_init(struct generic_pm_domain *genpd,
-                                 struct dev_power_governor *gov, bool is_off)
+static inline int pm_genpd_init(struct generic_pm_domain *genpd,
+                                struct dev_power_governor *gov, bool is_off)
 {
+        return -ENOSYS;
 }
 #endif
 