2019-04-02 21:32:01 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
2011-12-01 07:02:05 +08:00
|
|
|
/*
|
|
|
|
* drivers/base/power/domain_governor.c - Governors for device PM domains.
|
|
|
|
*
|
|
|
|
* Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
|
|
|
|
*/
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/pm_domain.h>
|
|
|
|
#include <linux/pm_qos.h>
|
2011-12-01 07:02:10 +08:00
|
|
|
#include <linux/hrtimer.h>
|
2019-04-12 02:17:33 +08:00
|
|
|
#include <linux/cpuidle.h>
|
|
|
|
#include <linux/cpumask.h>
|
|
|
|
#include <linux/ktime.h>
|
2011-12-01 07:02:05 +08:00
|
|
|
|
2012-04-30 04:54:17 +08:00
|
|
|
static int dev_update_qos_constraint(struct device *dev, void *data)
|
|
|
|
{
|
|
|
|
s64 *constraint_ns_p = data;
|
PM / domains: Rework governor code to be more consistent
The genpd governor currently uses negative PM QoS values to indicate
the "no suspend" condition and 0 as "no restriction", but it doesn't
use them consistently. Moreover, it tries to refresh QoS values for
already suspended devices in a quite questionable way.
For the above reasons, rework it to be a bit more consistent.
First off, note that dev_pm_qos_read_value() in
dev_update_qos_constraint() and __default_power_down_ok() is
evaluated for devices in suspend. Moreover, that only happens if the
effective_constraint_ns value for them is negative (meaning "no
suspend"). It is not evaluated in any other cases, so effectively
the QoS values are only updated for devices in suspend that should
not have been suspended in the first place. In all of the other
cases, the QoS values taken into account are the effective ones from
the time before the device has been suspended, so generally devices
need to be resumed and suspended again for new QoS values to take
effect anyway. Thus evaluating dev_update_qos_constraint() in
those two places doesn't make sense at all, so drop it.
Second, initialize effective_constraint_ns to 0 ("no constraint")
rather than to (-1) ("no suspend"), which makes more sense in
general and in case effective_constraint_ns is never updated
(the device is in suspend all the time or it is never suspended)
it doesn't affect the device's parent and so on.
Finally, rework default_suspend_ok() to explicitly handle the
"no restriction" and "no suspend" special cases.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Tested-by: Tero Kristo <t-kristo@ti.com>
Reviewed-by: Ramesh Thomas <ramesh.thomas@intel.com>
2017-11-07 09:23:18 +08:00
|
|
|
s64 constraint_ns;
|
2012-04-30 04:54:17 +08:00
|
|
|
|
PM / domains: Rework governor code to be more consistent
The genpd governor currently uses negative PM QoS values to indicate
the "no suspend" condition and 0 as "no restriction", but it doesn't
use them consistently. Moreover, it tries to refresh QoS values for
already suspended devices in a quite questionable way.
For the above reasons, rework it to be a bit more consistent.
First off, note that dev_pm_qos_read_value() in
dev_update_qos_constraint() and __default_power_down_ok() is
evaluated for devices in suspend. Moreover, that only happens if the
effective_constraint_ns value for them is negative (meaning "no
suspend"). It is not evaluated in any other cases, so effectively
the QoS values are only updated for devices in suspend that should
not have been suspended in the first place. In all of the other
cases, the QoS values taken into account are the effective ones from
the time before the device has been suspended, so generally devices
need to be resumed and suspended again for new QoS values to take
effect anyway. Thus evaluating dev_update_qos_constraint() in
those two places doesn't make sense at all, so drop it.
Second, initialize effective_constraint_ns to 0 ("no constraint")
rather than to (-1) ("no suspend"), which makes more sense in
general and in case effective_constraint_ns is never updated
(the device is in suspend all the time or it is never suspended)
it doesn't affect the device's parent and so on.
Finally, rework default_suspend_ok() to explicitly handle the
"no restriction" and "no suspend" special cases.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Tested-by: Tero Kristo <t-kristo@ti.com>
Reviewed-by: Ramesh Thomas <ramesh.thomas@intel.com>
2017-11-07 09:23:18 +08:00
|
|
|
if (dev->power.subsys_data && dev->power.subsys_data->domain_data) {
|
2022-05-11 22:56:56 +08:00
|
|
|
struct gpd_timing_data *td = dev_gpd_data(dev)->td;
|
|
|
|
|
PM / domains: Rework governor code to be more consistent
The genpd governor currently uses negative PM QoS values to indicate
the "no suspend" condition and 0 as "no restriction", but it doesn't
use them consistently. Moreover, it tries to refresh QoS values for
already suspended devices in a quite questionable way.
For the above reasons, rework it to be a bit more consistent.
First off, note that dev_pm_qos_read_value() in
dev_update_qos_constraint() and __default_power_down_ok() is
evaluated for devices in suspend. Moreover, that only happens if the
effective_constraint_ns value for them is negative (meaning "no
suspend"). It is not evaluated in any other cases, so effectively
the QoS values are only updated for devices in suspend that should
not have been suspended in the first place. In all of the other
cases, the QoS values taken into account are the effective ones from
the time before the device has been suspended, so generally devices
need to be resumed and suspended again for new QoS values to take
effect anyway. Thus evaluating dev_update_qos_constraint() in
those two places doesn't make sense at all, so drop it.
Second, initialize effective_constraint_ns to 0 ("no constraint")
rather than to (-1) ("no suspend"), which makes more sense in
general and in case effective_constraint_ns is never updated
(the device is in suspend all the time or it is never suspended)
it doesn't affect the device's parent and so on.
Finally, rework default_suspend_ok() to explicitly handle the
"no restriction" and "no suspend" special cases.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Tested-by: Tero Kristo <t-kristo@ti.com>
Reviewed-by: Ramesh Thomas <ramesh.thomas@intel.com>
2017-11-07 09:23:18 +08:00
|
|
|
/*
|
|
|
|
* Only take suspend-time QoS constraints of devices into
|
|
|
|
* account, because constraints updated after the device has
|
|
|
|
* been suspended are not guaranteed to be taken into account
|
|
|
|
* anyway. In order for them to take effect, the device has to
|
|
|
|
* be resumed and suspended again.
|
|
|
|
*/
|
2022-05-11 22:56:56 +08:00
|
|
|
constraint_ns = td ? td->effective_constraint_ns :
|
|
|
|
PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
|
PM / domains: Rework governor code to be more consistent
The genpd governor currently uses negative PM QoS values to indicate
the "no suspend" condition and 0 as "no restriction", but it doesn't
use them consistently. Moreover, it tries to refresh QoS values for
already suspended devices in a quite questionable way.
For the above reasons, rework it to be a bit more consistent.
First off, note that dev_pm_qos_read_value() in
dev_update_qos_constraint() and __default_power_down_ok() is
evaluated for devices in suspend. Moreover, that only happens if the
effective_constraint_ns value for them is negative (meaning "no
suspend"). It is not evaluated in any other cases, so effectively
the QoS values are only updated for devices in suspend that should
not have been suspended in the first place. In all of the other
cases, the QoS values taken into account are the effective ones from
the time before the device has been suspended, so generally devices
need to be resumed and suspended again for new QoS values to take
effect anyway. Thus evaluating dev_update_qos_constraint() in
those two places doesn't make sense at all, so drop it.
Second, initialize effective_constraint_ns to 0 ("no constraint")
rather than to (-1) ("no suspend"), which makes more sense in
general and in case effective_constraint_ns is never updated
(the device is in suspend all the time or it is never suspended)
it doesn't affect the device's parent and so on.
Finally, rework default_suspend_ok() to explicitly handle the
"no restriction" and "no suspend" special cases.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Tested-by: Tero Kristo <t-kristo@ti.com>
Reviewed-by: Ramesh Thomas <ramesh.thomas@intel.com>
2017-11-07 09:23:18 +08:00
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* The child is not in a domain and there's no info on its
|
|
|
|
* suspend/resume latencies, so assume them to be negligible and
|
|
|
|
* take its current PM QoS constraint (that's the only thing
|
|
|
|
* known at this point anyway).
|
|
|
|
*/
|
2019-07-04 15:36:19 +08:00
|
|
|
constraint_ns = dev_pm_qos_read_value(dev, DEV_PM_QOS_RESUME_LATENCY);
|
2017-11-07 18:33:49 +08:00
|
|
|
constraint_ns *= NSEC_PER_USEC;
|
2012-04-30 04:54:17 +08:00
|
|
|
}
|
PM / domains: Rework governor code to be more consistent
The genpd governor currently uses negative PM QoS values to indicate
the "no suspend" condition and 0 as "no restriction", but it doesn't
use them consistently. Moreover, it tries to refresh QoS values for
already suspended devices in a quite questionable way.
For the above reasons, rework it to be a bit more consistent.
First off, note that dev_pm_qos_read_value() in
dev_update_qos_constraint() and __default_power_down_ok() is
evaluated for devices in suspend. Moreover, that only happens if the
effective_constraint_ns value for them is negative (meaning "no
suspend"). It is not evaluated in any other cases, so effectively
the QoS values are only updated for devices in suspend that should
not have been suspended in the first place. In all of the other
cases, the QoS values taken into account are the effective ones from
the time before the device has been suspended, so generally devices
need to be resumed and suspended again for new QoS values to take
effect anyway. Thus evaluating dev_update_qos_constraint() in
those two places doesn't make sense at all, so drop it.
Second, initialize effective_constraint_ns to 0 ("no constraint")
rather than to (-1) ("no suspend"), which makes more sense in
general and in case effective_constraint_ns is never updated
(the device is in suspend all the time or it is never suspended)
it doesn't affect the device's parent and so on.
Finally, rework default_suspend_ok() to explicitly handle the
"no restriction" and "no suspend" special cases.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Tested-by: Tero Kristo <t-kristo@ti.com>
Reviewed-by: Ramesh Thomas <ramesh.thomas@intel.com>
2017-11-07 09:23:18 +08:00
|
|
|
|
2017-11-07 18:33:49 +08:00
|
|
|
if (constraint_ns < *constraint_ns_p)
|
2012-04-30 04:54:17 +08:00
|
|
|
*constraint_ns_p = constraint_ns;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2011-12-01 07:02:05 +08:00
|
|
|
/**
|
2016-03-31 17:21:25 +08:00
|
|
|
* default_suspend_ok - Default PM domain governor routine to suspend devices.
|
2011-12-01 07:02:05 +08:00
|
|
|
* @dev: Device to check.
|
2023-12-06 06:58:56 +08:00
|
|
|
*
|
|
|
|
* Returns: true if OK to suspend, false if not OK to suspend
|
2011-12-01 07:02:05 +08:00
|
|
|
*/
|
2016-03-31 17:21:25 +08:00
|
|
|
static bool default_suspend_ok(struct device *dev)
|
2011-12-01 07:02:05 +08:00
|
|
|
{
|
2022-05-11 22:56:56 +08:00
|
|
|
struct gpd_timing_data *td = dev_gpd_data(dev)->td;
|
PM / Domains: Cache device stop and domain power off governor results, v3
The results of the default device stop and domain power off governor
functions for generic PM domains, default_stop_ok() and
default_power_down_ok(), depend only on the timing data of devices,
which are static, and on their PM QoS constraints. Thus, in theory,
these functions only need to carry out their computations, which may
be time consuming in general, when it is known that the PM QoS
constraint of at least one of the devices in question has changed.
Use the PM QoS notifiers of devices to implement that. First,
introduce new fields, constraint_changed and max_off_time_changed,
into struct gpd_timing_data and struct generic_pm_domain,
respectively, and register a PM QoS notifier function when adding
a device into a domain that will set those fields to 'true' whenever
the device's PM QoS constraint is modified. Second, make
default_stop_ok() and default_power_down_ok() use those fields to
decide whether or not to carry out their computations from scratch.
The device and PM domain hierarchies are taken into account in that
and the expense is that the changes of PM QoS constraints of
suspended devices will not be taken into account immediately, which
isn't guaranteed anyway in general.
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
2012-05-02 03:34:07 +08:00
|
|
|
unsigned long flags;
|
2012-04-30 04:54:17 +08:00
|
|
|
s64 constraint_ns;
|
2011-12-01 07:02:05 +08:00
|
|
|
|
|
|
|
dev_dbg(dev, "%s()\n", __func__);
|
|
|
|
|
PM / Domains: Cache device stop and domain power off governor results, v3
The results of the default device stop and domain power off governor
functions for generic PM domains, default_stop_ok() and
default_power_down_ok(), depend only on the timing data of devices,
which are static, and on their PM QoS constraints. Thus, in theory,
these functions only need to carry out their computations, which may
be time consuming in general, when it is known that the PM QoS
constraint of at least one of the devices in question has changed.
Use the PM QoS notifiers of devices to implement that. First,
introduce new fields, constraint_changed and max_off_time_changed,
into struct gpd_timing_data and struct generic_pm_domain,
respectively, and register a PM QoS notifier function when adding
a device into a domain that will set those fields to 'true' whenever
the device's PM QoS constraint is modified. Second, make
default_stop_ok() and default_power_down_ok() use those fields to
decide whether or not to carry out their computations from scratch.
The device and PM domain hierarchies are taken into account in that
and the expense is that the changes of PM QoS constraints of
suspended devices will not be taken into account immediately, which
isn't guaranteed anyway in general.
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
2012-05-02 03:34:07 +08:00
|
|
|
spin_lock_irqsave(&dev->power.lock, flags);
|
|
|
|
|
|
|
|
if (!td->constraint_changed) {
|
2016-03-31 17:21:25 +08:00
|
|
|
bool ret = td->cached_suspend_ok;
|
PM / Domains: Cache device stop and domain power off governor results, v3
The results of the default device stop and domain power off governor
functions for generic PM domains, default_stop_ok() and
default_power_down_ok(), depend only on the timing data of devices,
which are static, and on their PM QoS constraints. Thus, in theory,
these functions only need to carry out their computations, which may
be time consuming in general, when it is known that the PM QoS
constraint of at least one of the devices in question has changed.
Use the PM QoS notifiers of devices to implement that. First,
introduce new fields, constraint_changed and max_off_time_changed,
into struct gpd_timing_data and struct generic_pm_domain,
respectively, and register a PM QoS notifier function when adding
a device into a domain that will set those fields to 'true' whenever
the device's PM QoS constraint is modified. Second, make
default_stop_ok() and default_power_down_ok() use those fields to
decide whether or not to carry out their computations from scratch.
The device and PM domain hierarchies are taken into account in that
and the expense is that the changes of PM QoS constraints of
suspended devices will not be taken into account immediately, which
isn't guaranteed anyway in general.
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
2012-05-02 03:34:07 +08:00
|
|
|
|
|
|
|
spin_unlock_irqrestore(&dev->power.lock, flags);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
td->constraint_changed = false;
|
2016-03-31 17:21:25 +08:00
|
|
|
td->cached_suspend_ok = false;
|
2017-11-07 18:33:49 +08:00
|
|
|
td->effective_constraint_ns = 0;
|
2019-07-04 15:36:18 +08:00
|
|
|
constraint_ns = __dev_pm_qos_resume_latency(dev);
|
PM / Domains: Cache device stop and domain power off governor results, v3
The results of the default device stop and domain power off governor
functions for generic PM domains, default_stop_ok() and
default_power_down_ok(), depend only on the timing data of devices,
which are static, and on their PM QoS constraints. Thus, in theory,
these functions only need to carry out their computations, which may
be time consuming in general, when it is known that the PM QoS
constraint of at least one of the devices in question has changed.
Use the PM QoS notifiers of devices to implement that. First,
introduce new fields, constraint_changed and max_off_time_changed,
into struct gpd_timing_data and struct generic_pm_domain,
respectively, and register a PM QoS notifier function when adding
a device into a domain that will set those fields to 'true' whenever
the device's PM QoS constraint is modified. Second, make
default_stop_ok() and default_power_down_ok() use those fields to
decide whether or not to carry out their computations from scratch.
The device and PM domain hierarchies are taken into account in that
and the expense is that the changes of PM QoS constraints of
suspended devices will not be taken into account immediately, which
isn't guaranteed anyway in general.
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
2012-05-02 03:34:07 +08:00
|
|
|
|
|
|
|
spin_unlock_irqrestore(&dev->power.lock, flags);
|
|
|
|
|
2017-11-07 18:33:49 +08:00
|
|
|
if (constraint_ns == 0)
|
2012-04-30 04:54:17 +08:00
|
|
|
return false;
|
2011-12-01 07:02:05 +08:00
|
|
|
|
2012-04-30 04:54:17 +08:00
|
|
|
constraint_ns *= NSEC_PER_USEC;
|
|
|
|
/*
|
|
|
|
* We can walk the children without any additional locking, because
|
PM / Domains: Cache device stop and domain power off governor results, v3
The results of the default device stop and domain power off governor
functions for generic PM domains, default_stop_ok() and
default_power_down_ok(), depend only on the timing data of devices,
which are static, and on their PM QoS constraints. Thus, in theory,
these functions only need to carry out their computations, which may
be time consuming in general, when it is known that the PM QoS
constraint of at least one of the devices in question has changed.
Use the PM QoS notifiers of devices to implement that. First,
introduce new fields, constraint_changed and max_off_time_changed,
into struct gpd_timing_data and struct generic_pm_domain,
respectively, and register a PM QoS notifier function when adding
a device into a domain that will set those fields to 'true' whenever
the device's PM QoS constraint is modified. Second, make
default_stop_ok() and default_power_down_ok() use those fields to
decide whether or not to carry out their computations from scratch.
The device and PM domain hierarchies are taken into account in that
and the expense is that the changes of PM QoS constraints of
suspended devices will not be taken into account immediately, which
isn't guaranteed anyway in general.
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
2012-05-02 03:34:07 +08:00
|
|
|
* they all have been suspended at this point and their
|
|
|
|
* effective_constraint_ns fields won't be modified in parallel with us.
|
2012-04-30 04:54:17 +08:00
|
|
|
*/
|
|
|
|
if (!dev->power.ignore_children)
|
|
|
|
device_for_each_child(dev, &constraint_ns,
|
|
|
|
dev_update_qos_constraint);
|
|
|
|
|
2017-11-07 18:33:49 +08:00
|
|
|
if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS) {
|
PM / domains: Rework governor code to be more consistent
The genpd governor currently uses negative PM QoS values to indicate
the "no suspend" condition and 0 as "no restriction", but it doesn't
use them consistently. Moreover, it tries to refresh QoS values for
already suspended devices in a quite questionable way.
For the above reasons, rework it to be a bit more consistent.
First off, note that dev_pm_qos_read_value() in
dev_update_qos_constraint() and __default_power_down_ok() is
evaluated for devices in suspend. Moreover, that only happens if the
effective_constraint_ns value for them is negative (meaning "no
suspend"). It is not evaluated in any other cases, so effectively
the QoS values are only updated for devices in suspend that should
not have been suspended in the first place. In all of the other
cases, the QoS values taken into account are the effective ones from
the time before the device has been suspended, so generally devices
need to be resumed and suspended again for new QoS values to take
effect anyway. Thus evaluating dev_update_qos_constraint() in
those two places doesn't make sense at all, so drop it.
Second, initialize effective_constraint_ns to 0 ("no constraint")
rather than to (-1) ("no suspend"), which makes more sense in
general and in case effective_constraint_ns is never updated
(the device is in suspend all the time or it is never suspended)
it doesn't affect the device's parent and so on.
Finally, rework default_suspend_ok() to explicitly handle the
"no restriction" and "no suspend" special cases.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Tested-by: Tero Kristo <t-kristo@ti.com>
Reviewed-by: Ramesh Thomas <ramesh.thomas@intel.com>
2017-11-07 09:23:18 +08:00
|
|
|
/* "No restriction", so the device is allowed to suspend. */
|
2017-11-07 18:33:49 +08:00
|
|
|
td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
|
PM / domains: Rework governor code to be more consistent
The genpd governor currently uses negative PM QoS values to indicate
the "no suspend" condition and 0 as "no restriction", but it doesn't
use them consistently. Moreover, it tries to refresh QoS values for
already suspended devices in a quite questionable way.
For the above reasons, rework it to be a bit more consistent.
First off, note that dev_pm_qos_read_value() in
dev_update_qos_constraint() and __default_power_down_ok() is
evaluated for devices in suspend. Moreover, that only happens if the
effective_constraint_ns value for them is negative (meaning "no
suspend"). It is not evaluated in any other cases, so effectively
the QoS values are only updated for devices in suspend that should
not have been suspended in the first place. In all of the other
cases, the QoS values taken into account are the effective ones from
the time before the device has been suspended, so generally devices
need to be resumed and suspended again for new QoS values to take
effect anyway. Thus evaluating dev_update_qos_constraint() in
those two places doesn't make sense at all, so drop it.
Second, initialize effective_constraint_ns to 0 ("no constraint")
rather than to (-1) ("no suspend"), which makes more sense in
general and in case effective_constraint_ns is never updated
(the device is in suspend all the time or it is never suspended)
it doesn't affect the device's parent and so on.
Finally, rework default_suspend_ok() to explicitly handle the
"no restriction" and "no suspend" special cases.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Tested-by: Tero Kristo <t-kristo@ti.com>
Reviewed-by: Ramesh Thomas <ramesh.thomas@intel.com>
2017-11-07 09:23:18 +08:00
|
|
|
td->cached_suspend_ok = true;
|
2017-11-07 18:33:49 +08:00
|
|
|
} else if (constraint_ns == 0) {
|
PM / domains: Rework governor code to be more consistent
The genpd governor currently uses negative PM QoS values to indicate
the "no suspend" condition and 0 as "no restriction", but it doesn't
use them consistently. Moreover, it tries to refresh QoS values for
already suspended devices in a quite questionable way.
For the above reasons, rework it to be a bit more consistent.
First off, note that dev_pm_qos_read_value() in
dev_update_qos_constraint() and __default_power_down_ok() is
evaluated for devices in suspend. Moreover, that only happens if the
effective_constraint_ns value for them is negative (meaning "no
suspend"). It is not evaluated in any other cases, so effectively
the QoS values are only updated for devices in suspend that should
not have been suspended in the first place. In all of the other
cases, the QoS values taken into account are the effective ones from
the time before the device has been suspended, so generally devices
need to be resumed and suspended again for new QoS values to take
effect anyway. Thus evaluating dev_update_qos_constraint() in
those two places doesn't make sense at all, so drop it.
Second, initialize effective_constraint_ns to 0 ("no constraint")
rather than to (-1) ("no suspend"), which makes more sense in
general and in case effective_constraint_ns is never updated
(the device is in suspend all the time or it is never suspended)
it doesn't affect the device's parent and so on.
Finally, rework default_suspend_ok() to explicitly handle the
"no restriction" and "no suspend" special cases.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Tested-by: Tero Kristo <t-kristo@ti.com>
Reviewed-by: Ramesh Thomas <ramesh.thomas@intel.com>
2017-11-07 09:23:18 +08:00
|
|
|
/*
|
|
|
|
* This triggers if one of the children that don't belong to a
|
2017-11-07 18:33:49 +08:00
|
|
|
* domain has a zero PM QoS constraint and it's better not to
|
|
|
|
* suspend then. effective_constraint_ns is zero already and
|
|
|
|
* cached_suspend_ok is false, so bail out.
|
PM / domains: Rework governor code to be more consistent
The genpd governor currently uses negative PM QoS values to indicate
the "no suspend" condition and 0 as "no restriction", but it doesn't
use them consistently. Moreover, it tries to refresh QoS values for
already suspended devices in a quite questionable way.
For the above reasons, rework it to be a bit more consistent.
First off, note that dev_pm_qos_read_value() in
dev_update_qos_constraint() and __default_power_down_ok() is
evaluated for devices in suspend. Moreover, that only happens if the
effective_constraint_ns value for them is negative (meaning "no
suspend"). It is not evaluated in any other cases, so effectively
the QoS values are only updated for devices in suspend that should
not have been suspended in the first place. In all of the other
cases, the QoS values taken into account are the effective ones from
the time before the device has been suspended, so generally devices
need to be resumed and suspended again for new QoS values to take
effect anyway. Thus evaluating dev_update_qos_constraint() in
those two places doesn't make sense at all, so drop it.
Second, initialize effective_constraint_ns to 0 ("no constraint")
rather than to (-1) ("no suspend"), which makes more sense in
general and in case effective_constraint_ns is never updated
(the device is in suspend all the time or it is never suspended)
it doesn't affect the device's parent and so on.
Finally, rework default_suspend_ok() to explicitly handle the
"no restriction" and "no suspend" special cases.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Tested-by: Tero Kristo <t-kristo@ti.com>
Reviewed-by: Ramesh Thomas <ramesh.thomas@intel.com>
2017-11-07 09:23:18 +08:00
|
|
|
*/
|
|
|
|
return false;
|
|
|
|
} else {
|
2015-10-15 23:02:19 +08:00
|
|
|
constraint_ns -= td->suspend_latency_ns +
|
|
|
|
td->resume_latency_ns;
|
PM / domains: Rework governor code to be more consistent
The genpd governor currently uses negative PM QoS values to indicate
the "no suspend" condition and 0 as "no restriction", but it doesn't
use them consistently. Moreover, it tries to refresh QoS values for
already suspended devices in a quite questionable way.
For the above reasons, rework it to be a bit more consistent.
First off, note that dev_pm_qos_read_value() in
dev_update_qos_constraint() and __default_power_down_ok() is
evaluated for devices in suspend. Moreover, that only happens if the
effective_constraint_ns value for them is negative (meaning "no
suspend"). It is not evaluated in any other cases, so effectively
the QoS values are only updated for devices in suspend that should
not have been suspended in the first place. In all of the other
cases, the QoS values taken into account are the effective ones from
the time before the device has been suspended, so generally devices
need to be resumed and suspended again for new QoS values to take
effect anyway. Thus evaluating dev_update_qos_constraint() in
those two places doesn't make sense at all, so drop it.
Second, initialize effective_constraint_ns to 0 ("no constraint")
rather than to (-1) ("no suspend"), which makes more sense in
general and in case effective_constraint_ns is never updated
(the device is in suspend all the time or it is never suspended)
it doesn't affect the device's parent and so on.
Finally, rework default_suspend_ok() to explicitly handle the
"no restriction" and "no suspend" special cases.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Tested-by: Tero Kristo <t-kristo@ti.com>
Reviewed-by: Ramesh Thomas <ramesh.thomas@intel.com>
2017-11-07 09:23:18 +08:00
|
|
|
/*
|
2017-11-07 18:33:49 +08:00
|
|
|
* effective_constraint_ns is zero already and cached_suspend_ok
|
|
|
|
* is false, so if the computed value is not positive, return
|
|
|
|
* right away.
|
PM / domains: Rework governor code to be more consistent
The genpd governor currently uses negative PM QoS values to indicate
the "no suspend" condition and 0 as "no restriction", but it doesn't
use them consistently. Moreover, it tries to refresh QoS values for
already suspended devices in a quite questionable way.
For the above reasons, rework it to be a bit more consistent.
First off, note that dev_pm_qos_read_value() in
dev_update_qos_constraint() and __default_power_down_ok() is
evaluated for devices in suspend. Moreover, that only happens if the
effective_constraint_ns value for them is negative (meaning "no
suspend"). It is not evaluated in any other cases, so effectively
the QoS values are only updated for devices in suspend that should
not have been suspended in the first place. In all of the other
cases, the QoS values taken into account are the effective ones from
the time before the device has been suspended, so generally devices
need to be resumed and suspended again for new QoS values to take
effect anyway. Thus evaluating dev_update_qos_constraint() in
those two places doesn't make sense at all, so drop it.
Second, initialize effective_constraint_ns to 0 ("no constraint")
rather than to (-1) ("no suspend"), which makes more sense in
general and in case effective_constraint_ns is never updated
(the device is in suspend all the time or it is never suspended)
it doesn't affect the device's parent and so on.
Finally, rework default_suspend_ok() to explicitly handle the
"no restriction" and "no suspend" special cases.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Tested-by: Tero Kristo <t-kristo@ti.com>
Reviewed-by: Ramesh Thomas <ramesh.thomas@intel.com>
2017-11-07 09:23:18 +08:00
|
|
|
*/
|
|
|
|
if (constraint_ns <= 0)
|
2012-04-30 04:54:17 +08:00
|
|
|
return false;
|
PM / domains: Rework governor code to be more consistent
The genpd governor currently uses negative PM QoS values to indicate
the "no suspend" condition and 0 as "no restriction", but it doesn't
use them consistently. Moreover, it tries to refresh QoS values for
already suspended devices in a quite questionable way.
For the above reasons, rework it to be a bit more consistent.
First off, note that dev_pm_qos_read_value() in
dev_update_qos_constraint() and __default_power_down_ok() is
evaluated for devices in suspend. Moreover, that only happens if the
effective_constraint_ns value for them is negative (meaning "no
suspend"). It is not evaluated in any other cases, so effectively
the QoS values are only updated for devices in suspend that should
not have been suspended in the first place. In all of the other
cases, the QoS values taken into account are the effective ones from
the time before the device has been suspended, so generally devices
need to be resumed and suspended again for new QoS values to take
effect anyway. Thus evaluating dev_update_qos_constraint() in
those two places doesn't make sense at all, so drop it.
Second, initialize effective_constraint_ns to 0 ("no constraint")
rather than to (-1) ("no suspend"), which makes more sense in
general and in case effective_constraint_ns is never updated
(the device is in suspend all the time or it is never suspended)
it doesn't affect the device's parent and so on.
Finally, rework default_suspend_ok() to explicitly handle the
"no restriction" and "no suspend" special cases.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Tested-by: Tero Kristo <t-kristo@ti.com>
Reviewed-by: Ramesh Thomas <ramesh.thomas@intel.com>
2017-11-07 09:23:18 +08:00
|
|
|
|
|
|
|
td->effective_constraint_ns = constraint_ns;
|
|
|
|
td->cached_suspend_ok = true;
|
2012-04-30 04:54:17 +08:00
|
|
|
}
|
2015-10-13 22:10:28 +08:00
|
|
|
|
2012-04-30 04:54:17 +08:00
|
|
|
/*
|
|
|
|
* The children have been suspended already, so we don't need to take
|
2016-03-31 17:21:25 +08:00
|
|
|
* their suspend latencies into account here.
|
2012-04-30 04:54:17 +08:00
|
|
|
*/
|
2016-03-31 17:21:25 +08:00
|
|
|
return td->cached_suspend_ok;
|
2011-12-01 07:02:05 +08:00
|
|
|
}
|
|
|
|
|
2021-01-20 23:50:42 +08:00
|
|
|
/*
 * update_domain_next_wakeup - Refresh the domain's cached next-wakeup time.
 * @genpd: PM domain whose gd->next_wakeup is to be (re)computed.
 * @now: Current time; wakeups earlier than this are treated as stale.
 *
 * Scans all devices attached to @genpd and all of its subdomains for the
 * earliest future next_wakeup value and stores it in genpd->gd->next_wakeup.
 * Only meaningful for domains with GENPD_FLAG_MIN_RESIDENCY set; otherwise
 * this is a no-op.
 */
static void update_domain_next_wakeup(struct generic_pm_domain *genpd, ktime_t now)
{
	ktime_t domain_wakeup = KTIME_MAX;
	ktime_t next_wakeup;
	struct pm_domain_data *pdd;
	struct gpd_link *link;

	if (!(genpd->flags & GENPD_FLAG_MIN_RESIDENCY))
		return;

	/*
	 * Devices that have a predictable wakeup pattern, may specify
	 * their next wakeup. Let's find the next wakeup from all the
	 * devices attached to this domain and from all the sub-domains.
	 * It is possible that a component's next wakeup may have become
	 * stale when we read that here. We will ignore to ensure the domain
	 * is able to enter its optimal idle state.
	 */
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		next_wakeup = to_gpd_data(pdd)->td->next_wakeup;
		/* Skip "no wakeup" (KTIME_MAX) and already-passed wakeups. */
		if (next_wakeup != KTIME_MAX && !ktime_before(next_wakeup, now))
			if (ktime_before(next_wakeup, domain_wakeup))
				domain_wakeup = next_wakeup;
	}

	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		struct genpd_governor_data *cgd = link->child->gd;

		/* A child without governor data contributes no wakeup. */
		next_wakeup = cgd ? cgd->next_wakeup : KTIME_MAX;
		if (next_wakeup != KTIME_MAX && !ktime_before(next_wakeup, now))
			if (ktime_before(next_wakeup, domain_wakeup))
				domain_wakeup = next_wakeup;
	}

	genpd->gd->next_wakeup = domain_wakeup;
}
|
|
|
|
|
|
|
|
/*
 * next_wakeup_allows_state - Check if an idle state fits before the next wakeup.
 * @genpd: PM domain to check (uses the cached genpd->gd->next_wakeup).
 * @state: Index into genpd->states[] of the candidate idle state.
 * @now: Current time.
 *
 * Returns true if the time remaining until the domain's next wakeup is long
 * enough to cover both the power-off latency and the minimum residency of
 * @state, i.e. entering the state is worthwhile.
 */
static bool next_wakeup_allows_state(struct generic_pm_domain *genpd,
				     unsigned int state, ktime_t now)
{
	ktime_t domain_wakeup = genpd->gd->next_wakeup;
	s64 idle_time_ns, min_sleep_ns;

	/* Minimum useful sleep: time to enter the state plus its residency. */
	min_sleep_ns = genpd->states[state].power_off_latency_ns +
		       genpd->states[state].residency_ns;

	idle_time_ns = ktime_to_ns(ktime_sub(domain_wakeup, now));

	return idle_time_ns >= min_sleep_ns;
}
|
|
|
|
|
2016-02-15 18:10:51 +08:00
|
|
|
/*
 * __default_power_down_ok - Check latency constraints against one idle state.
 * @pd: PM domain to check.
 * @state: Index into genpd->states[] of the idle state under consideration.
 *
 * Returns true if every subdomain and every device in the domain can tolerate
 * the power-off/power-on round trip of @state.  As a side effect, updates
 * genpd->gd->max_off_time_ns with the maximum time the domain may remain off
 * (or leaves it at -1 when there is no latency constraint at all).
 */
static bool __default_power_down_ok(struct dev_pm_domain *pd,
				    unsigned int state)
{
	struct generic_pm_domain *genpd = pd_to_genpd(pd);
	struct gpd_link *link;
	struct pm_domain_data *pdd;
	s64 min_off_time_ns;
	s64 off_on_time_ns;

	off_on_time_ns = genpd->states[state].power_off_latency_ns +
		genpd->states[state].power_on_latency_ns;

	min_off_time_ns = -1;
	/*
	 * Check if subdomains can be off for enough time.
	 *
	 * All subdomains have been powered off already at this point.
	 */
	list_for_each_entry(link, &genpd->parent_links, parent_node) {
		struct genpd_governor_data *cgd = link->child->gd;

		/* -1 means "no constraint" for a child without governor data. */
		s64 sd_max_off_ns = cgd ? cgd->max_off_time_ns : -1;

		if (sd_max_off_ns < 0)
			continue;

		/*
		 * Check if the subdomain is allowed to be off long enough for
		 * the current domain to turn off and on (that's how much time
		 * it will have to wait worst case).
		 */
		if (sd_max_off_ns <= off_on_time_ns)
			return false;

		if (min_off_time_ns > sd_max_off_ns || min_off_time_ns < 0)
			min_off_time_ns = sd_max_off_ns;
	}

	/*
	 * Check if the devices in the domain can be off enough time.
	 */
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		struct gpd_timing_data *td;
		s64 constraint_ns;

		/*
		 * Check if the device is allowed to be off long enough for the
		 * domain to turn off and on (that's how much time it will
		 * have to wait worst case).
		 */
		td = to_gpd_data(pdd)->td;
		constraint_ns = td->effective_constraint_ns;
		/*
		 * Zero means "no suspend at all" and this runs only when all
		 * devices in the domain are suspended, so it must be positive.
		 */
		if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS)
			continue;

		if (constraint_ns <= off_on_time_ns)
			return false;

		if (min_off_time_ns > constraint_ns || min_off_time_ns < 0)
			min_off_time_ns = constraint_ns;
	}

	/*
	 * If the computed minimum device off time is negative, there are no
	 * latency constraints, so the domain can spend arbitrary time in the
	 * "off" state.
	 */
	if (min_off_time_ns < 0)
		return true;

	/*
	 * The difference between the computed minimum subdomain or device off
	 * time and the time needed to turn the domain on is the maximum
	 * theoretical time this domain can spend in the "off" state.
	 */
	genpd->gd->max_off_time_ns = min_off_time_ns -
		genpd->states[state].power_on_latency_ns;
	return true;
}
|
|
|
|
|
2017-06-28 22:56:21 +08:00
|
|
|
/**
 * _default_power_down_ok - Default generic PM domain power off governor routine.
 * @pd: PM domain to check.
 * @now: current ktime.
 *
 * This routine must be executed under the PM domain's lock.
 *
 * Returns: true if OK to power down, false if not OK to power down
 */
static bool _default_power_down_ok(struct dev_pm_domain *pd, ktime_t now)
{
	struct generic_pm_domain *genpd = pd_to_genpd(pd);
	struct genpd_governor_data *gd = genpd->gd;
	int state_idx = genpd->state_count - 1;	/* start from the deepest state */
	struct gpd_link *link;

	/*
	 * Find the next wakeup from devices that can determine their own wakeup
	 * to find when the domain would wakeup and do it for every device down
	 * the hierarchy. It is not worth while to sleep if the state's residency
	 * cannot be met.
	 */
	update_domain_next_wakeup(genpd, now);
	if ((genpd->flags & GENPD_FLAG_MIN_RESIDENCY) && (gd->next_wakeup != KTIME_MAX)) {
		/* Let's find out the deepest domain idle state, the devices prefer */
		while (state_idx >= 0) {
			if (next_wakeup_allows_state(genpd, state_idx, now)) {
				/* Force re-evaluation of the QoS constraints below. */
				gd->max_off_time_changed = true;
				break;
			}
			state_idx--;
		}

		if (state_idx < 0) {
			/* No state satisfies the wakeup: stay powered on. */
			state_idx = 0;
			gd->cached_power_down_ok = false;
			goto done;
		}
	}

	/* Nothing changed since the last evaluation: reuse the cached answer. */
	if (!gd->max_off_time_changed) {
		genpd->state_idx = gd->cached_power_down_state_idx;
		return gd->cached_power_down_ok;
	}

	/*
	 * We have to invalidate the cached results for the parents, so
	 * use the observation that default_power_down_ok() is not
	 * going to be called for any parent until this instance
	 * returns.
	 */
	list_for_each_entry(link, &genpd->child_links, child_node) {
		struct genpd_governor_data *pgd = link->parent->gd;

		if (pgd)
			pgd->max_off_time_changed = true;
	}

	gd->max_off_time_ns = -1;	/* -1 == "no constraint yet" */
	gd->max_off_time_changed = false;
	gd->cached_power_down_ok = true;

	/*
	 * Find a state to power down to, starting from the state
	 * determined by the next wakeup.
	 */
	while (!__default_power_down_ok(pd, state_idx)) {
		if (state_idx == 0) {
			/* Even the shallowest state violates the constraints. */
			gd->cached_power_down_ok = false;
			break;
		}
		state_idx--;
	}

done:
	genpd->state_idx = state_idx;
	gd->cached_power_down_state_idx = genpd->state_idx;
	return gd->cached_power_down_ok;
}
|
|
|
|
|
2021-01-20 23:50:42 +08:00
|
|
|
/* Governor callback wrapper: evaluate power-down against the current time. */
static bool default_power_down_ok(struct dev_pm_domain *pd)
{
	return _default_power_down_ok(pd, ktime_get());
}
|
|
|
|
|
2019-04-12 02:17:33 +08:00
|
|
|
#ifdef CONFIG_CPU_IDLE
|
|
|
|
static bool cpu_power_down_ok(struct dev_pm_domain *pd)
|
|
|
|
{
|
|
|
|
struct generic_pm_domain *genpd = pd_to_genpd(pd);
|
|
|
|
struct cpuidle_device *dev;
|
|
|
|
ktime_t domain_wakeup, next_hrtimer;
|
2021-01-20 23:50:42 +08:00
|
|
|
ktime_t now = ktime_get();
|
2019-04-12 02:17:33 +08:00
|
|
|
s64 idle_duration_ns;
|
|
|
|
int cpu, i;
|
|
|
|
|
|
|
|
/* Validate dev PM QoS constraints. */
|
2021-01-20 23:50:42 +08:00
|
|
|
if (!_default_power_down_ok(pd, now))
|
2019-04-12 02:17:33 +08:00
|
|
|
return false;
|
|
|
|
|
|
|
|
if (!(genpd->flags & GENPD_FLAG_CPU_DOMAIN))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Find the next wakeup for any of the online CPUs within the PM domain
|
|
|
|
* and its subdomains. Note, we only need the genpd->cpus, as it already
|
|
|
|
* contains a mask of all CPUs from subdomains.
|
|
|
|
*/
|
|
|
|
domain_wakeup = ktime_set(KTIME_SEC_MAX, 0);
|
|
|
|
for_each_cpu_and(cpu, genpd->cpus, cpu_online_mask) {
|
|
|
|
dev = per_cpu(cpuidle_devices, cpu);
|
|
|
|
if (dev) {
|
|
|
|
next_hrtimer = READ_ONCE(dev->next_hrtimer);
|
|
|
|
if (ktime_before(next_hrtimer, domain_wakeup))
|
|
|
|
domain_wakeup = next_hrtimer;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* The minimum idle duration is from now - until the next wakeup. */
|
2021-01-20 23:50:42 +08:00
|
|
|
idle_duration_ns = ktime_to_ns(ktime_sub(domain_wakeup, now));
|
2019-04-12 02:17:33 +08:00
|
|
|
if (idle_duration_ns <= 0)
|
|
|
|
return false;
|
|
|
|
|
2022-10-18 23:28:35 +08:00
|
|
|
/* Store the next domain_wakeup to allow consumers to use it. */
|
|
|
|
genpd->gd->next_hrtimer = domain_wakeup;
|
|
|
|
|
2019-04-12 02:17:33 +08:00
|
|
|
/*
|
|
|
|
* Find the deepest idle state that has its residency value satisfied
|
|
|
|
* and by also taking into account the power off latency for the state.
|
|
|
|
* Start at the state picked by the dev PM QoS constraint validation.
|
|
|
|
*/
|
|
|
|
i = genpd->state_idx;
|
|
|
|
do {
|
|
|
|
if (idle_duration_ns >= (genpd->states[i].residency_ns +
|
|
|
|
genpd->states[i].power_off_latency_ns)) {
|
|
|
|
genpd->state_idx = i;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
} while (--i >= 0);
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
struct dev_power_governor pm_domain_cpu_gov = {
|
|
|
|
.suspend_ok = default_suspend_ok,
|
|
|
|
.power_down_ok = cpu_power_down_ok,
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
|
2012-01-14 07:39:36 +08:00
|
|
|
/* Governor that powers down whenever device/subdomain QoS latencies allow it. */
struct dev_power_governor simple_qos_governor = {
	.suspend_ok = default_suspend_ok,
	.power_down_ok = default_power_down_ok,
};
|
|
|
|
|
2023-12-06 06:58:56 +08:00
|
|
|
/*
 * pm_domain_always_on_gov - A governor implementing an always-on policy
 */
struct dev_power_governor pm_domain_always_on_gov = {
	/* No .power_down_ok callback: the domain is never powered off. */
	.suspend_ok = default_suspend_ok,
};