Merge branch 'pm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
* 'pm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (63 commits)
  PM / Clocks: Remove redundant NULL checks before kfree()
  PM / Documentation: Update docs about suspend and CPU hotplug
  ACPI / PM: Add Sony VGN-FW21E to nonvs blacklist.
  ARM: mach-shmobile: sh7372 A4R support (v4)
  ARM: mach-shmobile: sh7372 A3SP support (v4)
  PM / Sleep: Mark devices involved in wakeup signaling during suspend
  PM / Hibernate: Improve performance of LZO/plain hibernation, checksum image
  PM / Hibernate: Do not initialize static and extern variables to 0
  PM / Freezer: Make fake_signal_wake_up() wake TASK_KILLABLE tasks too
  PM / Hibernate: Add resumedelay kernel param in addition to resumewait
  MAINTAINERS: Update linux-pm list address
  PM / ACPI: Blacklist Vaio VGN-FW520F machine known to require acpi_sleep=nonvs
  PM / ACPI: Blacklist Sony Vaio known to require acpi_sleep=nonvs
  PM / Hibernate: Add resumewait param to support MMC-like devices as resume file
  PM / Hibernate: Fix typo in a kerneldoc comment
  PM / Hibernate: Freeze kernel threads after preallocating memory
  PM: Update the policy on default wakeup settings
  PM / VT: Cleanup #if defined uglyness and fix compile error
  PM / Suspend: Off by one in pm_suspend()
  PM / Hibernate: Include storage keys in hibernation image on s390
  ...
This commit is contained in: commit 7e0bb71e75

52	Documentation/ABI/testing/sysfs-class-devfreq	(new file)
@@ -0,0 +1,52 @@
What:		/sys/class/devfreq/.../
Date:		September 2011
Contact:	MyungJoo Ham <myungjoo.ham@samsung.com>
Description:
		Provide a place in sysfs for the devfreq objects.
		This allows accessing various devfreq specific variables.
		The name of the devfreq object denoted as ... is the same
		as the name of the device using devfreq.

What:		/sys/class/devfreq/.../governor
Date:		September 2011
Contact:	MyungJoo Ham <myungjoo.ham@samsung.com>
Description:
		The /sys/class/devfreq/.../governor shows the name of the
		governor used by the corresponding devfreq object.

What:		/sys/class/devfreq/.../cur_freq
Date:		September 2011
Contact:	MyungJoo Ham <myungjoo.ham@samsung.com>
Description:
		The /sys/class/devfreq/.../cur_freq shows the current
		frequency of the corresponding devfreq object.

What:		/sys/class/devfreq/.../central_polling
Date:		September 2011
Contact:	MyungJoo Ham <myungjoo.ham@samsung.com>
Description:
		The /sys/class/devfreq/.../central_polling shows whether
		the devfreq object is using the devfreq-provided central
		polling mechanism or not.

What:		/sys/class/devfreq/.../polling_interval
Date:		September 2011
Contact:	MyungJoo Ham <myungjoo.ham@samsung.com>
Description:
		The /sys/class/devfreq/.../polling_interval shows and sets
		the requested polling interval of the corresponding devfreq
		object. The values are represented in ms. If the value is
		less than 1 jiffy, it is considered to be 0, which means
		no polling. This value is meaningless if the governor is
		not polling. If the governor is not using the
		devfreq-provided central polling
		(/sys/class/devfreq/.../central_polling is 0), this value
		may be useless.

What:		/sys/class/devfreq/.../userspace/set_freq
Date:		September 2011
Contact:	MyungJoo Ham <myungjoo.ham@samsung.com>
Description:
		The /sys/class/devfreq/.../userspace/set_freq shows and
		sets the requested frequency for the devfreq object if
		userspace governor is in effect.
@@ -2246,6 +2246,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			in <PAGE_SIZE> units (needed only for swap files).
 			See Documentation/power/swsusp-and-swap-files.txt

+	resumedelay=	[HIBERNATION] Delay (in seconds) to pause before
+			attempting to read the resume files
+
+	resumewait	[HIBERNATION] Wait (indefinitely) for resume device
+			to show up. Useful for devices that are detected
+			asynchronously (e.g. USB and MMC devices).
+
 	hibernate=	[HIBERNATION]
 		noresume	Don't check if there's a hibernation image
 				present during boot.
@@ -26,6 +26,8 @@ s2ram.txt
 	- How to get suspend to ram working (and debug it when it isn't)
 states.txt
 	- System power management states
+suspend-and-cpuhotplug.txt
+	- Explains the interaction between Suspend-to-RAM (S3) and CPU hotplug
 swsusp-and-swap-files.txt
 	- Using swap files with software suspend (to disk)
 swsusp-dmcrypt.txt
@@ -201,3 +201,27 @@ case, you may be able to search for failing drivers by following the procedure
 analogous to the one described in section 1. If you find some failing drivers,
 you will have to unload them every time before an STR transition (i.e. before
 you run s2ram), and please report the problems with them.
+
+There is a debugfs entry which shows the suspend to RAM statistics. Here is an
+example of its output.
+	# mount -t debugfs none /sys/kernel/debug
+	# cat /sys/kernel/debug/suspend_stats
+	success: 20
+	fail: 5
+	failed_freeze: 0
+	failed_prepare: 0
+	failed_suspend: 5
+	failed_suspend_noirq: 0
+	failed_resume: 0
+	failed_resume_noirq: 0
+	failures:
+	  last_failed_dev:	alarm
+				adc
+	  last_failed_errno:	-16
+				-16
+	  last_failed_step:	suspend
+				suspend
+The success field is the number of successful suspend-to-RAM transitions and
+fail is the number of failed ones; the other failed_* fields count failures in
+the individual steps of the suspend sequence. suspend_stats just lists the
+last 2 failed devices, error numbers and failed steps of suspend.
@@ -152,7 +152,9 @@ try to use its wakeup mechanism. device_set_wakeup_enable() affects this flag;
 for the most part drivers should not change its value. The initial value of
 should_wakeup is supposed to be false for the majority of devices; the major
 exceptions are power buttons, keyboards, and Ethernet adapters whose WoL
-(wake-on-LAN) feature has been set up with ethtool.
+(wake-on-LAN) feature has been set up with ethtool. It should also default
+to true for devices that don't generate wakeup requests on their own but merely
+forward wakeup requests from one bus to another (like PCI bridges).

 Whether or not a device is capable of issuing wakeup events is a hardware
 matter, and the kernel is responsible for keeping track of it. By contrast,
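[Illustration, not part of this commit: a sketch of how a driver whose device should default to wakeup-enabled (e.g. a power-button-like device) might set these flags at probe time; the function name and the policy choice are assumptions.]

#include <linux/device.h>
#include <linux/pm_wakeup.h>

static int mybutton_probe(struct device *dev)
{
	/* The hardware can signal wakeup: set can_wakeup. */
	device_set_wakeup_capable(dev, true);
	/* Power buttons are among the exceptions that should default
	 * to wakeup enabled: set should_wakeup. */
	device_set_wakeup_enable(dev, true);
	return 0;
}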
@@ -279,10 +281,6 @@ When the system goes into the standby or memory sleep state, the phases are:
 	time.)  Unlike the other suspend-related phases, during the prepare
 	phase the device tree is traversed top-down.

-	In addition to that, if device drivers need to allocate additional
-	memory to be able to handle device suspend correctly, that should be
-	done in the prepare phase.
-
 	After the prepare callback method returns, no new children may be
 	registered below the device.  The method may also prepare the device or
 	driver in some way for the upcoming system power transition (for
@@ -4,14 +4,19 @@ This interface provides a kernel and user mode interface for registering
 performance expectations by drivers, subsystems and user space applications on
 one of the parameters.

-Currently we have {cpu_dma_latency, network_latency, network_throughput} as the
-initial set of pm_qos parameters.
+Two different PM QoS frameworks are available:
+1. PM QoS classes for cpu_dma_latency, network_latency, network_throughput.
+2. The per-device PM QoS framework, which provides the API to manage the
+   per-device latency constraints.

 Each parameter has defined units:
 * latency: usec
 * timeout: usec
 * throughput: kbs (kilo bit / sec)


+1. PM QoS framework
+
 The infrastructure exposes multiple misc device nodes, one per implemented
 parameter.  The set of parameters implemented is defined by pm_qos_power_init()
 and pm_qos_params.h. This is done because having the available parameters
@@ -23,14 +28,18 @@ an aggregated target value.  The aggregated target value is updated with
 changes to the request list or elements of the list.  Typically the
 aggregated target value is simply the max or min of the request values held
 in the parameter list elements.
 Note: the aggregated target value is implemented as an atomic variable so that
 reading the aggregated value does not require any locking mechanism.


 From kernel mode the use of this interface is simple:

-handle = pm_qos_add_request(param_class, target_value):
-Will insert an element into the list for that identified PM_QOS class with the
+void pm_qos_add_request(handle, param_class, target_value):
+Will insert an element into the list for that identified PM QoS class with the
 target value.  Upon change to this list the new target is recomputed and any
 registered notifiers are called only if the target value is now different.
-Clients of pm_qos need to save the returned handle.
+Clients of pm_qos need to save the returned handle for future use in other
+pm_qos API functions.

 void pm_qos_update_request(handle, new_target_value):
 Will update the list element pointed to by the handle with the new target value
@@ -42,6 +51,20 @@ Will remove the element.  After removal it will update the aggregate target and
 call the notification tree if the target was changed as a result of removing
 the request.

+int pm_qos_request(param_class):
+Returns the aggregated value for a given PM QoS class.
+
+int pm_qos_request_active(handle):
+Returns if the request is still active, i.e. it has not been removed from a
+PM QoS class constraints list.
+
+int pm_qos_add_notifier(param_class, notifier):
+Adds a notification callback function to the PM QoS class. The callback is
+called when the aggregated value for the PM QoS class is changed.
+
+int pm_qos_remove_notifier(int param_class, notifier):
+Removes the notification callback function for the PM QoS class.
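[Illustration, not part of this commit: a minimal kernel-mode sketch of the request lifecycle described above, using the PM_QOS_CPU_DMA_LATENCY class; the driver context and the 20/100 usec values are assumptions.]

#include <linux/pm_qos.h>

static struct pm_qos_request mydrv_qos_req;	/* the handle */

static void mydrv_start_fast_io(void)
{
	/* Ask that CPU DMA latency not exceed 20 usec. */
	pm_qos_add_request(&mydrv_qos_req, PM_QOS_CPU_DMA_LATENCY, 20);
}

static void mydrv_relax(void)
{
	/* Notifiers run only if the aggregated target changes. */
	pm_qos_update_request(&mydrv_qos_req, 100);
}

static void mydrv_stop_fast_io(void)
{
	/* Drop the constraint; the aggregate is recomputed. */
	pm_qos_remove_request(&mydrv_qos_req);
}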

 From user mode:
 Only processes can register a pm_qos request. To provide for automatic
@@ -63,4 +86,63 @@ To remove the user mode request for a target value simply close the device
 node.
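[Illustration, not part of this commit: a user-space sketch of the open/write/close protocol; the 20-usec value is arbitrary. The request stays in effect only while the file descriptor is held open.]

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

int main(void)
{
	int32_t latency_us = 20;
	int fd = open("/dev/cpu_dma_latency", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, &latency_us, sizeof(latency_us)) < 0) {
		close(fd);
		return 1;
	}
	/* ... latency-sensitive work while fd is held open ... */
	close(fd);	/* closing the device node removes the request */
	return 0;
}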
+
+
+2. PM QoS per-device latency framework
+
+For each device a list of performance requests is maintained along with
+an aggregated target value.  The aggregated target value is updated with
+changes to the request list or elements of the list.  Typically the
+aggregated target value is simply the max or min of the request values held
+in the parameter list elements.
+Note: the aggregated target value is implemented as an atomic variable so that
+reading the aggregated value does not require any locking mechanism.
+
+
+From kernel mode the use of this interface is the following:
+
+int dev_pm_qos_add_request(device, handle, value):
+Will insert an element into the list for that identified device with the
+target value.  Upon change to this list the new target is recomputed and any
+registered notifiers are called only if the target value is now different.
+Clients of dev_pm_qos need to save the handle for future use in other
+dev_pm_qos API functions.
+
+int dev_pm_qos_update_request(handle, new_value):
+Will update the list element pointed to by the handle with the new target
+value and recompute the new aggregated target, calling the notification
+trees if the target is changed.
+
+int dev_pm_qos_remove_request(handle):
+Will remove the element.  After removal it will update the aggregate target
+and call the notification trees if the target was changed as a result of
+removing the request.
+
+s32 dev_pm_qos_read_value(device):
+Returns the aggregated value for a given device's constraints list.
+
+
+Notification mechanisms:
+The per-device PM QoS framework has two different and distinct notification
+trees: a per-device notification tree and a global notification tree.
+
+int dev_pm_qos_add_notifier(device, notifier):
+Adds a notification callback function for the device.
+The callback is called when the aggregated value of the device constraints
+list is changed.
+
+int dev_pm_qos_remove_notifier(device, notifier):
+Removes the notification callback function for the device.
+
+int dev_pm_qos_add_global_notifier(notifier):
+Adds a notification callback function in the global notification tree of the
+framework.
+The callback is called when the aggregated value for any device is changed.
+
+int dev_pm_qos_remove_global_notifier(notifier):
+Removes the notification callback function from the global notification tree
+of the framework.
+
+
+From user mode:
+No API for user space access to the per-device latency constraints is
+provided yet - still under discussion.
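[Illustration, not part of this commit: a minimal kernel-mode sketch of the per-device API above; the 100-usec value and the driver context are assumptions.]

#include <linux/device.h>
#include <linux/pm_qos.h>

static struct dev_pm_qos_request mydrv_dev_req;	/* per-device handle */

static int mydrv_do_critical_work(struct device *dev)
{
	int ret;

	ret = dev_pm_qos_add_request(dev, &mydrv_dev_req, 100);
	if (ret < 0)
		return ret;

	/* ... time-critical work under the constraint ... */
	dev_dbg(dev, "aggregated constraint now %d\n",
		dev_pm_qos_read_value(dev));

	return dev_pm_qos_remove_request(&mydrv_dev_req);
}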
@@ -43,13 +43,18 @@ struct dev_pm_ops {
 	...
 };

-The ->runtime_suspend(), ->runtime_resume() and ->runtime_idle() callbacks are
-executed by the PM core for either the device type, or the class (if the device
-type's struct dev_pm_ops object does not exist), or the bus type (if the
-device type's and class' struct dev_pm_ops objects do not exist) of the given
-device (this allows device types to override callbacks provided by bus types or
-classes if necessary). The bus type, device type and class callbacks are
-referred to as subsystem-level callbacks in what follows.
+The ->runtime_suspend(), ->runtime_resume() and ->runtime_idle() callbacks
+are executed by the PM core for either the power domain, or the device type
+(if the device power domain's struct dev_pm_ops does not exist), or the class
+(if the device power domain's and type's struct dev_pm_ops objects do not
+exist), or the bus type (if the device power domain's, type's and class'
+struct dev_pm_ops objects do not exist) of the given device. The priority
+order of callbacks, from high to low, is thus: power domain callbacks, device
+type callbacks, class callbacks and bus type callbacks, with a higher-priority
+callback taking precedence over a lower-priority one. The bus type, device
+type and class callbacks are referred to as subsystem-level callbacks in what
+follows, and generally speaking, the power domain callbacks are used for
+representing power domains within a SoC.

 By default, the callbacks are always invoked in process context with interrupts
 enabled. However, subsystems can use the pm_runtime_irq_safe() helper function

@@ -477,12 +482,14 @@ pm_runtime_autosuspend_expiration()
 If pm_runtime_irq_safe() has been called for a device then the following helper
 functions may also be used in interrupt context:

 pm_runtime_idle()
 pm_runtime_suspend()
 pm_runtime_autosuspend()
 pm_runtime_resume()
 pm_runtime_get_sync()
 pm_runtime_put_sync()
 pm_runtime_put_sync_suspend()
 pm_runtime_put_sync_autosuspend()

 5. Runtime PM Initialization, Device Probing and Removal
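[Illustration, not part of this commit: a sketch of what pm_runtime_irq_safe() enables; the interrupt handler and the usage pattern are assumptions.]

#include <linux/interrupt.h>
#include <linux/pm_runtime.h>

static void mydrv_setup_rpm(struct device *dev)
{
	/* Promise that this device's runtime PM callbacks may run
	 * with interrupts disabled. */
	pm_runtime_irq_safe(dev);
	pm_runtime_enable(dev);
}

static irqreturn_t mydrv_isr(int irq, void *data)
{
	struct device *dev = data;

	/* Legal here only because pm_runtime_irq_safe() was called. */
	pm_runtime_get_sync(dev);
	/* ... access the hardware ... */
	pm_runtime_put_sync_autosuspend(dev);
	return IRQ_HANDLED;
}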
275	Documentation/power/suspend-and-cpuhotplug.txt	(new file)
@@ -0,0 +1,275 @@
Interaction of Suspend code (S3) with the CPU hotplug infrastructure

(C) 2011 Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>


I. How does the regular CPU hotplug code differ from how the Suspend-to-RAM
   infrastructure uses it internally? And where do they share common code?

Well, a picture is worth a thousand words... So ASCII art follows :-)

[This depicts the current design in the kernel, and focusses only on the
interactions involving the freezer and CPU hotplug and also tries to explain
the locking involved. It outlines the notifications involved as well.
But please note that here, only the call paths are illustrated, with the aim
of describing where they take different paths and where they share code.
What happens when regular CPU hotplug and Suspend-to-RAM race with each other
is not depicted here.]

On a high level, the suspend-resume cycle goes like this:

|Freeze| -> |Disable nonboot| -> |Do suspend| -> |Enable nonboot| -> |Thaw |
|tasks |    |     cpus      |    |          |    |     cpus     |    |tasks|


More details follow:
Suspend call path
-----------------

                                  Write 'mem' to
                                /sys/power/state
                                    sysfs file
                                        |
                                        v
                               Acquire pm_mutex lock
                                        |
                                        v
                             Send PM_SUSPEND_PREPARE
                                   notifications
                                        |
                                        v
                                   Freeze tasks
                                        |
                                        |
                                        v
                              disable_nonboot_cpus()
                                   /* start */
                                        |
                                        v
                           Acquire cpu_add_remove_lock
                                        |
                                        v
                            Iterate over CURRENTLY
                                  online CPUs
                                        |
                                        |
                                        |               ----------
                                        v                         | L
             ======>               _cpu_down()                    |
            |              [This takes cpuhotplug.lock            |
  Common    |               before taking down the CPU            |
   code     |               and releases it when done]            | O
            |             While it is at it, notifications        |
            |            are sent when notable events occur,      |
             ======>     by running all registered callbacks.     |
                                        |                         | O
                                        |                         |
                                        |                         |
                                        v                         |
                            Note down these cpus in               | P
                               frozen_cpus mask          ----------
                                        |
                                        v
                          Disable regular cpu hotplug
                       by setting cpu_hotplug_disabled=1
                                        |
                                        v
                           Release cpu_add_remove_lock
                                        |
                                        v
                      /* disable_nonboot_cpus() complete */
                                        |
                                        v
                                   Do suspend
Resuming back is likewise, with the counterparts being (in the order of
execution during resume):
* enable_nonboot_cpus() which involves:
   |  Acquire cpu_add_remove_lock
   |  Reset cpu_hotplug_disabled to 0, thereby enabling regular cpu hotplug
   |  Call _cpu_up() [for all those cpus in the frozen_cpus mask, in a loop]
   |  Release cpu_add_remove_lock
   v

* thaw tasks
* send PM_POST_SUSPEND notifications
* Release pm_mutex lock.


It is to be noted here that the pm_mutex lock is acquired at the very
beginning, when we are just starting out to suspend, and then released only
after the entire cycle is complete (i.e., suspend + resume).
Regular CPU hotplug call path
-----------------------------

                                Write 0 (or 1) to
                       /sys/devices/system/cpu/cpu*/online
                                   sysfs file
                                        |
                                        |
                                        v
                                    cpu_down()
                                        |
                                        v
                           Acquire cpu_add_remove_lock
                                        |
                                        v
                          If cpu_hotplug_disabled is 1
                                return gracefully
                                        |
                                        |
                                        v
             ======>               _cpu_down()
            |              [This takes cpuhotplug.lock
  Common    |               before taking down the CPU
   code     |               and releases it when done]
            |             While it is at it, notifications
            |            are sent when notable events occur,
             ======>     by running all registered callbacks.
                                        |
                                        |
                                        v
                           Release cpu_add_remove_lock
                                 [That's it!, for
                                regular CPU hotplug]
So, as can be seen from the two diagrams (the parts marked as "Common code"),
regular CPU hotplug and the suspend code path converge at the _cpu_down() and
_cpu_up() functions. They differ in the arguments passed to these functions,
in that during regular CPU hotplug, 0 is passed for the 'tasks_frozen'
argument. But during suspend, since the tasks are already frozen by the time
the non-boot CPUs are offlined or onlined, the _cpu_*() functions are called
with the 'tasks_frozen' argument set to 1.
[See below for some known issues regarding this.]


Important files and functions/entry points:
--------------------------------------------

kernel/power/process.c : freeze_processes(), thaw_processes()
kernel/power/suspend.c : suspend_prepare(), suspend_enter(), suspend_finish()
kernel/cpu.c: cpu_[up|down](), _cpu_[up|down](), [disable|enable]_nonboot_cpus()
II. What are the issues involved in CPU hotplug?
------------------------------------------------

There are some interesting situations involving CPU hotplug and microcode
update on the CPUs, as discussed below:

[Please bear in mind that the kernel requests the microcode images from
userspace, using the request_firmware() function defined in
drivers/base/firmware_class.c]


a. When all the CPUs are identical:

   This is the most common situation and it is quite straightforward: we want
   to apply the same microcode revision to each of the CPUs.
   To give an example of x86, the collect_cpu_info() function defined in
   arch/x86/kernel/microcode_core.c helps in discovering the type of the CPU
   and thereby in applying the correct microcode revision to it.
   But note that the kernel does not maintain a common microcode image for
   all the CPUs, in order to handle case 'b' described below.


b. When some of the CPUs are different than the rest:

   In this case since we probably need to apply different microcode revisions
   to different CPUs, the kernel maintains a copy of the correct microcode
   image for each CPU (after appropriate CPU type/model discovery using
   functions such as collect_cpu_info()).


c. When a CPU is physically hot-unplugged and a new (and possibly different
   type of) CPU is hot-plugged into the system:

   In the current design of the kernel, whenever a CPU is taken offline during
   a regular CPU hotplug operation, upon receiving the CPU_DEAD notification
   (which is sent by the CPU hotplug code), the microcode update driver's
   callback for that event reacts by freeing the kernel's copy of the
   microcode image for that CPU.

   Hence, when a new CPU is brought online, since the kernel finds that it
   doesn't have the microcode image, it does the CPU type/model discovery
   afresh and then requests the userspace for the appropriate microcode image
   for that CPU, which is subsequently applied.

   For example, in x86, the mc_cpu_callback() function (which is the microcode
   update driver's callback registered for CPU hotplug events) calls
   microcode_update_cpu() which would call microcode_init_cpu() in this case,
   instead of microcode_resume_cpu() when it finds that the kernel doesn't
   have a valid microcode image. This ensures that the CPU type/model
   discovery is performed and the right microcode is applied to the CPU after
   getting it from userspace.


d. Handling microcode update during suspend/hibernate:

   Strictly speaking, during a CPU hotplug operation which does not involve
   physically removing or inserting CPUs, the CPUs are not actually powered
   off during a CPU offline. They are just put to the lowest C-states
   possible. Hence, in such a case, it is not really necessary to re-apply
   microcode when the CPUs are brought back online, since they wouldn't have
   lost the image during the CPU offline operation.

   This is the usual scenario encountered during a resume after a suspend.
   However, in the case of hibernation, since all the CPUs are completely
   powered off, during restore it becomes necessary to apply the microcode
   images to all the CPUs.

   [Note that we don't expect someone to physically pull out nodes and insert
   nodes with a different type of CPUs in-between a suspend-resume or a
   hibernate/restore cycle.]

   In the current design of the kernel however, during a CPU offline operation
   as part of the suspend/hibernate cycle (the CPU_DEAD_FROZEN notification),
   the existing copy of microcode image in the kernel is not freed up.
   And during the CPU online operations (during resume/restore), since the
   kernel finds that it already has copies of the microcode images for all the
   CPUs, it just applies them to the CPUs, avoiding any re-discovery of CPU
   type/model and the need for validating whether the microcode revisions are
   right for the CPUs or not (due to the above assumption that physical CPU
   hotplug will not be done in-between suspend/resume or hibernate/restore
   cycles).
III. Are there any known problems when regular CPU hotplug and suspend race
     with each other?

Yes, they are listed below:

1. When invoking regular CPU hotplug, the 'tasks_frozen' argument passed to
   the _cpu_down() and _cpu_up() functions is *always* 0.
   This might not reflect the true current state of the system, since the
   tasks could have been frozen by an out-of-band event such as a suspend
   operation in progress. Hence, it will lead to wrong notifications being
   sent during the cpu online/offline events (eg, CPU_ONLINE notification
   instead of CPU_ONLINE_FROZEN) which in turn will lead to execution of
   inappropriate code by the callbacks registered for such CPU hotplug
   events.

2. If a regular CPU hotplug stress test happens to race with the freezer due
   to a suspend operation in progress at the same time, then we could hit the
   situation described below:

    * A regular cpu online operation continues its journey from userspace
      into the kernel, since the freezing has not yet begun.
    * Then freezer gets to work and freezes userspace.
    * If cpu online has not yet completed the microcode update stuff by now,
      it will now start waiting on the frozen userspace in the
      TASK_UNINTERRUPTIBLE state, in order to get the microcode image.
    * Now the freezer continues and tries to freeze the remaining tasks. But
      due to this wait mentioned above, the freezer won't be able to freeze
      the cpu online hotplug task and hence freezing of tasks fails.

   As a result of this task freezing failure, the suspend operation gets
   aborted.
@@ -439,10 +439,10 @@ cause autosuspends to fail with -EBUSY if the driver needs to use the
 device.

 External suspend calls should never be allowed to fail in this way,
-only autosuspend calls.  The driver can tell them apart by checking
-the PM_EVENT_AUTO bit in the message.event argument to the suspend
-method; this bit will be set for internal PM events (autosuspend) and
-clear for external PM events.
+only autosuspend calls.  The driver can tell them apart by applying
+the PMSG_IS_AUTO() macro to the message argument to the suspend
+method; it will return True for internal PM events (autosuspend) and
+False for external PM events.


 Mutual exclusion
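[Illustration, not part of this commit: a sketch of a USB suspend method making the autosuspend/external distinction with PMSG_IS_AUTO(); the driver and its busy flag are assumptions.]

#include <linux/usb.h>

static bool mydrv_io_pending;	/* hypothetical driver state */

static int mydrv_suspend(struct usb_interface *intf, pm_message_t message)
{
	/* Only autosuspend may be refused with -EBUSY; an external
	 * (system-wide) suspend must not fail for this reason. */
	if (PMSG_IS_AUTO(message) && mydrv_io_pending)
		return -EBUSY;

	/* ... quiesce the device ... */
	return 0;
}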
12	MAINTAINERS
@@ -2760,7 +2760,7 @@ F: fs/freevxfs/
 FREEZER
 M:	Pavel Machek <pavel@ucw.cz>
 M:	"Rafael J. Wysocki" <rjw@sisk.pl>
-L:	linux-pm@lists.linux-foundation.org
+L:	linux-pm@vger.kernel.org
 S:	Supported
 F:	Documentation/power/freezing-of-tasks.txt
 F:	include/linux/freezer.h

@@ -3022,7 +3022,7 @@ F: drivers/video/hgafb.c
 HIBERNATION (aka Software Suspend, aka swsusp)
 M:	Pavel Machek <pavel@ucw.cz>
 M:	"Rafael J. Wysocki" <rjw@sisk.pl>
-L:	linux-pm@lists.linux-foundation.org
+L:	linux-pm@vger.kernel.org
 S:	Supported
 F:	arch/x86/power/
 F:	drivers/base/power/

@@ -3217,7 +3217,7 @@ F: drivers/ide/ide-cd*

 IDLE-I7300
 M:	Andy Henroid <andrew.d.henroid@intel.com>
-L:	linux-pm@lists.linux-foundation.org
+L:	linux-pm@vger.kernel.org
 S:	Supported
 F:	drivers/idle/i7300_idle.c

@@ -3300,7 +3300,7 @@ F: firmware/isci/

 INTEL IDLE DRIVER
 M:	Len Brown <lenb@kernel.org>
-L:	linux-pm@lists.linux-foundation.org
+L:	linux-pm@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-idle-2.6.git
 S:	Supported
 F:	drivers/idle/intel_idle.c

@@ -3397,7 +3397,7 @@ F: drivers/net/ethernet/intel/

 INTEL MRST PMU DRIVER
 M:	Len Brown <len.brown@intel.com>
-L:	linux-pm@lists.linux-foundation.org
+L:	linux-pm@vger.kernel.org
 S:	Supported
 F:	arch/x86/platform/mrst/pmu.*

@@ -6338,7 +6338,7 @@ SUSPEND TO RAM
 M:	Len Brown <len.brown@intel.com>
 M:	Pavel Machek <pavel@ucw.cz>
 M:	"Rafael J. Wysocki" <rjw@sisk.pl>
-L:	linux-pm@lists.linux-foundation.org
+L:	linux-pm@vger.kernel.org
 S:	Supported
 F:	Documentation/power/
 F:	arch/x86/kernel/acpi/
@@ -18,7 +18,7 @@
 #include <linux/list.h>
 #include <linux/err.h>
 #include <linux/spinlock.h>
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
 #include <linux/mutex.h>
 #include <linux/clk.h>
 #include <linux/string.h>
@@ -13,6 +13,7 @@
 #include <linux/kernel.h>
 #include <linux/io.h>
 #include <linux/pm_runtime.h>
+#include <linux/pm_clock.h>
 #include <linux/platform_device.h>
 #include <linux/mutex.h>
 #include <linux/clk.h>
@@ -42,6 +42,7 @@
 #include <linux/leds.h>
 #include <linux/input/sh_keysc.h>
 #include <linux/usb/r8a66597.h>
+#include <linux/pm_clock.h>

 #include <media/sh_mobile_ceu.h>
 #include <media/sh_mobile_csi2.h>

@@ -1408,6 +1409,11 @@ static void __init ap4evb_init(void)
 	sh7372_add_device_to_domain(&sh7372_a4lc, &lcdc_device);
 	sh7372_add_device_to_domain(&sh7372_a4mp, &fsi_device);

+	sh7372_add_device_to_domain(&sh7372_a3sp, &sh_mmcif_device);
+	sh7372_add_device_to_domain(&sh7372_a3sp, &sdhi0_device);
+	sh7372_add_device_to_domain(&sh7372_a3sp, &sdhi1_device);
+	sh7372_add_device_to_domain(&sh7372_a4r, &ceu_device);
+
 	hdmi_init_pm_clock();
 	fsi_init_pm_clock();
 	sh7372_pm_init();
@@ -39,7 +39,7 @@
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/physmap.h>
-#include <linux/pm_runtime.h>
+#include <linux/pm_clock.h>
 #include <linux/smsc911x.h>
 #include <linux/sh_intc.h>
 #include <linux/tca6416_keypad.h>

@@ -1589,6 +1589,15 @@ static void __init mackerel_init(void)
 	sh7372_add_device_to_domain(&sh7372_a4lc, &lcdc_device);
 	sh7372_add_device_to_domain(&sh7372_a4lc, &hdmi_lcdc_device);
 	sh7372_add_device_to_domain(&sh7372_a4mp, &fsi_device);
+	sh7372_add_device_to_domain(&sh7372_a3sp, &usbhs0_device);
+	sh7372_add_device_to_domain(&sh7372_a3sp, &usbhs1_device);
+	sh7372_add_device_to_domain(&sh7372_a3sp, &sh_mmcif_device);
+	sh7372_add_device_to_domain(&sh7372_a3sp, &sdhi0_device);
+#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
+	sh7372_add_device_to_domain(&sh7372_a3sp, &sdhi1_device);
+#endif
+	sh7372_add_device_to_domain(&sh7372_a3sp, &sdhi2_device);
+	sh7372_add_device_to_domain(&sh7372_a4r, &ceu_device);

 	hdmi_init_pm_clock();
 	sh7372_pm_init();
@@ -35,8 +35,8 @@ extern void sh7372_add_standard_devices(void);
 extern void sh7372_clock_init(void);
 extern void sh7372_pinmux_init(void);
 extern void sh7372_pm_init(void);
-extern void sh7372_cpu_suspend(void);
-extern void sh7372_cpu_resume(void);
+extern void sh7372_resume_core_standby_a3sm(void);
+extern int sh7372_do_idle_a3sm(unsigned long unused);
 extern struct clk sh7372_extal1_clk;
 extern struct clk sh7372_extal2_clk;
@@ -479,7 +479,12 @@ struct platform_device;

 struct sh7372_pm_domain {
 	struct generic_pm_domain genpd;
+	struct dev_power_governor *gov;
+	void (*suspend)(void);
+	void (*resume)(void);
 	unsigned int bit_shift;
 	bool no_debug;
+	bool stay_on;
 };

 static inline struct sh7372_pm_domain *to_sh7372_pd(struct generic_pm_domain *d)

@@ -491,16 +496,24 @@ static inline struct sh7372_pm_domain *to_sh7372_pd(struct generic_pm_domain *d)
 extern struct sh7372_pm_domain sh7372_a4lc;
 extern struct sh7372_pm_domain sh7372_a4mp;
 extern struct sh7372_pm_domain sh7372_d4;
+extern struct sh7372_pm_domain sh7372_a4r;
 extern struct sh7372_pm_domain sh7372_a3rv;
 extern struct sh7372_pm_domain sh7372_a3ri;
+extern struct sh7372_pm_domain sh7372_a3sp;
 extern struct sh7372_pm_domain sh7372_a3sg;

 extern void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd);
 extern void sh7372_add_device_to_domain(struct sh7372_pm_domain *sh7372_pd,
 					struct platform_device *pdev);
+extern void sh7372_pm_add_subdomain(struct sh7372_pm_domain *sh7372_pd,
+				    struct sh7372_pm_domain *sh7372_sd);
 #else
 #define sh7372_init_pm_domain(pd) do { } while(0)
 #define sh7372_add_device_to_domain(pd, pdev) do { } while(0)
+#define sh7372_pm_add_subdomain(pd, sd) do { } while(0)
 #endif /* CONFIG_PM */

+extern void sh7372_intcs_suspend(void);
+extern void sh7372_intcs_resume(void);
+
 #endif /* __ASM_SH7372_H__ */
@@ -606,9 +606,16 @@ static void intcs_demux(unsigned int irq, struct irq_desc *desc)
 	generic_handle_irq(intcs_evt2irq(evtcodeas));
 }

+static void __iomem *intcs_ffd2;
+static void __iomem *intcs_ffd5;
+
 void __init sh7372_init_irq(void)
 {
-	void __iomem *intevtsa = ioremap_nocache(0xffd20100, PAGE_SIZE);
+	void __iomem *intevtsa;
+
+	intcs_ffd2 = ioremap_nocache(0xffd20000, PAGE_SIZE);
+	intevtsa = intcs_ffd2 + 0x100;
+	intcs_ffd5 = ioremap_nocache(0xffd50000, PAGE_SIZE);

 	register_intc_controller(&intca_desc);
 	register_intc_controller(&intcs_desc);

@@ -617,3 +624,46 @@ void __init sh7372_init_irq(void)
 	irq_set_handler_data(evt2irq(0xf80), (void *)intevtsa);
 	irq_set_chained_handler(evt2irq(0xf80), intcs_demux);
 }
+
+static unsigned short ffd2[0x200];
+static unsigned short ffd5[0x100];
+
+void sh7372_intcs_suspend(void)
+{
+	int k;
+
+	for (k = 0x00; k <= 0x30; k += 4)
+		ffd2[k] = __raw_readw(intcs_ffd2 + k);
+
+	for (k = 0x80; k <= 0xb0; k += 4)
+		ffd2[k] = __raw_readb(intcs_ffd2 + k);
+
+	for (k = 0x180; k <= 0x188; k += 4)
+		ffd2[k] = __raw_readb(intcs_ffd2 + k);
+
+	for (k = 0x00; k <= 0x3c; k += 4)
+		ffd5[k] = __raw_readw(intcs_ffd5 + k);
+
+	for (k = 0x80; k <= 0x9c; k += 4)
+		ffd5[k] = __raw_readb(intcs_ffd5 + k);
+}
+
+void sh7372_intcs_resume(void)
+{
+	int k;
+
+	for (k = 0x00; k <= 0x30; k += 4)
+		__raw_writew(ffd2[k], intcs_ffd2 + k);
+
+	for (k = 0x80; k <= 0xb0; k += 4)
+		__raw_writeb(ffd2[k], intcs_ffd2 + k);
+
+	for (k = 0x180; k <= 0x188; k += 4)
+		__raw_writeb(ffd2[k], intcs_ffd2 + k);
+
+	for (k = 0x00; k <= 0x3c; k += 4)
+		__raw_writew(ffd5[k], intcs_ffd5 + k);
+
+	for (k = 0x80; k <= 0x9c; k += 4)
+		__raw_writeb(ffd5[k], intcs_ffd5 + k);
+}
@@ -15,23 +15,61 @@
 #include <linux/list.h>
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/pm_runtime.h>
 #include <linux/pm_clock.h>
 #include <linux/platform_device.h>
 #include <linux/delay.h>
 #include <linux/irq.h>
 #include <linux/bitrev.h>
 #include <asm/system.h>
 #include <asm/io.h>
 #include <asm/tlbflush.h>
 #include <asm/suspend.h>
 #include <mach/common.h>
 #include <mach/sh7372.h>

-#define SMFRAM 0xe6a70000
-#define SYSTBCR 0xe6150024
-#define SBAR 0xe6180020
-#define APARMBAREA 0xe6f10020
+/* DBG */
+#define DBGREG1 0xe6100020
+#define DBGREG9 0xe6100040
+
+/* CPGA */
+#define SYSTBCR 0xe6150024
+#define MSTPSR0 0xe6150030
+#define MSTPSR1 0xe6150038
+#define MSTPSR2 0xe6150040
+#define MSTPSR3 0xe6150048
+#define MSTPSR4 0xe615004c
+#define PLLC01STPCR 0xe61500c8
+
+/* SYSC */
+#define SPDCR 0xe6180008
+#define SWUCR 0xe6180014
+#define SBAR 0xe6180020
+#define WUPRMSK 0xe6180028
+#define WUPSMSK 0xe618002c
+#define WUPSMSK2 0xe6180048
+#define PSTR 0xe6180080
+#define WUPSFAC 0xe6180098
+#define IRQCR 0xe618022c
+#define IRQCR2 0xe6180238
+#define IRQCR3 0xe6180244
+#define IRQCR4 0xe6180248
+#define PDNSEL 0xe6180254
+
+/* INTC */
+#define ICR1A 0xe6900000
+#define ICR2A 0xe6900004
+#define ICR3A 0xe6900008
+#define ICR4A 0xe690000c
+#define INTMSK00A 0xe6900040
+#define INTMSK10A 0xe6900044
+#define INTMSK20A 0xe6900048
+#define INTMSK30A 0xe690004c
+
+/* MFIS */
+#define SMFRAM 0xe6a70000
+
+/* AP-System Core */
+#define APARMBAREA 0xe6f10020

 #define PSTR_RETRIES 100
 #define PSTR_DELAY_US 10
@@ -43,6 +81,12 @@ static int pd_power_down(struct generic_pm_domain *genpd)
 	struct sh7372_pm_domain *sh7372_pd = to_sh7372_pd(genpd);
 	unsigned int mask = 1 << sh7372_pd->bit_shift;

+	if (sh7372_pd->suspend)
+		sh7372_pd->suspend();
+
+	if (sh7372_pd->stay_on)
+		return 0;
+
 	if (__raw_readl(PSTR) & mask) {
 		unsigned int retry_count;

@@ -55,6 +99,7 @@ static int pd_power_down(struct generic_pm_domain *genpd)
 		}
 	}

 	if (!sh7372_pd->no_debug)
 		pr_debug("sh7372 power domain down 0x%08x -> PSTR = 0x%08x\n",
 			 mask, __raw_readl(PSTR));

@@ -68,6 +113,9 @@ static int pd_power_up(struct generic_pm_domain *genpd)
 	unsigned int retry_count;
 	int ret = 0;

+	if (sh7372_pd->stay_on)
+		goto out;
+
 	if (__raw_readl(PSTR) & mask)
 		goto out;

@@ -84,40 +132,21 @@ static int pd_power_up(struct generic_pm_domain *genpd)
 	if (__raw_readl(SWUCR) & mask)
 		ret = -EIO;

-out:
 	if (!sh7372_pd->no_debug)
 		pr_debug("sh7372 power domain up 0x%08x -> PSTR = 0x%08x\n",
 			 mask, __raw_readl(PSTR));

-	return ret;
-}
-
-static int pd_power_up_a3rv(struct generic_pm_domain *genpd)
-{
-	int ret = pd_power_up(genpd);
-
-	/* force A4LC on after A3RV has been requested on */
-	pm_genpd_poweron(&sh7372_a4lc.genpd);
-
-	return ret;
-}
-
-static int pd_power_down_a3rv(struct generic_pm_domain *genpd)
-{
-	int ret = pd_power_down(genpd);
-
-	/* try to power down A4LC after A3RV is requested off */
-	genpd_queue_power_off_work(&sh7372_a4lc.genpd);
-
-	return ret;
-}
-
-static int pd_power_down_a4lc(struct generic_pm_domain *genpd)
-{
-	/* only power down A4LC if A3RV is off */
-	if (!(__raw_readl(PSTR) & (1 << sh7372_a3rv.bit_shift)))
-		return pd_power_down(genpd);
-
-	return -EBUSY;
+out:
+	if (ret == 0 && sh7372_pd->resume)
+		sh7372_pd->resume();
+
+	return ret;
+}
+
+static void sh7372_a4r_suspend(void)
+{
+	sh7372_intcs_suspend();
+	__raw_writel(0x300fffff, WUPRMSK); /* avoid wakeup */
 }

 static bool pd_active_wakeup(struct device *dev)
@@ -125,25 +154,26 @@ static bool pd_active_wakeup(struct device *dev)
 	return true;
 }

+static bool sh7372_power_down_forbidden(struct dev_pm_domain *domain)
+{
+	return false;
+}
+
+struct dev_power_governor sh7372_always_on_gov = {
+	.power_down_ok = sh7372_power_down_forbidden,
+};
+
 void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd)
 {
 	struct generic_pm_domain *genpd = &sh7372_pd->genpd;

-	pm_genpd_init(genpd, NULL, false);
+	pm_genpd_init(genpd, sh7372_pd->gov, false);
 	genpd->stop_device = pm_clk_suspend;
 	genpd->start_device = pm_clk_resume;
 	genpd->dev_irq_safe = true;
 	genpd->active_wakeup = pd_active_wakeup;
-
-	if (sh7372_pd == &sh7372_a4lc) {
-		genpd->power_off = pd_power_down_a4lc;
-		genpd->power_on = pd_power_up;
-	} else if (sh7372_pd == &sh7372_a3rv) {
-		genpd->power_off = pd_power_down_a3rv;
-		genpd->power_on = pd_power_up_a3rv;
-	} else {
-		genpd->power_off = pd_power_down;
-		genpd->power_on = pd_power_up;
-	}
+	genpd->power_off = pd_power_down;
+	genpd->power_on = pd_power_up;
 	genpd->power_on(&sh7372_pd->genpd);
 }

@@ -152,11 +182,15 @@ void sh7372_add_device_to_domain(struct sh7372_pm_domain *sh7372_pd,
 {
 	struct device *dev = &pdev->dev;

-	if (!dev->power.subsys_data) {
-		pm_clk_init(dev);
-		pm_clk_add(dev, NULL);
-	}
 	pm_genpd_add_device(&sh7372_pd->genpd, dev);
+	if (pm_clk_no_clocks(dev))
+		pm_clk_add(dev, NULL);
+}
+
+void sh7372_pm_add_subdomain(struct sh7372_pm_domain *sh7372_pd,
+			     struct sh7372_pm_domain *sh7372_sd)
+{
+	pm_genpd_add_subdomain(&sh7372_pd->genpd, &sh7372_sd->genpd);
 }

 struct sh7372_pm_domain sh7372_a4lc = {

@@ -171,6 +205,14 @@ struct sh7372_pm_domain sh7372_d4 = {
 	.bit_shift = 3,
 };

+struct sh7372_pm_domain sh7372_a4r = {
+	.bit_shift = 5,
+	.gov = &sh7372_always_on_gov,
+	.suspend = sh7372_a4r_suspend,
+	.resume = sh7372_intcs_resume,
+	.stay_on = true,
+};
+
 struct sh7372_pm_domain sh7372_a3rv = {
 	.bit_shift = 6,
 };
@@ -179,39 +221,187 @@ struct sh7372_pm_domain sh7372_a3ri = {
 	.bit_shift = 8,
 };

+struct sh7372_pm_domain sh7372_a3sp = {
+	.bit_shift = 11,
+	.gov = &sh7372_always_on_gov,
+	.no_debug = true,
+};
+
 struct sh7372_pm_domain sh7372_a3sg = {
 	.bit_shift = 13,
 };

 #endif /* CONFIG_PM */

-static void sh7372_enter_core_standby(void)
-{
-	void __iomem *smfram = (void __iomem *)SMFRAM;
-
-	__raw_writel(0, APARMBAREA); /* translate 4k */
-	__raw_writel(__pa(sh7372_cpu_resume), SBAR); /* set reset vector */
-	__raw_writel(0x10, SYSTBCR); /* enable core standby */
-
-	__raw_writel(0, smfram + 0x3c); /* clear page table address */
-
-	sh7372_cpu_suspend();
-	cpu_init();
-
-	/* if page table address is non-NULL then we have been powered down */
-	if (__raw_readl(smfram + 0x3c)) {
-		__raw_writel(__raw_readl(smfram + 0x40),
-			     __va(__raw_readl(smfram + 0x3c)));
-
-		flush_tlb_all();
-		set_cr(__raw_readl(smfram + 0x38));
-	}
-
-	__raw_writel(0, SYSTBCR); /* disable core standby */
-	__raw_writel(0, SBAR); /* disable reset vector translation */
-}
+#if defined(CONFIG_SUSPEND) || defined(CONFIG_CPU_IDLE)
+static int sh7372_do_idle_core_standby(unsigned long unused)
+{
+	cpu_do_idle(); /* WFI when SYSTBCR == 0x10 -> Core Standby */
+	return 0;
+}
+
+static void sh7372_enter_core_standby(void)
+{
+	/* set reset vector, translate 4k */
+	__raw_writel(__pa(sh7372_resume_core_standby_a3sm), SBAR);
+	__raw_writel(0, APARMBAREA);
+
+	/* enter sleep mode with SYSTBCR to 0x10 */
+	__raw_writel(0x10, SYSTBCR);
+	cpu_suspend(0, sh7372_do_idle_core_standby);
+	__raw_writel(0, SYSTBCR);
+
+	/* disable reset vector translation */
+	__raw_writel(0, SBAR);
+}
+#endif
+
+#ifdef CONFIG_SUSPEND
+static void sh7372_enter_a3sm_common(int pllc0_on)
+{
+	/* set reset vector, translate 4k */
+	__raw_writel(__pa(sh7372_resume_core_standby_a3sm), SBAR);
+	__raw_writel(0, APARMBAREA);
+
+	if (pllc0_on)
+		__raw_writel(0, PLLC01STPCR);
+	else
+		__raw_writel(1 << 28, PLLC01STPCR);
+
+	__raw_writel(0, PDNSEL); /* power-down A3SM only, not A4S */
+	__raw_readl(WUPSFAC); /* read wakeup int. factor before sleep */
+	cpu_suspend(0, sh7372_do_idle_a3sm);
+	__raw_readl(WUPSFAC); /* read wakeup int. factor after wakeup */
+
+	/* disable reset vector translation */
+	__raw_writel(0, SBAR);
+}
+
+static int sh7372_a3sm_valid(unsigned long *mskp, unsigned long *msk2p)
+{
+	unsigned long mstpsr0, mstpsr1, mstpsr2, mstpsr3, mstpsr4;
+	unsigned long msk, msk2;
+
+	/* check active clocks to determine potential wakeup sources */
+
+	mstpsr0 = __raw_readl(MSTPSR0);
+	if ((mstpsr0 & 0x00000003) != 0x00000003) {
+		pr_debug("sh7372 mstpsr0 0x%08lx\n", mstpsr0);
+		return 0;
+	}
+
+	mstpsr1 = __raw_readl(MSTPSR1);
+	if ((mstpsr1 & 0xff079b7f) != 0xff079b7f) {
+		pr_debug("sh7372 mstpsr1 0x%08lx\n", mstpsr1);
+		return 0;
+	}
+
+	mstpsr2 = __raw_readl(MSTPSR2);
+	if ((mstpsr2 & 0x000741ff) != 0x000741ff) {
+		pr_debug("sh7372 mstpsr2 0x%08lx\n", mstpsr2);
+		return 0;
+	}
+
+	mstpsr3 = __raw_readl(MSTPSR3);
+	if ((mstpsr3 & 0x1a60f010) != 0x1a60f010) {
+		pr_debug("sh7372 mstpsr3 0x%08lx\n", mstpsr3);
+		return 0;
+	}
+
+	mstpsr4 = __raw_readl(MSTPSR4);
+	if ((mstpsr4 & 0x00008cf0) != 0x00008cf0) {
+		pr_debug("sh7372 mstpsr4 0x%08lx\n", mstpsr4);
+		return 0;
+	}
+
+	msk = 0;
+	msk2 = 0;
+
+	/* make bitmaps of limited number of wakeup sources */
+
+	if ((mstpsr2 & (1 << 23)) == 0) /* SPU2 */
+		msk |= 1 << 31;
+
+	if ((mstpsr2 & (1 << 12)) == 0) /* MFI_MFIM */
+		msk |= 1 << 21;
+
+	if ((mstpsr4 & (1 << 3)) == 0) /* KEYSC */
+		msk |= 1 << 2;
+
+	if ((mstpsr1 & (1 << 24)) == 0) /* CMT0 */
+		msk |= 1 << 1;
+
+	if ((mstpsr3 & (1 << 29)) == 0) /* CMT1 */
+		msk |= 1 << 1;
+
+	if ((mstpsr4 & (1 << 0)) == 0) /* CMT2 */
+		msk |= 1 << 1;
+
+	if ((mstpsr2 & (1 << 13)) == 0) /* MFI_MFIS */
+		msk2 |= 1 << 17;
+
+	*mskp = msk;
+	*msk2p = msk2;
+
+	return 1;
+}
+
+static void sh7372_icr_to_irqcr(unsigned long icr, u16 *irqcr1p, u16 *irqcr2p)
+{
+	u16 tmp, irqcr1, irqcr2;
+	int k;
+
+	irqcr1 = 0;
+	irqcr2 = 0;
+
+	/* convert INTCA ICR register layout to SYSC IRQCR+IRQCR2 */
+	for (k = 0; k <= 7; k++) {
+		tmp = (icr >> ((7 - k) * 4)) & 0xf;
+		irqcr1 |= (tmp & 0x03) << (k * 2);
+		irqcr2 |= (tmp >> 2) << (k * 2);
+	}
+
+	*irqcr1p = irqcr1;
+	*irqcr2p = irqcr2;
+}
+
+static void sh7372_setup_a3sm(unsigned long msk, unsigned long msk2)
+{
+	u16 irqcrx_low, irqcrx_high, irqcry_low, irqcry_high;
+	unsigned long tmp;
+
+	/* read IRQ0A -> IRQ15A mask */
+	tmp = bitrev8(__raw_readb(INTMSK00A));
+	tmp |= bitrev8(__raw_readb(INTMSK10A)) << 8;
+
+	/* setup WUPSMSK from clocks and external IRQ mask */
+	msk = (~msk & 0xc030000f) | (tmp << 4);
+	__raw_writel(msk, WUPSMSK);
+
+	/* propagate level/edge trigger for external IRQ 0->15 */
+	sh7372_icr_to_irqcr(__raw_readl(ICR1A), &irqcrx_low, &irqcry_low);
+	sh7372_icr_to_irqcr(__raw_readl(ICR2A), &irqcrx_high, &irqcry_high);
+	__raw_writel((irqcrx_high << 16) | irqcrx_low, IRQCR);
+	__raw_writel((irqcry_high << 16) | irqcry_low, IRQCR2);
+
+	/* read IRQ16A -> IRQ31A mask */
+	tmp = bitrev8(__raw_readb(INTMSK20A));
+	tmp |= bitrev8(__raw_readb(INTMSK30A)) << 8;
+
+	/* setup WUPSMSK2 from clocks and external IRQ mask */
+	msk2 = (~msk2 & 0x00030000) | tmp;
+	__raw_writel(msk2, WUPSMSK2);
+
+	/* propagate level/edge trigger for external IRQ 16->31 */
+	sh7372_icr_to_irqcr(__raw_readl(ICR3A), &irqcrx_low, &irqcry_low);
+	sh7372_icr_to_irqcr(__raw_readl(ICR4A), &irqcrx_high, &irqcry_high);
+	__raw_writel((irqcrx_high << 16) | irqcrx_low, IRQCR3);
+	__raw_writel((irqcry_high << 16) | irqcry_low, IRQCR4);
+}
+#endif
+
+#ifdef CONFIG_CPU_IDLE
+
 static void sh7372_cpuidle_setup(struct cpuidle_device *dev)
 {
 	struct cpuidle_state *state;
@@ -239,9 +429,25 @@ static void sh7372_cpuidle_init(void) {}
 #endif

 #ifdef CONFIG_SUSPEND

 static int sh7372_enter_suspend(suspend_state_t suspend_state)
 {
+	unsigned long msk, msk2;
+
+	/* check active clocks to determine potential wakeup sources */
+	if (sh7372_a3sm_valid(&msk, &msk2)) {
+
+		/* convert INTC mask and sense to SYSC mask and sense */
+		sh7372_setup_a3sm(msk, msk2);
+
+		/* enter A3SM sleep with PLLC0 off */
+		pr_debug("entering A3SM\n");
+		sh7372_enter_a3sm_common(0);
+	} else {
+		/* default to Core Standby that supports all wakeup sources */
+		pr_debug("entering Core Standby\n");
+		sh7372_enter_core_standby();
+	}
 	return 0;
 }

@@ -253,9 +459,6 @@ static void sh7372_suspend_init(void)
 static void sh7372_suspend_init(void) {}
 #endif

-#define DBGREG1 0xe6100020
-#define DBGREG9 0xe6100040
-
 void __init sh7372_pm_init(void)
 {
 	/* enable DBG hardware block to kick SYSC */

@@ -263,6 +466,9 @@ void __init sh7372_pm_init(void)
 	__raw_writel(0x0000a501, DBGREG9);
 	__raw_writel(0x00000000, DBGREG1);

+	/* do not convert A3SM, A3SP, A3SG, A4R power down into A4S */
+	__raw_writel(0, PDNSEL);
+
 	sh7372_suspend_init();
 	sh7372_cpuidle_init();
 }
@@ -15,6 +15,7 @@
 #include <linux/io.h>
 #include <linux/pm_runtime.h>
 #include <linux/pm_domain.h>
+#include <linux/pm_clock.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <linux/sh_clk.h>
@@ -30,6 +30,7 @@
 #include <linux/sh_dma.h>
 #include <linux/sh_intc.h>
 #include <linux/sh_timer.h>
+#include <linux/pm_domain.h>
 #include <mach/hardware.h>
 #include <mach/sh7372.h>
 #include <asm/mach-types.h>

@@ -990,9 +991,14 @@ void __init sh7372_add_standard_devices(void)
 	sh7372_init_pm_domain(&sh7372_a4lc);
 	sh7372_init_pm_domain(&sh7372_a4mp);
 	sh7372_init_pm_domain(&sh7372_d4);
+	sh7372_init_pm_domain(&sh7372_a4r);
 	sh7372_init_pm_domain(&sh7372_a3rv);
 	sh7372_init_pm_domain(&sh7372_a3ri);
 	sh7372_init_pm_domain(&sh7372_a3sg);
+	sh7372_init_pm_domain(&sh7372_a3sp);
+
+	sh7372_pm_add_subdomain(&sh7372_a4lc, &sh7372_a3rv);
+	sh7372_pm_add_subdomain(&sh7372_a4r, &sh7372_a4lc);

 	platform_add_devices(sh7372_early_devices,
 			    ARRAY_SIZE(sh7372_early_devices));

@@ -1003,6 +1009,25 @@ void __init sh7372_add_standard_devices(void)
 	sh7372_add_device_to_domain(&sh7372_a3rv, &vpu_device);
 	sh7372_add_device_to_domain(&sh7372_a4mp, &spu0_device);
 	sh7372_add_device_to_domain(&sh7372_a4mp, &spu1_device);
+	sh7372_add_device_to_domain(&sh7372_a3sp, &scif0_device);
+	sh7372_add_device_to_domain(&sh7372_a3sp, &scif1_device);
+	sh7372_add_device_to_domain(&sh7372_a3sp, &scif2_device);
+	sh7372_add_device_to_domain(&sh7372_a3sp, &scif3_device);
+	sh7372_add_device_to_domain(&sh7372_a3sp, &scif4_device);
+	sh7372_add_device_to_domain(&sh7372_a3sp, &scif5_device);
+	sh7372_add_device_to_domain(&sh7372_a3sp, &scif6_device);
+	sh7372_add_device_to_domain(&sh7372_a3sp, &iic1_device);
+	sh7372_add_device_to_domain(&sh7372_a3sp, &dma0_device);
+	sh7372_add_device_to_domain(&sh7372_a3sp, &dma1_device);
+	sh7372_add_device_to_domain(&sh7372_a3sp, &dma2_device);
+	sh7372_add_device_to_domain(&sh7372_a3sp, &usb_dma0_device);
+	sh7372_add_device_to_domain(&sh7372_a3sp, &usb_dma1_device);
+	sh7372_add_device_to_domain(&sh7372_a4r, &iic0_device);
+	sh7372_add_device_to_domain(&sh7372_a4r, &veu0_device);
+	sh7372_add_device_to_domain(&sh7372_a4r, &veu1_device);
+	sh7372_add_device_to_domain(&sh7372_a4r, &veu2_device);
+	sh7372_add_device_to_domain(&sh7372_a4r, &veu3_device);
+	sh7372_add_device_to_domain(&sh7372_a4r, &jpu_device);
 }

 void __init sh7372_add_early_devices(void)
@ -30,58 +30,20 @@
|
||||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
#include <linux/init.h>
|
||||
#include <asm/memory.h>
|
||||
#include <asm/assembler.h>
|
||||
|
||||
#define SMFRAM 0xe6a70000
|
||||
|
||||
.align
|
||||
kernel_flush:
|
||||
.word v7_flush_dcache_all
|
||||
|
||||
.align 3
|
||||
ENTRY(sh7372_cpu_suspend)
|
||||
stmfd sp!, {r0-r12, lr} @ save registers on stack
|
||||
|
||||
ldr r8, =SMFRAM
|
||||
|
||||
mov r4, sp @ Store sp
|
||||
mrs r5, spsr @ Store spsr
|
||||
mov r6, lr @ Store lr
|
||||
stmia r8!, {r4-r6}
|
||||
|
||||
mrc p15, 0, r4, c1, c0, 2 @ Coprocessor access control register
|
||||
mrc p15, 0, r5, c2, c0, 0 @ TTBR0
|
||||
mrc p15, 0, r6, c2, c0, 1 @ TTBR1
|
||||
mrc p15, 0, r7, c2, c0, 2 @ TTBCR
|
||||
stmia r8!, {r4-r7}
|
||||
|
||||
mrc p15, 0, r4, c3, c0, 0 @ Domain access Control Register
|
||||
mrc p15, 0, r5, c10, c2, 0 @ PRRR
|
||||
mrc p15, 0, r6, c10, c2, 1 @ NMRR
|
||||
stmia r8!,{r4-r6}
|
||||
|
||||
mrc p15, 0, r4, c13, c0, 1 @ Context ID
|
||||
mrc p15, 0, r5, c13, c0, 2 @ User r/w thread and process ID
|
||||
mrc p15, 0, r6, c12, c0, 0 @ Secure or NS vector base address
|
||||
mrs r7, cpsr @ Store current cpsr
|
||||
stmia r8!, {r4-r7}
|
||||
|
||||
mrc p15, 0, r4, c1, c0, 0 @ save control register
|
||||
stmia r8!, {r4}
|
||||
|
||||
/*
|
||||
* jump out to kernel flush routine
|
||||
* - reuse that code is better
|
||||
* - it executes in a cached space so is faster than refetch per-block
|
||||
* - should be faster and will change with kernel
|
||||
* - 'might' have to copy address, load and jump to it
|
||||
* Flush all data from the L1 data cache before disabling
|
||||
* SCTLR.C bit.
|
||||
*/
|
||||
ldr r1, kernel_flush
mov lr, pc
bx r1
#if defined(CONFIG_SUSPEND) || defined(CONFIG_CPU_IDLE)
.align 12
.text
.global sh7372_resume_core_standby_a3sm
sh7372_resume_core_standby_a3sm:
ldr pc, 1f
1: .long cpu_resume - PAGE_OFFSET + PLAT_PHYS_OFFSET

.global sh7372_do_idle_a3sm
sh7372_do_idle_a3sm:
/*
* Clear the SCTLR.C bit to prevent further data cache
* allocation. Clearing SCTLR.C would make all the data accesses
@@ -92,10 +54,13 @@ ENTRY(sh7372_cpu_suspend)
mcr p15, 0, r0, c1, c0, 0
isb

/* disable L2 cache in the aux control register */
mrc p15, 0, r10, c1, c0, 1
bic r10, r10, #2
mcr p15, 0, r10, c1, c0, 1

/*
* Invalidate the L1 data cache. Even though only invalidation is
* necessary, the exported flush API is used here; doing a clean
* on an already clean cache is almost a NOP.
* Invalidate the data cache again.
*/
ldr r1, kernel_flush
blx r1
@@ -115,146 +80,16 @@ ENTRY(sh7372_cpu_suspend)
dsb
dmb

/*
* ===================================
* == WFI instruction => Enter idle ==
* ===================================
*/
wfi @ wait for interrupt
#define SPDCR 0xe6180008
#define A3SM (1 << 12)

/*
* ===================================
* == Resume path for non-OFF modes ==
* ===================================
*/
mrc p15, 0, r0, c1, c0, 0
tst r0, #(1 << 2) @ Check C bit enabled?
orreq r0, r0, #(1 << 2) @ Enable the C bit if cleared
mcreq p15, 0, r0, c1, c0, 0
isb
/* A3SM power down */
ldr r0, =SPDCR
ldr r1, =A3SM
str r1, [r0]
1:
b 1b

/*
* ===================================
* == Exit point from non-OFF modes ==
* ===================================
*/
ldmfd sp!, {r0-r12, pc} @ restore regs and return

.pool

.align 12
.text
.global sh7372_cpu_resume
sh7372_cpu_resume:

mov r1, #0
/*
* Invalidate all instruction caches to PoU
* and flush branch target cache
*/
mcr p15, 0, r1, c7, c5, 0

ldr r3, =SMFRAM

ldmia r3!, {r4-r6}
mov sp, r4 @ Restore sp
msr spsr_cxsf, r5 @ Restore spsr
mov lr, r6 @ Restore lr

ldmia r3!, {r4-r7}
mcr p15, 0, r4, c1, c0, 2 @ Coprocessor access Control Register
mcr p15, 0, r5, c2, c0, 0 @ TTBR0
mcr p15, 0, r6, c2, c0, 1 @ TTBR1
mcr p15, 0, r7, c2, c0, 2 @ TTBCR

ldmia r3!,{r4-r6}
mcr p15, 0, r4, c3, c0, 0 @ Domain access Control Register
mcr p15, 0, r5, c10, c2, 0 @ PRRR
mcr p15, 0, r6, c10, c2, 1 @ NMRR

ldmia r3!,{r4-r7}
mcr p15, 0, r4, c13, c0, 1 @ Context ID
mcr p15, 0, r5, c13, c0, 2 @ User r/w thread and process ID
mrc p15, 0, r6, c12, c0, 0 @ Secure or NS vector base address
msr cpsr, r7 @ store cpsr

/* Starting to enable MMU here */
mrc p15, 0, r7, c2, c0, 2 @ Read TTBRControl
/* Extract N (0:2) bits and decide whether to use TTBR0 or TTBR1 */
and r7, #0x7
cmp r7, #0x0
beq usettbr0
ttbr_error:
/*
* More work needs to be done to support N[0:2] value other than 0
* So looping here so that the error can be detected
*/
b ttbr_error

.align
cache_pred_disable_mask:
.word 0xFFFFE7FB
ttbrbit_mask:
.word 0xFFFFC000
table_index_mask:
.word 0xFFF00000
table_entry:
.word 0x00000C02
usettbr0:

mrc p15, 0, r2, c2, c0, 0
ldr r5, ttbrbit_mask
and r2, r5
mov r4, pc
ldr r5, table_index_mask
and r4, r5 @ r4 = 31 to 20 bits of pc
/* Extract the value to be written to table entry */
ldr r6, table_entry
/* r6 has the value to be written to table entry */
add r6, r6, r4
/* Getting the address of table entry to modify */
lsr r4, #18
/* r2 has the location which needs to be modified */
add r2, r4
ldr r4, [r2]
str r6, [r2] /* modify the table entry */

mov r7, r6
mov r5, r2
mov r6, r4
/* r5 = original page table address */
/* r6 = original page table data */

mov r0, #0
mcr p15, 0, r0, c7, c5, 4 @ Flush prefetch buffer
mcr p15, 0, r0, c7, c5, 6 @ Invalidate branch predictor array
mcr p15, 0, r0, c8, c5, 0 @ Invalidate instruction TLB
mcr p15, 0, r0, c8, c6, 0 @ Invalidate data TLB

/*
* Restore control register. This enables the MMU.
* The caches and prediction are not enabled here, they
* will be enabled after restoring the MMU table entry.
*/
ldmia r3!, {r4}
stmia r3!, {r5} /* save original page table address */
stmia r3!, {r6} /* save original page table data */
stmia r3!, {r7} /* save modified page table data */

ldr r2, cache_pred_disable_mask
and r4, r2
mcr p15, 0, r4, c1, c0, 0
dsb
isb

ldr r0, =restoremmu_on
bx r0

/*
* ==============================
* == Exit point from OFF mode ==
* ==============================
*/
restoremmu_on:

ldmfd sp!, {r0-r12, pc} @ restore regs and return
kernel_flush:
.word v7_flush_dcache_all
#endif
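
The stmfd/stmia sequence in sh7372_cpu_suspend writes a fixed-format context record into SMFRAM, and sh7372_cpu_resume reads it back with ldmia in the same order. The following C struct is only an illustration of that layout; the type and field names are invented and are not part of the patch:

/* Illustration only: C view of the context record saved at SMFRAM. */
struct sh7372_sram_context {
	unsigned long sp, spsr, lr;		/* stmia r8!, {r4-r6} */
	unsigned long cpacr;			/* coprocessor access control */
	unsigned long ttbr0, ttbr1, ttbcr;	/* stmia r8!, {r4-r7} */
	unsigned long dacr, prrr, nmrr;		/* stmia r8!, {r4-r6} */
	unsigned long ctxid, tpidrurw, vbar;	/* context ID, thread ID, vectors */
	unsigned long cpsr;			/* stmia r8!, {r4-r7} */
	unsigned long sctlr;			/* saved control register */
};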

@@ -91,6 +91,7 @@ config S390
select HAVE_ARCH_MUTEX_CPU_RELAX
select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
select HAVE_RCU_TABLE_FREE if SMP
select ARCH_SAVE_PAGE_KEYS if HIBERNATION
select ARCH_INLINE_SPIN_TRYLOCK
select ARCH_INLINE_SPIN_TRYLOCK_BH
select ARCH_INLINE_SPIN_LOCK

@@ -7,6 +7,7 @@
*/

#include <linux/pfn.h>
#include <linux/mm.h>
#include <asm/system.h>

/*
@@ -14,6 +15,123 @@
*/
extern const void __nosave_begin, __nosave_end;

/*
* The restore of the saved pages in a hibernation image will set
* the change and referenced bits in the storage key for each page.
* Overindication of the referenced bits after a hibernation cycle
* does not cause any harm, but overindication of the change bits
* would cause trouble.
* Use the ARCH_SAVE_PAGE_KEYS hooks to save the storage key of each
* page to the most significant byte of the associated page frame
* number in the hibernation image.
*/

/*
* Key storage is allocated as a linked list of pages.
* The size of the keys array is (PAGE_SIZE - sizeof(long))
*/
struct page_key_data {
struct page_key_data *next;
unsigned char data[];
};

#define PAGE_KEY_DATA_SIZE (PAGE_SIZE - sizeof(struct page_key_data *))

static struct page_key_data *page_key_data;
static struct page_key_data *page_key_rp, *page_key_wp;
static unsigned long page_key_rx, page_key_wx;

/*
* For each page in the hibernation image one additional byte is
* stored in the most significant byte of the page frame number.
* On suspend no additional memory is required but on resume the
* keys need to be memorized until the page data has been restored.
* Only then can the storage keys be set to their old state.
*/
unsigned long page_key_additional_pages(unsigned long pages)
{
return DIV_ROUND_UP(pages, PAGE_KEY_DATA_SIZE);
}

/*
* Free page_key_data list of arrays.
*/
void page_key_free(void)
{
struct page_key_data *pkd;

while (page_key_data) {
pkd = page_key_data;
page_key_data = pkd->next;
free_page((unsigned long) pkd);
}
}

/*
* Allocate page_key_data list of arrays with enough room to store
* one byte for each page in the hibernation image.
*/
int page_key_alloc(unsigned long pages)
{
struct page_key_data *pk;
unsigned long size;

size = DIV_ROUND_UP(pages, PAGE_KEY_DATA_SIZE);
while (size--) {
pk = (struct page_key_data *) get_zeroed_page(GFP_KERNEL);
if (!pk) {
page_key_free();
return -ENOMEM;
}
pk->next = page_key_data;
page_key_data = pk;
}
page_key_rp = page_key_wp = page_key_data;
page_key_rx = page_key_wx = 0;
return 0;
}

/*
* Save the storage key into the upper 8 bits of the page frame number.
*/
void page_key_read(unsigned long *pfn)
{
unsigned long addr;

addr = (unsigned long) page_address(pfn_to_page(*pfn));
*(unsigned char *) pfn = (unsigned char) page_get_storage_key(addr);
}

/*
* Extract the storage key from the upper 8 bits of the page frame number
* and store it in the page_key_data list of arrays.
*/
void page_key_memorize(unsigned long *pfn)
{
page_key_wp->data[page_key_wx] = *(unsigned char *) pfn;
*(unsigned char *) pfn = 0;
if (++page_key_wx < PAGE_KEY_DATA_SIZE)
return;
page_key_wp = page_key_wp->next;
page_key_wx = 0;
}

/*
* Get the next key from the page_key_data list of arrays and set the
* storage key of the page referred to by @address. If @address refers
* to a "safe" page the swsusp_arch_resume code will transfer the
* storage key from the buffer page to the original page.
*/
void page_key_write(void *address)
{
page_set_storage_key((unsigned long) address,
page_key_rp->data[page_key_rx], 0);
if (++page_key_rx < PAGE_KEY_DATA_SIZE)
return;
page_key_rp = page_key_rp->next;
page_key_rx = 0;
}
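
Taken together, page_key_alloc(), page_key_memorize() and page_key_write() implement a simple byte-per-page store walked by independent write and read cursors that advance to the next array page on wrap (note the `<` comparison, matching page_key_memorize()). The following user-space sketch models only that cursor bookkeeping; the names are hypothetical, LP64 and 4 KiB pages are assumed, and it is not the kernel code:

#include <stdio.h>
#include <stdlib.h>

#define TOY_PAGE_SIZE 4096UL
#define TOY_DATA_SIZE (TOY_PAGE_SIZE - sizeof(void *))	/* mirrors PAGE_KEY_DATA_SIZE */

struct toy_key_page {
	struct toy_key_page *next;
	unsigned char data[TOY_DATA_SIZE];
};

/* Allocate DIV_ROUND_UP(pages, TOY_DATA_SIZE) nodes, as page_key_alloc() does. */
static struct toy_key_page *toy_alloc(unsigned long pages)
{
	unsigned long n = (pages + TOY_DATA_SIZE - 1) / TOY_DATA_SIZE;
	struct toy_key_page *head = NULL;

	while (n--) {
		struct toy_key_page *kp = calloc(1, sizeof(*kp));
		if (!kp)
			exit(1);
		kp->next = head;	/* newest node becomes the list head */
		head = kp;
	}
	return head;
}

int main(void)
{
	unsigned long pages = 3 * TOY_DATA_SIZE + 5;	/* forces node wraps */
	struct toy_key_page *head = toy_alloc(pages);
	struct toy_key_page *wp = head, *rp = head;
	unsigned long wx = 0, rx = 0, i;

	/* "memorize": store one key byte per page, moving to the next node on wrap */
	for (i = 0; i < pages; i++) {
		wp->data[wx] = (unsigned char)i;
		if (++wx == TOY_DATA_SIZE) {
			wp = wp->next;
			wx = 0;
		}
	}
	/* "write back": the read cursor replays the identical walk */
	for (i = 0; i < pages; i++) {
		unsigned char key = rp->data[rx];
		(void)key;	/* the kernel calls page_set_storage_key() here */
		if (++rx == TOY_DATA_SIZE) {
			rp = rp->next;
			rx = 0;
		}
	}
	printf("replayed %lu keys\n", pages);
	return 0;
}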

int pfn_is_nosave(unsigned long pfn)
{
unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));
@@ -136,11 +136,14 @@ ENTRY(swsusp_arch_resume)
0:
lg %r2,8(%r1)
lg %r4,0(%r1)
iske %r0,%r4
lghi %r3,PAGE_SIZE
lghi %r5,PAGE_SIZE
1:
mvcle %r2,%r4,0
jo 1b
lg %r2,8(%r1)
sske %r0,%r2
lg %r1,16(%r1)
ltgr %r1,%r1
jnz 0b

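Register usage in the loop above can be summarized as a hypothetical C view of one element of the list that %r1 walks (names invented; the apparent point is that iske reads the storage key from the source page and sske applies it to the destination after mvcle copies the data, so keys travel with the page contents):

/* Illustration only: what %r1 appears to point at in the loop above. */
struct resume_entry {
	unsigned long from;		/* 0(%r1): source page, key read via iske */
	unsigned long to;		/* 8(%r1): destination page, mvcle target */
	struct resume_entry *next;	/* 16(%r1): ltgr/jnz terminates on NULL */
};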

@@ -132,4 +132,6 @@ source "drivers/iommu/Kconfig"

source "drivers/virt/Kconfig"

source "drivers/devfreq/Kconfig"

endmenu

@@ -129,3 +129,5 @@ obj-$(CONFIG_IOMMU_SUPPORT) += iommu/

# Virtualization drivers
obj-$(CONFIG_VIRT_DRIVERS) += virt/

obj-$(CONFIG_PM_DEVFREQ) += devfreq/

@@ -37,7 +37,7 @@
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/sched.h> /* need_resched() */
#include <linux/pm_qos_params.h>
#include <linux/pm_qos.h>
#include <linux/clockchips.h>
#include <linux/cpuidle.h>
#include <linux/irqflags.h>

@@ -390,6 +390,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
},
{
.callback = init_nvs_nosave,
.ident = "Sony Vaio VGN-FW21E",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21E"),
},
},
{
.callback = init_nvs_nosave,
.ident = "Sony Vaio VGN-SR11M",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
@@ -444,6 +452,22 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"),
},
},
{
.callback = init_nvs_nosave,
.ident = "Sony Vaio VGN-SR26GN_P",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR26GN_P"),
},
},
{
.callback = init_nvs_nosave,
.ident = "Sony Vaio VGN-FW520F",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"),
},
},
{},
};
#endif /* CONFIG_SUSPEND */

@@ -1,4 +1,4 @@
obj-$(CONFIG_PM) += sysfs.o generic_ops.o
obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o
obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o
obj-$(CONFIG_PM_RUNTIME) += runtime.o
obj-$(CONFIG_PM_TRACE_RTC) += trace.o

@@ -10,18 +10,13 @@
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm_clock.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/err.h>

#ifdef CONFIG_PM

struct pm_clk_data {
struct list_head clock_list;
spinlock_t lock;
};

enum pce_status {
PCE_STATUS_NONE = 0,
PCE_STATUS_ACQUIRED,
@@ -36,11 +31,6 @@ struct pm_clock_entry {
enum pce_status status;
};

static struct pm_clk_data *__to_pcd(struct device *dev)
{
return dev ? dev->power.subsys_data : NULL;
}

/**
* pm_clk_acquire - Acquire a device clock.
* @dev: Device whose clock is to be acquired.
@@ -67,10 +57,10 @@ static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
*/
int pm_clk_add(struct device *dev, const char *con_id)
{
struct pm_clk_data *pcd = __to_pcd(dev);
struct pm_subsys_data *psd = dev_to_psd(dev);
struct pm_clock_entry *ce;

if (!pcd)
if (!psd)
return -EINVAL;

ce = kzalloc(sizeof(*ce), GFP_KERNEL);
@@ -91,9 +81,9 @@ int pm_clk_add(struct device *dev, const char *con_id)

pm_clk_acquire(dev, ce);

spin_lock_irq(&pcd->lock);
list_add_tail(&ce->node, &pcd->clock_list);
spin_unlock_irq(&pcd->lock);
spin_lock_irq(&psd->lock);
list_add_tail(&ce->node, &psd->clock_list);
spin_unlock_irq(&psd->lock);
return 0;
}

@@ -114,9 +104,7 @@ static void __pm_clk_remove(struct pm_clock_entry *ce)
clk_put(ce->clk);
}

if (ce->con_id)
kfree(ce->con_id);

kfree(ce);
}

@@ -130,15 +118,15 @@ static void __pm_clk_remove(struct pm_clock_entry *ce)
*/
void pm_clk_remove(struct device *dev, const char *con_id)
{
struct pm_clk_data *pcd = __to_pcd(dev);
struct pm_subsys_data *psd = dev_to_psd(dev);
struct pm_clock_entry *ce;

if (!pcd)
if (!psd)
return;

spin_lock_irq(&pcd->lock);
spin_lock_irq(&psd->lock);

list_for_each_entry(ce, &pcd->clock_list, node) {
list_for_each_entry(ce, &psd->clock_list, node) {
if (!con_id && !ce->con_id)
goto remove;
else if (!con_id || !ce->con_id)
@@ -147,12 +135,12 @@ void pm_clk_remove(struct device *dev, const char *con_id)
goto remove;
}

spin_unlock_irq(&pcd->lock);
spin_unlock_irq(&psd->lock);
return;

remove:
list_del(&ce->node);
spin_unlock_irq(&pcd->lock);
spin_unlock_irq(&psd->lock);

__pm_clk_remove(ce);
}
@@ -161,23 +149,27 @@ void pm_clk_remove(struct device *dev, const char *con_id)
* pm_clk_init - Initialize a device's list of power management clocks.
* @dev: Device to initialize the list of PM clocks for.
*
* Allocate a struct pm_clk_data object, initialize its lock member and
* make the @dev's power.subsys_data field point to it.
* Initialize the lock and clock_list members of the device's pm_subsys_data
* object.
*/
int pm_clk_init(struct device *dev)
void pm_clk_init(struct device *dev)
{
struct pm_clk_data *pcd;
struct pm_subsys_data *psd = dev_to_psd(dev);
if (psd)
INIT_LIST_HEAD(&psd->clock_list);
}

pcd = kzalloc(sizeof(*pcd), GFP_KERNEL);
if (!pcd) {
dev_err(dev, "Not enough memory for PM clock data.\n");
return -ENOMEM;
}

INIT_LIST_HEAD(&pcd->clock_list);
spin_lock_init(&pcd->lock);
dev->power.subsys_data = pcd;
return 0;
/**
* pm_clk_create - Create and initialize a device's list of PM clocks.
* @dev: Device to create and initialize the list of PM clocks for.
*
* Allocate a struct pm_subsys_data object, initialize its lock and clock_list
* members and make the @dev's power.subsys_data field point to it.
*/
int pm_clk_create(struct device *dev)
{
int ret = dev_pm_get_subsys_data(dev);
return ret < 0 ? ret : 0;
}

/**
@@ -185,29 +177,28 @@ int pm_clk_init(struct device *dev)
* @dev: Device to destroy the list of PM clocks for.
*
* Clear the @dev's power.subsys_data field, remove the list of clock entries
* from the struct pm_clk_data object pointed to by it before and free
* from the struct pm_subsys_data object pointed to by it before and free
* that object.
*/
void pm_clk_destroy(struct device *dev)
{
struct pm_clk_data *pcd = __to_pcd(dev);
struct pm_subsys_data *psd = dev_to_psd(dev);
struct pm_clock_entry *ce, *c;
struct list_head list;

if (!pcd)
if (!psd)
return;

dev->power.subsys_data = NULL;
INIT_LIST_HEAD(&list);

spin_lock_irq(&pcd->lock);
spin_lock_irq(&psd->lock);

list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node)
list_for_each_entry_safe_reverse(ce, c, &psd->clock_list, node)
list_move(&ce->node, &list);

spin_unlock_irq(&pcd->lock);
spin_unlock_irq(&psd->lock);

kfree(pcd);
dev_pm_put_subsys_data(dev);

list_for_each_entry_safe_reverse(ce, c, &list, node) {
list_del(&ce->node);
@@ -225,25 +216,25 @@ void pm_clk_destroy(struct device *dev)
*/
int pm_clk_suspend(struct device *dev)
{
struct pm_clk_data *pcd = __to_pcd(dev);
struct pm_subsys_data *psd = dev_to_psd(dev);
struct pm_clock_entry *ce;
unsigned long flags;

dev_dbg(dev, "%s()\n", __func__);

if (!pcd)
if (!psd)
return 0;

spin_lock_irqsave(&pcd->lock, flags);
spin_lock_irqsave(&psd->lock, flags);

list_for_each_entry_reverse(ce, &pcd->clock_list, node) {
list_for_each_entry_reverse(ce, &psd->clock_list, node) {
if (ce->status < PCE_STATUS_ERROR) {
clk_disable(ce->clk);
ce->status = PCE_STATUS_ACQUIRED;
}
}

spin_unlock_irqrestore(&pcd->lock, flags);
spin_unlock_irqrestore(&psd->lock, flags);

return 0;
}
@@ -254,25 +245,25 @@ int pm_clk_suspend(struct device *dev)
*/
int pm_clk_resume(struct device *dev)
{
struct pm_clk_data *pcd = __to_pcd(dev);
struct pm_subsys_data *psd = dev_to_psd(dev);
struct pm_clock_entry *ce;
unsigned long flags;

dev_dbg(dev, "%s()\n", __func__);

if (!pcd)
if (!psd)
return 0;

spin_lock_irqsave(&pcd->lock, flags);
spin_lock_irqsave(&psd->lock, flags);

list_for_each_entry(ce, &pcd->clock_list, node) {
list_for_each_entry(ce, &psd->clock_list, node) {
if (ce->status < PCE_STATUS_ERROR) {
clk_enable(ce->clk);
ce->status = PCE_STATUS_ENABLED;
}
}

spin_unlock_irqrestore(&pcd->lock, flags);
spin_unlock_irqrestore(&psd->lock, flags);

return 0;
}
@@ -310,7 +301,7 @@ static int pm_clk_notify(struct notifier_block *nb,
if (dev->pm_domain)
break;

error = pm_clk_init(dev);
error = pm_clk_create(dev);
if (error)
break;

@@ -345,22 +336,22 @@ static int pm_clk_notify(struct notifier_block *nb,
*/
int pm_clk_suspend(struct device *dev)
{
struct pm_clk_data *pcd = __to_pcd(dev);
struct pm_subsys_data *psd = dev_to_psd(dev);
struct pm_clock_entry *ce;
unsigned long flags;

dev_dbg(dev, "%s()\n", __func__);

/* If there is no driver, the clocks are already disabled. */
if (!pcd || !dev->driver)
if (!psd || !dev->driver)
return 0;

spin_lock_irqsave(&pcd->lock, flags);
spin_lock_irqsave(&psd->lock, flags);

list_for_each_entry_reverse(ce, &pcd->clock_list, node)
list_for_each_entry_reverse(ce, &psd->clock_list, node)
clk_disable(ce->clk);

spin_unlock_irqrestore(&pcd->lock, flags);
spin_unlock_irqrestore(&psd->lock, flags);

return 0;
}
@@ -371,22 +362,22 @@ int pm_clk_suspend(struct device *dev)
*/
int pm_clk_resume(struct device *dev)
{
struct pm_clk_data *pcd = __to_pcd(dev);
struct pm_subsys_data *psd = dev_to_psd(dev);
struct pm_clock_entry *ce;
unsigned long flags;

dev_dbg(dev, "%s()\n", __func__);

/* If there is no driver, the clocks should remain disabled. */
if (!pcd || !dev->driver)
if (!psd || !dev->driver)
return 0;

spin_lock_irqsave(&pcd->lock, flags);
spin_lock_irqsave(&psd->lock, flags);

list_for_each_entry(ce, &pcd->clock_list, node)
list_for_each_entry(ce, &psd->clock_list, node)
clk_enable(ce->clk);

spin_unlock_irqrestore(&pcd->lock, flags);
spin_unlock_irqrestore(&psd->lock, flags);

return 0;
}

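With pm_clk_init() reduced to list initialization, callers are expected to go through pm_clk_create()/pm_clk_destroy() instead. A hedged sketch of the intended calling pattern, with invented foo_* names that are not taken from the patch:

#include <linux/device.h>
#include <linux/pm_clock.h>

/* Hypothetical bus/platform code attaching PM clocks to a device. */
static int foo_bus_add_device(struct device *dev)
{
	int ret;

	ret = pm_clk_create(dev);	/* allocates/refcounts pm_subsys_data */
	if (ret < 0)
		return ret;

	ret = pm_clk_add(dev, NULL);	/* default (con_id-less) clock */
	if (ret)
		pm_clk_destroy(dev);	/* drops the subsys_data reference */
	return ret;
}

static int foo_runtime_suspend(struct device *dev)
{
	return pm_clk_suspend(dev);	/* disables all clocks on the list */
}

static int foo_runtime_resume(struct device *dev)
{
	return pm_clk_resume(dev);	/* re-enables them in list order */
}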
86
drivers/base/power/common.c
Normal file
@@ -0,0 +1,86 @@
/*
* drivers/base/power/common.c - Common device power management code.
*
* Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
*
* This file is released under the GPLv2.
*/

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pm_clock.h>

/**
* dev_pm_get_subsys_data - Create or refcount power.subsys_data for device.
* @dev: Device to handle.
*
* If power.subsys_data is NULL, point it to a new object, otherwise increment
* its reference counter. Return 1 if a new object has been created, otherwise
* return 0 or error code.
*/
int dev_pm_get_subsys_data(struct device *dev)
{
struct pm_subsys_data *psd;
int ret = 0;

psd = kzalloc(sizeof(*psd), GFP_KERNEL);
if (!psd)
return -ENOMEM;

spin_lock_irq(&dev->power.lock);

if (dev->power.subsys_data) {
dev->power.subsys_data->refcount++;
} else {
spin_lock_init(&psd->lock);
psd->refcount = 1;
dev->power.subsys_data = psd;
pm_clk_init(dev);
psd = NULL;
ret = 1;
}

spin_unlock_irq(&dev->power.lock);

/* kfree() verifies that its argument is nonzero. */
kfree(psd);

return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data);

/**
* dev_pm_put_subsys_data - Drop reference to power.subsys_data.
* @dev: Device to handle.
*
* If the reference counter of power.subsys_data is zero after dropping the
* reference, power.subsys_data is removed. Return 1 if that happens or 0
* otherwise.
*/
int dev_pm_put_subsys_data(struct device *dev)
{
struct pm_subsys_data *psd;
int ret = 0;

spin_lock_irq(&dev->power.lock);

psd = dev_to_psd(dev);
if (!psd) {
ret = -EINVAL;
goto out;
}

if (--psd->refcount == 0) {
dev->power.subsys_data = NULL;
kfree(psd);
ret = 1;
}

out:
spin_unlock_irq(&dev->power.lock);

return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data);
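
A sketch of the intended pairing, with hypothetical subsystem hooks: every successful dev_pm_get_subsys_data() should eventually be balanced by dev_pm_put_subsys_data(), and the first get also runs pm_clk_init() on the freshly created object:

/* Hypothetical subsystem attach/detach hooks (illustration only). */
static int foo_subsys_attach(struct device *dev)
{
	int ret = dev_pm_get_subsys_data(dev);

	if (ret < 0)		/* -ENOMEM etc. */
		return ret;
	/* ret == 1: object freshly created; ret == 0: refcount bumped */
	return 0;
}

static void foo_subsys_detach(struct device *dev)
{
	dev_pm_put_subsys_data(dev);	/* frees on the last reference */
}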
@@ -29,10 +29,20 @@ static struct generic_pm_domain *dev_to_genpd(struct device *dev)
return pd_to_genpd(dev->pm_domain);
}

static void genpd_sd_counter_dec(struct generic_pm_domain *genpd)
static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
if (!WARN_ON(genpd->sd_count == 0))
genpd->sd_count--;
bool ret = false;

if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
ret = !!atomic_dec_and_test(&genpd->sd_count);

return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
atomic_inc(&genpd->sd_count);
smp_mb__after_atomic_inc();
}

static void genpd_acquire_lock(struct generic_pm_domain *genpd)
@@ -71,60 +81,97 @@ static void genpd_set_active(struct generic_pm_domain *genpd)
}

/**
* pm_genpd_poweron - Restore power to a given PM domain and its parents.
* __pm_genpd_poweron - Restore power to a given PM domain and its masters.
* @genpd: PM domain to power up.
*
* Restore power to @genpd and all of its parents so that it is possible to
* Restore power to @genpd and all of its masters so that it is possible to
* resume a device belonging to it.
*/
int pm_genpd_poweron(struct generic_pm_domain *genpd)
int __pm_genpd_poweron(struct generic_pm_domain *genpd)
__releases(&genpd->lock) __acquires(&genpd->lock)
{
struct generic_pm_domain *parent = genpd->parent;
struct gpd_link *link;
DEFINE_WAIT(wait);
int ret = 0;

start:
if (parent) {
genpd_acquire_lock(parent);
mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
} else {
/* If the domain's master is being waited for, we have to wait too. */
for (;;) {
prepare_to_wait(&genpd->status_wait_queue, &wait,
TASK_UNINTERRUPTIBLE);
if (genpd->status != GPD_STATE_WAIT_MASTER)
break;
mutex_unlock(&genpd->lock);

schedule();

mutex_lock(&genpd->lock);
}
finish_wait(&genpd->status_wait_queue, &wait);

if (genpd->status == GPD_STATE_ACTIVE
|| (genpd->prepared_count > 0 && genpd->suspend_power_off))
goto out;
return 0;

if (genpd->status != GPD_STATE_POWER_OFF) {
genpd_set_active(genpd);
goto out;
return 0;
}

if (parent && parent->status != GPD_STATE_ACTIVE) {
/*
* The list is guaranteed not to change while the loop below is being
* executed, unless one of the masters' .power_on() callbacks fiddles
* with it.
*/
list_for_each_entry(link, &genpd->slave_links, slave_node) {
genpd_sd_counter_inc(link->master);
genpd->status = GPD_STATE_WAIT_MASTER;

mutex_unlock(&genpd->lock);
genpd_release_lock(parent);

ret = pm_genpd_poweron(parent);
if (ret)
return ret;
ret = pm_genpd_poweron(link->master);

goto start;
mutex_lock(&genpd->lock);

/*
* The "wait for parent" status is guaranteed not to change
* while the master is powering on.
*/
genpd->status = GPD_STATE_POWER_OFF;
wake_up_all(&genpd->status_wait_queue);
if (ret) {
genpd_sd_counter_dec(link->master);
goto err;
}
}

if (genpd->power_on) {
ret = genpd->power_on(genpd);
if (ret)
goto out;
goto err;
}

genpd_set_active(genpd);
if (parent)
parent->sd_count++;

out:
return 0;

err:
list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
genpd_sd_counter_dec(link->master);

return ret;
}

/**
* pm_genpd_poweron - Restore power to a given PM domain and its masters.
* @genpd: PM domain to power up.
*/
int pm_genpd_poweron(struct generic_pm_domain *genpd)
{
int ret;

mutex_lock(&genpd->lock);
ret = __pm_genpd_poweron(genpd);
mutex_unlock(&genpd->lock);
if (parent)
genpd_release_lock(parent);

return ret;
}

@@ -134,18 +181,19 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd)

/**
* __pm_genpd_save_device - Save the pre-suspend state of a device.
* @dle: Device list entry of the device to save the state of.
* @pdd: Domain data of the device to save the state of.
* @genpd: PM domain the device belongs to.
*/
static int __pm_genpd_save_device(struct dev_list_entry *dle,
static int __pm_genpd_save_device(struct pm_domain_data *pdd,
struct generic_pm_domain *genpd)
__releases(&genpd->lock) __acquires(&genpd->lock)
{
struct device *dev = dle->dev;
struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
struct device *dev = pdd->dev;
struct device_driver *drv = dev->driver;
int ret = 0;

if (dle->need_restore)
if (gpd_data->need_restore)
return 0;

mutex_unlock(&genpd->lock);
@@ -163,24 +211,25 @@ static int __pm_genpd_save_device(struct dev_list_entry *dle,
mutex_lock(&genpd->lock);

if (!ret)
dle->need_restore = true;
gpd_data->need_restore = true;

return ret;
}

/**
* __pm_genpd_restore_device - Restore the pre-suspend state of a device.
* @dle: Device list entry of the device to restore the state of.
* @pdd: Domain data of the device to restore the state of.
* @genpd: PM domain the device belongs to.
*/
static void __pm_genpd_restore_device(struct dev_list_entry *dle,
static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
struct generic_pm_domain *genpd)
__releases(&genpd->lock) __acquires(&genpd->lock)
{
struct device *dev = dle->dev;
struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
struct device *dev = pdd->dev;
struct device_driver *drv = dev->driver;

if (!dle->need_restore)
if (!gpd_data->need_restore)
return;

mutex_unlock(&genpd->lock);
@@ -197,7 +246,7 @@ static void __pm_genpd_restore_device(struct dev_list_entry *dle,

mutex_lock(&genpd->lock);

dle->need_restore = false;
gpd_data->need_restore = false;
}

/**
@@ -211,7 +260,8 @@ static void __pm_genpd_restore_device(struct dev_list_entry *dle,
*/
static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
{
return genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
return genpd->status == GPD_STATE_WAIT_MASTER
|| genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
}

/**
@@ -238,8 +288,8 @@ void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
__releases(&genpd->lock) __acquires(&genpd->lock)
{
struct generic_pm_domain *parent;
struct dev_list_entry *dle;
struct pm_domain_data *pdd;
struct gpd_link *link;
unsigned int not_suspended;
int ret = 0;

@@ -247,19 +297,22 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
/*
* Do not try to power off the domain in the following situations:
* (1) The domain is already in the "power off" state.
* (2) System suspend is in progress.
* (2) The domain is waiting for its master to power up.
* (3) One of the domain's devices is being resumed right now.
* (4) System suspend is in progress.
*/
if (genpd->status == GPD_STATE_POWER_OFF || genpd->prepared_count > 0
|| genpd->resume_count > 0)
if (genpd->status == GPD_STATE_POWER_OFF
|| genpd->status == GPD_STATE_WAIT_MASTER
|| genpd->resume_count > 0 || genpd->prepared_count > 0)
return 0;

if (genpd->sd_count > 0)
if (atomic_read(&genpd->sd_count) > 0)
return -EBUSY;

not_suspended = 0;
list_for_each_entry(dle, &genpd->dev_list, node)
if (dle->dev->driver && !pm_runtime_suspended(dle->dev))
list_for_each_entry(pdd, &genpd->dev_list, list_node)
if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
|| pdd->dev->power.irq_safe))
not_suspended++;

if (not_suspended > genpd->in_progress)
@@ -282,54 +335,50 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
genpd->status = GPD_STATE_BUSY;
genpd->poweroff_task = current;

list_for_each_entry_reverse(dle, &genpd->dev_list, node) {
ret = __pm_genpd_save_device(dle, genpd);
list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
ret = atomic_read(&genpd->sd_count) == 0 ?
__pm_genpd_save_device(pdd, genpd) : -EBUSY;

if (genpd_abort_poweroff(genpd))
goto out;

if (ret) {
genpd_set_active(genpd);
goto out;
}

if (genpd_abort_poweroff(genpd))
goto out;

if (genpd->status == GPD_STATE_REPEAT) {
genpd->poweroff_task = NULL;
goto start;
}
}

parent = genpd->parent;
if (parent) {
mutex_unlock(&genpd->lock);

genpd_acquire_lock(parent);
mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);

if (genpd_abort_poweroff(genpd)) {
genpd_release_lock(parent);
if (genpd->power_off) {
if (atomic_read(&genpd->sd_count) > 0) {
ret = -EBUSY;
goto out;
}
}

if (genpd->power_off) {
/*
* If sd_count > 0 at this point, one of the subdomains hasn't
* managed to call pm_genpd_poweron() for the master yet after
* incrementing it. In that case pm_genpd_poweron() will wait
* for us to drop the lock, so we can call .power_off() and let
* the pm_genpd_poweron() restore power for us (this shouldn't
* happen very often).
*/
ret = genpd->power_off(genpd);
if (ret == -EBUSY) {
genpd_set_active(genpd);
if (parent)
genpd_release_lock(parent);

goto out;
}
}

genpd->status = GPD_STATE_POWER_OFF;

if (parent) {
genpd_sd_counter_dec(parent);
if (parent->sd_count == 0)
genpd_queue_power_off_work(parent);

genpd_release_lock(parent);
list_for_each_entry(link, &genpd->slave_links, slave_node) {
genpd_sd_counter_dec(link->master);
genpd_queue_power_off_work(link->master);
}

out:
@@ -371,12 +420,21 @@ static int pm_genpd_runtime_suspend(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;

might_sleep_if(!genpd->dev_irq_safe);

if (genpd->stop_device) {
int ret = genpd->stop_device(dev);
if (ret)
return ret;
}

/*
* If power.irq_safe is set, this routine will be run with interrupts
* off, so it can't use mutexes.
*/
if (dev->power.irq_safe)
return 0;

mutex_lock(&genpd->lock);
genpd->in_progress++;
pm_genpd_poweroff(genpd);
@@ -386,24 +444,6 @@ static int pm_genpd_runtime_suspend(struct device *dev)
return 0;
}

/**
* __pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
* @dev: Device to resume.
* @genpd: PM domain the device belongs to.
*/
static void __pm_genpd_runtime_resume(struct device *dev,
struct generic_pm_domain *genpd)
{
struct dev_list_entry *dle;

list_for_each_entry(dle, &genpd->dev_list, node) {
if (dle->dev == dev) {
__pm_genpd_restore_device(dle, genpd);
break;
}
}
}

/**
* pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
* @dev: Device to resume.
@@ -424,11 +464,18 @@ static int pm_genpd_runtime_resume(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;

ret = pm_genpd_poweron(genpd);
if (ret)
return ret;
might_sleep_if(!genpd->dev_irq_safe);

/* If power.irq_safe, the PM domain is never powered off. */
if (dev->power.irq_safe)
goto out;

mutex_lock(&genpd->lock);
ret = __pm_genpd_poweron(genpd);
if (ret) {
mutex_unlock(&genpd->lock);
return ret;
}
genpd->status = GPD_STATE_BUSY;
genpd->resume_count++;
for (;;) {
@@ -448,12 +495,13 @@ static int pm_genpd_runtime_resume(struct device *dev)
mutex_lock(&genpd->lock);
}
finish_wait(&genpd->status_wait_queue, &wait);
__pm_genpd_runtime_resume(dev, genpd);
__pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
genpd->resume_count--;
genpd_set_active(genpd);
wake_up_all(&genpd->status_wait_queue);
mutex_unlock(&genpd->lock);

out:
if (genpd->start_device)
genpd->start_device(dev);

@@ -478,8 +526,6 @@ void pm_genpd_poweroff_unused(void)
#else

static inline void genpd_power_off_work_fn(struct work_struct *work) {}
static inline void __pm_genpd_runtime_resume(struct device *dev,
struct generic_pm_domain *genpd) {}

#define pm_genpd_runtime_suspend NULL
#define pm_genpd_runtime_resume NULL
@@ -489,11 +535,11 @@ static inline void __pm_genpd_runtime_resume(struct device *dev,
#ifdef CONFIG_PM_SLEEP

/**
* pm_genpd_sync_poweroff - Synchronously power off a PM domain and its parents.
* pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
* @genpd: PM domain to power off, if possible.
*
* Check if the given PM domain can be powered off (during system suspend or
* hibernation) and do that if so. Also, in that case propagate to its parent.
* hibernation) and do that if so. Also, in that case propagate to its masters.
*
* This function is only called in "noirq" stages of system power transitions,
* so it need not acquire locks (all of the "noirq" callbacks are executed
@@ -501,21 +547,23 @@ static inline void __pm_genpd_runtime_resume(struct device *dev,
*/
static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
{
struct generic_pm_domain *parent = genpd->parent;
struct gpd_link *link;

if (genpd->status == GPD_STATE_POWER_OFF)
return;

if (genpd->suspended_count != genpd->device_count || genpd->sd_count > 0)
if (genpd->suspended_count != genpd->device_count
|| atomic_read(&genpd->sd_count) > 0)
return;

if (genpd->power_off)
genpd->power_off(genpd);

genpd->status = GPD_STATE_POWER_OFF;
if (parent) {
genpd_sd_counter_dec(parent);
pm_genpd_sync_poweroff(parent);

list_for_each_entry(link, &genpd->slave_links, slave_node) {
genpd_sd_counter_dec(link->master);
pm_genpd_sync_poweroff(link->master);
}
}

@@ -666,7 +714,7 @@ static int pm_genpd_suspend_noirq(struct device *dev)
if (ret)
return ret;

if (device_may_wakeup(dev)
if (dev->power.wakeup_path
&& genpd->active_wakeup && genpd->active_wakeup(dev))
return 0;

@@ -890,7 +938,7 @@ static int pm_genpd_dev_poweroff_noirq(struct device *dev)
if (ret)
return ret;

if (device_may_wakeup(dev)
if (dev->power.wakeup_path
&& genpd->active_wakeup && genpd->active_wakeup(dev))
return 0;

@@ -1034,7 +1082,8 @@ static void pm_genpd_complete(struct device *dev)
*/
int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
{
struct dev_list_entry *dle;
struct generic_pm_domain_data *gpd_data;
struct pm_domain_data *pdd;
int ret = 0;

dev_dbg(dev, "%s()\n", __func__);
@@ -1054,26 +1103,26 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
goto out;
}

list_for_each_entry(dle, &genpd->dev_list, node)
if (dle->dev == dev) {
list_for_each_entry(pdd, &genpd->dev_list, list_node)
if (pdd->dev == dev) {
ret = -EINVAL;
goto out;
}

dle = kzalloc(sizeof(*dle), GFP_KERNEL);
if (!dle) {
gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
if (!gpd_data) {
ret = -ENOMEM;
goto out;
}

dle->dev = dev;
dle->need_restore = false;
list_add_tail(&dle->node, &genpd->dev_list);
genpd->device_count++;

spin_lock_irq(&dev->power.lock);
dev->pm_domain = &genpd->domain;
spin_unlock_irq(&dev->power.lock);
dev_pm_get_subsys_data(dev);
dev->power.subsys_data->domain_data = &gpd_data->base;
gpd_data->base.dev = dev;
gpd_data->need_restore = false;
list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);

out:
genpd_release_lock(genpd);
@@ -1089,7 +1138,7 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
int pm_genpd_remove_device(struct generic_pm_domain *genpd,
struct device *dev)
{
struct dev_list_entry *dle;
struct pm_domain_data *pdd;
int ret = -EINVAL;

dev_dbg(dev, "%s()\n", __func__);
@@ -1104,17 +1153,17 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
goto out;
}

list_for_each_entry(dle, &genpd->dev_list, node) {
if (dle->dev != dev)
list_for_each_entry(pdd, &genpd->dev_list, list_node) {
if (pdd->dev != dev)
continue;

spin_lock_irq(&dev->power.lock);
list_del_init(&pdd->list_node);
pdd->dev = NULL;
dev_pm_put_subsys_data(dev);
dev->pm_domain = NULL;
spin_unlock_irq(&dev->power.lock);
kfree(to_gpd_data(pdd));

genpd->device_count--;
list_del(&dle->node);
kfree(dle);

ret = 0;
break;
@@ -1129,48 +1178,55 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
/**
* pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
* @genpd: Master PM domain to add the subdomain to.
* @new_subdomain: Subdomain to be added.
* @subdomain: Subdomain to be added.
*/
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
struct generic_pm_domain *new_subdomain)
struct generic_pm_domain *subdomain)
{
struct generic_pm_domain *subdomain;
struct gpd_link *link;
int ret = 0;

if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(new_subdomain))
if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
return -EINVAL;

start:
genpd_acquire_lock(genpd);
mutex_lock_nested(&new_subdomain->lock, SINGLE_DEPTH_NESTING);
mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);

if (new_subdomain->status != GPD_STATE_POWER_OFF
&& new_subdomain->status != GPD_STATE_ACTIVE) {
mutex_unlock(&new_subdomain->lock);
if (subdomain->status != GPD_STATE_POWER_OFF
&& subdomain->status != GPD_STATE_ACTIVE) {
mutex_unlock(&subdomain->lock);
genpd_release_lock(genpd);
goto start;
}

if (genpd->status == GPD_STATE_POWER_OFF
&& new_subdomain->status != GPD_STATE_POWER_OFF) {
&& subdomain->status != GPD_STATE_POWER_OFF) {
ret = -EINVAL;
goto out;
}

list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
if (subdomain == new_subdomain) {
list_for_each_entry(link, &genpd->slave_links, slave_node) {
if (link->slave == subdomain && link->master == genpd) {
ret = -EINVAL;
goto out;
}
}

list_add_tail(&new_subdomain->sd_node, &genpd->sd_list);
new_subdomain->parent = genpd;
link = kzalloc(sizeof(*link), GFP_KERNEL);
if (!link) {
ret = -ENOMEM;
goto out;
}
link->master = genpd;
list_add_tail(&link->master_node, &genpd->master_links);
link->slave = subdomain;
list_add_tail(&link->slave_node, &subdomain->slave_links);
if (subdomain->status != GPD_STATE_POWER_OFF)
genpd->sd_count++;
genpd_sd_counter_inc(genpd);

out:
mutex_unlock(&new_subdomain->lock);
mutex_unlock(&subdomain->lock);
genpd_release_lock(genpd);

return ret;
@@ -1179,22 +1235,22 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
/**
* pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
* @genpd: Master PM domain to remove the subdomain from.
* @target: Subdomain to be removed.
* @subdomain: Subdomain to be removed.
*/
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
struct generic_pm_domain *target)
struct generic_pm_domain *subdomain)
{
struct generic_pm_domain *subdomain;
struct gpd_link *link;
int ret = -EINVAL;

if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(target))
if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
return -EINVAL;

start:
genpd_acquire_lock(genpd);

list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
if (subdomain != target)
list_for_each_entry(link, &genpd->master_links, master_node) {
if (link->slave != subdomain)
continue;

mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
@@ -1206,8 +1262,9 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
goto start;
}

list_del(&subdomain->sd_node);
subdomain->parent = NULL;
list_del(&link->master_node);
list_del(&link->slave_node);
kfree(link);
if (subdomain->status != GPD_STATE_POWER_OFF)
genpd_sd_counter_dec(genpd);

@@ -1234,15 +1291,14 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
if (IS_ERR_OR_NULL(genpd))
return;

INIT_LIST_HEAD(&genpd->sd_node);
genpd->parent = NULL;
INIT_LIST_HEAD(&genpd->master_links);
INIT_LIST_HEAD(&genpd->slave_links);
INIT_LIST_HEAD(&genpd->dev_list);
INIT_LIST_HEAD(&genpd->sd_list);
mutex_init(&genpd->lock);
genpd->gov = gov;
INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
genpd->in_progress = 0;
genpd->sd_count = 0;
atomic_set(&genpd->sd_count, 0);
genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
init_waitqueue_head(&genpd->status_wait_queue);
genpd->poweroff_task = NULL;

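With the single parent pointer and sd_list replaced by gpd_link master/slave lists, platform setup code wires domains roughly as below. This is a hedged sketch with invented foo_* names and a placeholder device; it is not taken from the patch:

#include <linux/platform_device.h>
#include <linux/pm_domain.h>

static struct generic_pm_domain foo_master_domain;	/* invented names */
static struct generic_pm_domain foo_slave_domain;
extern struct platform_device foo_pdev;			/* placeholder device */

static int __init foo_pm_domains_init(void)
{
	int ret;

	pm_genpd_init(&foo_master_domain, NULL, false);	/* no governor, starts on */
	pm_genpd_init(&foo_slave_domain, NULL, false);

	/* Creates a gpd_link: master_links on the master, slave_links on the slave. */
	ret = pm_genpd_add_subdomain(&foo_master_domain, &foo_slave_domain);
	if (ret)
		return ret;

	/* Attaches the device and allocates its generic_pm_domain_data. */
	return pm_genpd_add_device(&foo_slave_domain, &foo_pdev.dev);
}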
@@ -46,6 +46,7 @@ LIST_HEAD(dpm_prepared_list);
LIST_HEAD(dpm_suspended_list);
LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

@@ -65,6 +66,7 @@ void device_pm_init(struct device *dev)
spin_lock_init(&dev->power.lock);
pm_runtime_init(dev);
INIT_LIST_HEAD(&dev->power.entry);
dev->power.power_state = PMSG_INVALID;
}

/**
@@ -96,6 +98,7 @@ void device_pm_add(struct device *dev)
dev_warn(dev, "parent %s should not be sleeping\n",
dev_name(dev->parent));
list_add_tail(&dev->power.entry, &dpm_list);
dev_pm_qos_constraints_init(dev);
mutex_unlock(&dpm_list_mtx);
}

@@ -109,6 +112,7 @@ void device_pm_remove(struct device *dev)
dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
complete_all(&dev->power.completion);
mutex_lock(&dpm_list_mtx);
dev_pm_qos_constraints_destroy(dev);
list_del_init(&dev->power.entry);
mutex_unlock(&dpm_list_mtx);
device_wakeup_disable(dev);
@@ -464,8 +468,12 @@ void dpm_resume_noirq(pm_message_t state)
mutex_unlock(&dpm_list_mtx);

error = device_resume_noirq(dev, state);
if (error)
if (error) {
suspend_stats.failed_resume_noirq++;
dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, " early", error);
}

mutex_lock(&dpm_list_mtx);
put_device(dev);
@@ -626,8 +634,12 @@ void dpm_resume(pm_message_t state)
mutex_unlock(&dpm_list_mtx);

error = device_resume(dev, state, false);
if (error)
if (error) {
suspend_stats.failed_resume++;
dpm_save_failed_step(SUSPEND_RESUME);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, "", error);
}

mutex_lock(&dpm_list_mtx);
}
@@ -802,6 +814,9 @@ int dpm_suspend_noirq(pm_message_t state)
mutex_lock(&dpm_list_mtx);
if (error) {
pm_dev_err(dev, state, " late", error);
suspend_stats.failed_suspend_noirq++;
dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
dpm_save_failed_dev(dev_name(dev));
put_device(dev);
break;
}
@@ -902,7 +917,11 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
}

End:
dev->power.is_suspended = !error;
if (!error) {
dev->power.is_suspended = true;
if (dev->power.wakeup_path && dev->parent)
dev->parent->power.wakeup_path = true;
}

device_unlock(dev);
complete_all(&dev->power.completion);
@@ -923,8 +942,10 @@ static void async_suspend(void *data, async_cookie_t cookie)
int error;

error = __device_suspend(dev, pm_transition, true);
if (error)
if (error) {
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, pm_transition, " async", error);
}

put_device(dev);
}
@@ -967,6 +988,7 @@ int dpm_suspend(pm_message_t state)
mutex_lock(&dpm_list_mtx);
if (error) {
pm_dev_err(dev, state, "", error);
dpm_save_failed_dev(dev_name(dev));
put_device(dev);
break;
}
@@ -980,7 +1002,10 @@ int dpm_suspend(pm_message_t state)
async_synchronize_full();
if (!error)
error = async_error;
if (!error)
if (error) {
suspend_stats.failed_suspend++;
dpm_save_failed_step(SUSPEND_SUSPEND);
} else
dpm_show_time(starttime, state, NULL);
return error;
}
@@ -999,6 +1024,8 @@ static int device_prepare(struct device *dev, pm_message_t state)

device_lock(dev);

dev->power.wakeup_path = device_may_wakeup(dev);

if (dev->pm_domain) {
pm_dev_dbg(dev, state, "preparing power domain ");
if (dev->pm_domain->ops.prepare)
@@ -1088,7 +1115,10 @@ int dpm_suspend_start(pm_message_t state)
int error;

error = dpm_prepare(state);
if (!error)
if (error) {
suspend_stats.failed_prepare++;
dpm_save_failed_step(SUSPEND_PREPARE);
} else
error = dpm_suspend(state);
return error;
}

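The new wakeup_path flag is seeded from device_may_wakeup() in device_prepare() and, on a successful suspend, propagated from child to parent so that domain code can see whether a wakeup-capable device sits below. A toy user-space model of just that rule (illustration only, not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Toy device tree: suspend runs children before parents, as dpm_suspend() does. */
struct toy_dev {
	const char *name;
	struct toy_dev *parent;
	bool may_wakeup;	/* stand-in for device_may_wakeup() */
	bool wakeup_path;
};

static void toy_prepare(struct toy_dev *d)
{
	d->wakeup_path = d->may_wakeup;		/* seeded in device_prepare() */
}

static void toy_suspend(struct toy_dev *d)
{
	/* On success, __device_suspend() propagates the flag upward. */
	if (d->wakeup_path && d->parent)
		d->parent->wakeup_path = true;
}

int main(void)
{
	struct toy_dev root = { "controller", NULL, false, false };
	struct toy_dev leaf = { "keyboard", &root, true, false };

	toy_prepare(&root);
	toy_prepare(&leaf);
	toy_suspend(&leaf);	/* children suspend first */
	toy_suspend(&root);

	/* Prints 1: the controller is now known to be on a wakeup path. */
	printf("%s wakeup_path=%d\n", root.name, root.wakeup_path);
	return 0;
}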
@@ -73,6 +73,7 @@ struct opp {
* RCU usage: nodes are not modified in the list of device_opp,
* however addition is possible and is secured by dev_opp_list_lock
* @dev: device pointer
* @head: notifier head to notify the OPP availability changes.
* @opp_list: list of opps
*
* This is an internal data structure maintaining the link to opps attached to
@@ -83,6 +84,7 @@ struct device_opp {
struct list_head node;

struct device *dev;
struct srcu_notifier_head head;
struct list_head opp_list;
};

@@ -404,6 +406,7 @@ int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
}

dev_opp->dev = dev;
srcu_init_notifier_head(&dev_opp->head);
INIT_LIST_HEAD(&dev_opp->opp_list);

/* Secure the device list modification */
@@ -428,6 +431,11 @@ int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
list_add_rcu(&new_opp->node, head);
mutex_unlock(&dev_opp_list_lock);

/*
* Notify the changes in the availability of the operable
* frequency/voltage list.
*/
srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ADD, new_opp);
return 0;
}

@@ -504,6 +512,14 @@ static int opp_set_availability(struct device *dev, unsigned long freq,
mutex_unlock(&dev_opp_list_lock);
synchronize_rcu();

/* Notify the change of the OPP availability */
if (availability_req)
srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ENABLE,
new_opp);
else
srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_DISABLE,
new_opp);

/* clean up old opp */
new_opp = opp;
goto out;
@@ -643,3 +659,17 @@ void opp_free_cpufreq_table(struct device *dev,
*table = NULL;
}
#endif /* CONFIG_CPU_FREQ */

/**
* opp_get_notifier() - find notifier_head of the device with opp
* @dev: device pointer used to lookup device OPPs.
*/
struct srcu_notifier_head *opp_get_notifier(struct device *dev)
{
struct device_opp *dev_opp = find_device_opp(dev);

if (IS_ERR(dev_opp))
return ERR_PTR(PTR_ERR(dev_opp)); /* matching type */

return &dev_opp->head;
}

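A consumer (for example a devfreq-style governor) can subscribe to the new notifier head roughly as follows. This is a hedged sketch with invented foo_* names; srcu_notifier_chain_register() is the stock kernel notifier API, and opp_get_notifier() is the function added above:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/notifier.h>
#include <linux/opp.h>

/* Hypothetical consumer reacting to OPP list changes. */
static int foo_opp_event(struct notifier_block *nb, unsigned long event,
			 void *data)
{
	switch (event) {
	case OPP_EVENT_ADD:
	case OPP_EVENT_ENABLE:
	case OPP_EVENT_DISABLE:
		/* re-evaluate the set of usable frequencies here */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block foo_opp_nb = {
	.notifier_call = foo_opp_event,
};

static int foo_register(struct device *dev)
{
	struct srcu_notifier_head *nh = opp_get_notifier(dev);

	if (IS_ERR(nh))
		return PTR_ERR(nh);
	return srcu_notifier_chain_register(nh, &foo_opp_nb);
}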
@@ -1,3 +1,5 @@
#include <linux/pm_qos.h>

#ifdef CONFIG_PM_RUNTIME

extern void pm_runtime_init(struct device *dev);
@@ -35,15 +37,21 @@ extern void device_pm_move_last(struct device *);
static inline void device_pm_init(struct device *dev)
{
spin_lock_init(&dev->power.lock);
dev->power.power_state = PMSG_INVALID;
pm_runtime_init(dev);
}

static inline void device_pm_add(struct device *dev)
{
dev_pm_qos_constraints_init(dev);
}

static inline void device_pm_remove(struct device *dev)
{
dev_pm_qos_constraints_destroy(dev);
pm_runtime_remove(dev);
}

static inline void device_pm_add(struct device *dev) {}
static inline void device_pm_move_before(struct device *deva,
struct device *devb) {}
static inline void device_pm_move_after(struct device *deva,

419
drivers/base/power/qos.c
Normal file
@@ -0,0 +1,419 @@
/*
* Devices PM QoS constraints management
*
* Copyright (C) 2011 Texas Instruments, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*
* This module exposes the interface to kernel space for specifying
* per-device PM QoS dependencies. It provides infrastructure for registration
* of:
*
* Dependents on a QoS value : register requests
* Watchers of QoS value : get notified when target QoS value changes
*
* This QoS design is best effort based. Dependents register their QoS needs.
* Watchers register to keep track of the current QoS needs of the system.
* Watchers can register different types of notification callbacks:
* . a per-device notification callback using the dev_pm_qos_*_notifier API.
* The notification chain data is stored in the per-device constraint
* data struct.
* . a system-wide notification callback using the dev_pm_qos_*_global_notifier
* API. The notification chain data is stored in a static variable.
*
* Note about the per-device constraint data struct allocation:
* . The per-device constraints data struct ptr is stored into the device
* dev_pm_info.
* . To minimize the data usage by the per-device constraints, the data struct
* is only allocated at the first call to dev_pm_qos_add_request.
* . The data is later freed when the device is removed from the system.
* . A global mutex protects the constraints users from the data being
* allocated and freed.
*/

#include <linux/pm_qos.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/mutex.h>
|
||||
|
||||
|
||||
static DEFINE_MUTEX(dev_pm_qos_mtx);
|
||||
|
||||
static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
|
||||
|
||||
/**
|
||||
* dev_pm_qos_read_value - Get PM QoS constraint for a given device.
|
||||
* @dev: Device to get the PM QoS constraint value for.
|
||||
*/
|
||||
s32 dev_pm_qos_read_value(struct device *dev)
|
||||
{
|
||||
struct pm_qos_constraints *c;
|
||||
unsigned long flags;
|
||||
s32 ret = 0;
|
||||
|
||||
spin_lock_irqsave(&dev->power.lock, flags);
|
||||
|
||||
c = dev->power.constraints;
|
||||
if (c)
|
||||
ret = pm_qos_read_value(c);
|
||||
|
||||
spin_unlock_irqrestore(&dev->power.lock, flags);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* apply_constraint
|
||||
* @req: constraint request to apply
|
||||
* @action: action to perform add/update/remove, of type enum pm_qos_req_action
|
||||
* @value: defines the qos request
|
||||
*
|
||||
* Internal function to update the constraints list using the PM QoS core
|
||||
* code and if needed call the per-device and the global notification
|
||||
* callbacks
|
||||
*/
|
||||
static int apply_constraint(struct dev_pm_qos_request *req,
|
||||
enum pm_qos_req_action action, int value)
|
||||
{
|
||||
int ret, curr_value;
|
||||
|
||||
ret = pm_qos_update_target(req->dev->power.constraints,
|
||||
&req->node, action, value);
|
||||
|
||||
if (ret) {
|
||||
/* Call the global callbacks if needed */
|
||||
curr_value = pm_qos_read_value(req->dev->power.constraints);
|
||||
blocking_notifier_call_chain(&dev_pm_notifiers,
|
||||
(unsigned long)curr_value,
|
||||
req);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* dev_pm_qos_constraints_allocate
|
||||
* @dev: device to allocate data for
|
||||
*
|
||||
* Called at the first call to add_request, for constraint data allocation
|
||||
* Must be called with the dev_pm_qos_mtx mutex held
|
||||
*/
|
||||
static int dev_pm_qos_constraints_allocate(struct device *dev)
|
||||
{
|
||||
struct pm_qos_constraints *c;
|
||||
struct blocking_notifier_head *n;
|
||||
|
||||
c = kzalloc(sizeof(*c), GFP_KERNEL);
|
||||
if (!c)
|
||||
return -ENOMEM;
|
||||
|
||||
n = kzalloc(sizeof(*n), GFP_KERNEL);
|
||||
if (!n) {
|
||||
kfree(c);
|
||||
return -ENOMEM;
|
||||
}
|
||||
BLOCKING_INIT_NOTIFIER_HEAD(n);
|
||||
|
||||
plist_head_init(&c->list);
|
||||
c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
|
||||
c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
|
||||
c->type = PM_QOS_MIN;
|
||||
c->notifiers = n;
|
||||
|
||||
spin_lock_irq(&dev->power.lock);
|
||||
dev->power.constraints = c;
|
||||
spin_unlock_irq(&dev->power.lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* dev_pm_qos_constraints_init - Initalize device's PM QoS constraints pointer.
|
||||
* @dev: target device
|
||||
*
|
||||
* Called from the device PM subsystem during device insertion under
|
||||
* device_pm_lock().
|
||||
*/
|
||||
void dev_pm_qos_constraints_init(struct device *dev)
|
||||
{
|
||||
mutex_lock(&dev_pm_qos_mtx);
|
||||
dev->power.constraints = NULL;
|
||||
dev->power.power_state = PMSG_ON;
|
||||
mutex_unlock(&dev_pm_qos_mtx);
|
||||
}
|
||||
|
||||
/**
|
||||
* dev_pm_qos_constraints_destroy
|
||||
* @dev: target device
|
||||
*
|
||||
* Called from the device PM subsystem on device removal under device_pm_lock().
|
||||
*/
|
||||
void dev_pm_qos_constraints_destroy(struct device *dev)
|
||||
{
|
||||
struct dev_pm_qos_request *req, *tmp;
|
||||
struct pm_qos_constraints *c;
|
||||
|
||||
mutex_lock(&dev_pm_qos_mtx);
|
||||
|
||||
dev->power.power_state = PMSG_INVALID;
|
||||
c = dev->power.constraints;
|
||||
if (!c)
|
||||
goto out;
|
||||
|
||||
/* Flush the constraints list for the device */
|
||||
plist_for_each_entry_safe(req, tmp, &c->list, node) {
|
||||
/*
|
||||
* Update constraints list and call the notification
|
||||
* callbacks if needed
|
||||
*/
|
||||
apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
|
||||
memset(req, 0, sizeof(*req));
|
||||
}
|
||||
|
||||
spin_lock_irq(&dev->power.lock);
|
||||
dev->power.constraints = NULL;
|
||||
spin_unlock_irq(&dev->power.lock);
|
||||
|
||||
kfree(c->notifiers);
|
||||
kfree(c);
|
||||
|
||||
out:
|
||||
mutex_unlock(&dev_pm_qos_mtx);
|
||||
}
|
||||
|
||||
/**
|
||||
* dev_pm_qos_add_request - inserts new qos request into the list
|
||||
* @dev: target device for the constraint
|
||||
* @req: pointer to a preallocated handle
|
||||
* @value: defines the qos request
|
||||
*
|
||||
* This function inserts a new entry in the device constraints list of
|
||||
* requested qos performance characteristics. It recomputes the aggregate
|
||||
* QoS expectations of parameters and initializes the dev_pm_qos_request
|
||||
* handle. Caller needs to save this handle for later use in updates and
|
||||
* removal.
|
||||
*
|
||||
* Returns 1 if the aggregated constraint value has changed,
|
||||
* 0 if the aggregated constraint value has not changed,
|
||||
* -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
|
||||
* to allocate for data structures, -ENODEV if the device has just been removed
|
||||
* from the system.
|
||||
*/
|
||||
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
|
||||
s32 value)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
if (!dev || !req) /*guard against callers passing in null */
|
||||
return -EINVAL;
|
||||
|
||||
if (dev_pm_qos_request_active(req)) {
|
||||
WARN(1, KERN_ERR "dev_pm_qos_add_request() called for already "
|
||||
"added request\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
req->dev = dev;
|
||||
|
||||
mutex_lock(&dev_pm_qos_mtx);
|
||||
|
||||
if (!dev->power.constraints) {
|
||||
if (dev->power.power_state.event == PM_EVENT_INVALID) {
|
||||
/* The device has been removed from the system. */
|
||||
req->dev = NULL;
|
||||
ret = -ENODEV;
|
||||
goto out;
|
||||
} else {
|
||||
/*
|
||||
* Allocate the constraints data on the first call to
|
||||
* add_request, i.e. only if the data is not already
|
||||
* allocated and if the device has not been removed.
|
||||
*/
|
||||
ret = dev_pm_qos_constraints_allocate(dev);
|
||||
}
|
||||
}
|
||||
|
||||
if (!ret)
|
||||
ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
|
||||
|
||||
out:
|
||||
mutex_unlock(&dev_pm_qos_mtx);
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
|
||||
|
||||
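
A sketch of the request lifecycle from a driver's point of view (the function names and latency values below are illustrative; the constraint unit is the device latency, per PM_QOS_DEV_LAT_DEFAULT_VALUE above):

static struct dev_pm_qos_request my_req;

static int my_open(struct device *dev)
{
	/* Note: returns 1 or 0 on success, negative on error. */
	return dev_pm_qos_add_request(dev, &my_req, 100) < 0 ? -EIO : 0;
}

static void my_hot_path(void)
{
	dev_pm_qos_update_request(&my_req, 50);	/* tighten to 50 */
}

static void my_close(void)
{
	dev_pm_qos_remove_request(&my_req);
}
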
/**
 * dev_pm_qos_update_request - modifies an existing qos request
 * @req: handle to list element holding a dev_pm_qos request to use
 * @new_value: defines the qos request
 *
 * Updates an existing dev PM qos request along with updating the
 * target value.
 *
 * Attempts are made to make this code callable on hot code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system
 */
int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
			      s32 new_value)
{
	int ret = 0;

	if (!req) /* guard against callers passing in null */
		return -EINVAL;

	if (!dev_pm_qos_request_active(req)) {
		WARN(1, KERN_ERR "dev_pm_qos_update_request() called for "
			"unknown object\n");
		return -EINVAL;
	}

	mutex_lock(&dev_pm_qos_mtx);

	if (req->dev->power.constraints) {
		if (new_value != req->node.prio)
			ret = apply_constraint(req, PM_QOS_UPDATE_REQ,
					       new_value);
	} else {
		/* Return if the device has been removed */
		ret = -ENODEV;
	}

	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);

/**
 * dev_pm_qos_remove_request - removes an existing qos request
 * @req: handle to request list element
 *
 * Will remove pm qos request from the list of constraints and
 * recompute the current target value. Call this on slow code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system
 */
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
	int ret = 0;

	if (!req) /* guard against callers passing in null */
		return -EINVAL;

	if (!dev_pm_qos_request_active(req)) {
		WARN(1, KERN_ERR "dev_pm_qos_remove_request() called for "
			"unknown object\n");
		return -EINVAL;
	}

	mutex_lock(&dev_pm_qos_mtx);

	if (req->dev->power.constraints) {
		ret = apply_constraint(req, PM_QOS_REMOVE_REQ,
				       PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	} else {
		/* Return if the device has been removed */
		ret = -ENODEV;
	}

	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);

/**
 * dev_pm_qos_add_notifier - sets notification entry for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block managed by caller.
 *
 * Will register the notifier into a notification chain that gets called
 * upon changes to the target value for the device.
 */
int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
{
	int retval = 0;

	mutex_lock(&dev_pm_qos_mtx);

	/* Silently return if the constraints object is not present. */
	if (dev->power.constraints)
		retval = blocking_notifier_chain_register(
				dev->power.constraints->notifiers,
				notifier);

	mutex_unlock(&dev_pm_qos_mtx);
	return retval;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);

/**
 * dev_pm_qos_remove_notifier - deletes notification for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block to be removed.
 *
 * Will remove the notifier from the notification chain that gets called
 * upon changes to the target value.
 */
int dev_pm_qos_remove_notifier(struct device *dev,
			       struct notifier_block *notifier)
{
	int retval = 0;

	mutex_lock(&dev_pm_qos_mtx);

	/* Silently return if the constraints object is not present. */
	if (dev->power.constraints)
		retval = blocking_notifier_chain_unregister(
				dev->power.constraints->notifiers,
				notifier);

	mutex_unlock(&dev_pm_qos_mtx);
	return retval;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);
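
A sketch of a per-device watcher (callback name illustrative); the value argument is the new aggregate constraint computed in apply_constraint() above:

static int my_qos_notify(struct notifier_block *nb, unsigned long value,
			 void *data)
{
	/* React to the new aggregated constraint for the device. */
	return NOTIFY_OK;
}

static struct notifier_block my_qos_nb = {
	.notifier_call = my_qos_notify,
};

/* In the driver:
 *	dev_pm_qos_add_notifier(dev, &my_qos_nb);
 *	...
 *	dev_pm_qos_remove_notifier(dev, &my_qos_nb);
 */
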
/**
 * dev_pm_qos_add_global_notifier - sets notification entry for changes to
 * target value of the PM QoS constraints for any device
 *
 * @notifier: notifier block managed by caller.
 *
 * Will register the notifier into a notification chain that gets called
 * upon changes to the target value for any device.
 */
int dev_pm_qos_add_global_notifier(struct notifier_block *notifier)
{
	return blocking_notifier_chain_register(&dev_pm_notifiers, notifier);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_global_notifier);

/**
 * dev_pm_qos_remove_global_notifier - deletes notification for changes to
 * target value of PM QoS constraints for any device
 *
 * @notifier: notifier block to be removed.
 *
 * Will remove the notifier from the notification chain that gets called
 * upon changes to the target value for any device.
 */
int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier)
{
	return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);
@@ -9,6 +9,7 @@

#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include <trace/events/rpm.h>
#include "power.h"

static int rpm_resume(struct device *dev, int rpmflags);
@@ -154,6 +155,31 @@ static int rpm_check_suspend_allowed(struct device *dev)
	return retval;
}

/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval;

	if (dev->power.irq_safe)
		spin_unlock(&dev->power.lock);
	else
		spin_unlock_irq(&dev->power.lock);

	retval = cb(dev);

	if (dev->power.irq_safe)
		spin_lock(&dev->power.lock);
	else
		spin_lock_irq(&dev->power.lock);

	return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
@@ -171,6 +197,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
	int (*callback)(struct device *);
	int retval;

	trace_rpm_idle(dev, rpmflags);
	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */
@@ -225,24 +252,14 @@ static int rpm_idle(struct device *dev, int rpmflags)
	else
		callback = NULL;

	if (callback) {
		if (dev->power.irq_safe)
			spin_unlock(&dev->power.lock);
		else
			spin_unlock_irq(&dev->power.lock);

		callback(dev);

		if (dev->power.irq_safe)
			spin_lock(&dev->power.lock);
		else
			spin_lock_irq(&dev->power.lock);
	}
	if (callback)
		__rpm_callback(callback, dev);

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	trace_rpm_return_int(dev, _THIS_IP_, retval);
	return retval;
}

@@ -252,22 +269,14 @@ static int rpm_idle(struct device *dev, int rpmflags)
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval;

	if (!cb)
		return -ENOSYS;

	if (dev->power.irq_safe) {
		retval = cb(dev);
	} else {
		spin_unlock_irq(&dev->power.lock);
	retval = __rpm_callback(cb, dev);

		retval = cb(dev);

		spin_lock_irq(&dev->power.lock);
	}
	dev->power.runtime_error = retval;
	return retval != -EACCES ? retval : -EIO;
}
@@ -277,14 +286,16 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended. If
 * another suspend has been started earlier, either return immediately or wait
 * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags. Cancel a
 * pending idle notification. If the RPM_ASYNC flag is set then queue a
 * suspend request; otherwise run the ->runtime_suspend() callback directly.
 * If a deferred resume was requested while the callback was running then carry
 * it out; otherwise send an idle notification for the device (if the suspend
 * failed) or for its parent (if the suspend succeeded).
 * Check if the device's runtime PM status allows it to be suspended.
 * Cancel a pending idle notification, autosuspend or suspend. If
 * another suspend has been started earlier, either return immediately
 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 * flags. If the RPM_ASYNC flag is set then queue a suspend request;
 * otherwise run the ->runtime_suspend() callback directly. When
 * ->runtime_suspend succeeded, if a deferred resume was requested while
 * the callback was running then carry it out, otherwise send an idle
 * notification for its parent (if the suspend succeeded and both
 * ignore_children of parent->power and irq_safe of dev->power are not set).
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
@@ -295,7 +306,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
	struct device *parent = NULL;
	int retval;

	dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);
	trace_rpm_suspend(dev, rpmflags);

 repeat:
	retval = rpm_check_suspend_allowed(dev);
@@ -347,6 +358,15 @@ static int rpm_suspend(struct device *dev, int rpmflags)
		goto out;
	}

	if (dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);

		cpu_relax();

		spin_lock(&dev->power.lock);
		goto repeat;
	}
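
The spin-with-cpu_relax() path above is only taken for devices whose runtime PM callbacks were declared interrupt-safe. A driver opts in once, typically at probe; a minimal sketch (the probe function is illustrative):

static int my_probe(struct device *dev)
{
	/* Promise that our ->runtime_suspend()/->runtime_resume()
	 * never sleep; runtime PM may then be invoked with interrupts
	 * disabled, and contention is resolved by the busy-wait above
	 * instead of sleeping on power.wait_queue. */
	pm_runtime_irq_safe(dev);
	pm_runtime_enable(dev);
	return 0;
}
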

	/* Wait for the other suspend running in parallel with us. */
	for (;;) {
		prepare_to_wait(&dev->power.wait_queue, &wait,
@@ -400,7 +420,9 @@ static int rpm_suspend(struct device *dev, int rpmflags)
			dev->power.runtime_error = 0;
		else
			pm_runtime_cancel_pending(dev);
	} else {
		wake_up_all(&dev->power.wait_queue);
		goto out;
	}
 no_callback:
	__update_runtime_status(dev, RPM_SUSPENDED);
	pm_runtime_deactivate_timer(dev);
@@ -409,7 +431,6 @@ static int rpm_suspend(struct device *dev, int rpmflags)
			parent = dev->parent;
			atomic_add_unless(&parent->power.child_count, -1, 0);
		}
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
@@ -430,7 +451,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
	}

 out:
	dev_dbg(dev, "%s returns %d\n", __func__, retval);
	trace_rpm_return_int(dev, _THIS_IP_, retval);

	return retval;
}
@@ -459,7 +480,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
	struct device *parent = NULL;
	int retval = 0;

	dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);
	trace_rpm_resume(dev, rpmflags);

 repeat:
	if (dev->power.runtime_error)
@@ -496,6 +517,15 @@ static int rpm_resume(struct device *dev, int rpmflags)
		goto out;
	}

	if (dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);

		cpu_relax();

		spin_lock(&dev->power.lock);
		goto repeat;
	}

	/* Wait for the operation carried out in parallel with us. */
	for (;;) {
		prepare_to_wait(&dev->power.wait_queue, &wait,
@@ -615,7 +645,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
		spin_lock_irq(&dev->power.lock);
	}

	dev_dbg(dev, "%s returns %d\n", __func__, retval);
	trace_rpm_return_int(dev, _THIS_IP_, retval);

	return retval;
}
@@ -732,13 +762,16 @@ EXPORT_SYMBOL_GPL(pm_schedule_suspend);
 * return immediately if it is larger than zero. Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
@@ -761,13 +794,16 @@ EXPORT_SYMBOL_GPL(__pm_runtime_idle);
 * return immediately if it is larger than zero. Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
@@ -789,13 +825,16 @@ EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
 * If the RPM_GET_PUT flag is set, increment the device's usage count. Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set.
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT)
		atomic_inc(&dev->power.usage_count);
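
The RPM_GET_PUT branches above back the usual reference-counted helpers from pm_runtime.h; typical driver usage looks like:

static void my_do_io(struct device *dev)
{
	/* __pm_runtime_resume(dev, RPM_GET_PUT) */
	pm_runtime_get_sync(dev);

	/* ... touch the hardware ... */

	/* __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC) */
	pm_runtime_put(dev);
}
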
@@ -276,7 +276,9 @@ EXPORT_SYMBOL_GPL(device_set_wakeup_capable);
 *
 * By default, most devices should leave wakeup disabled. The exceptions are
 * devices that everyone expects to be wakeup sources: keyboards, power buttons,
 * possibly network interfaces, etc.
 * possibly network interfaces, etc. Also, devices that don't generate their
 * own wakeup requests but merely forward requests from one bus to another
 * (like PCI bridges) should have wakeup enabled by default.
 */
int device_init_wakeup(struct device *dev, bool enable)
{
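
A sketch of how a bridge-like driver would follow that policy at probe time (the probe function is illustrative; device_init_wakeup() is the existing helper documented above):

static int my_bridge_probe(struct device *dev)
{
	/* Merely forwards wakeup requests downstream (like a PCI
	 * bridge), so wakeup defaults to enabled per the policy above. */
	device_init_wakeup(dev, true);
	return 0;
}
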
@@ -1118,7 +1118,7 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message)
		return 0;

	spin_lock_irq(&data->txlock);
	if (!((message.event & PM_EVENT_AUTO) && data->tx_in_flight)) {
	if (!(PMSG_IS_AUTO(message) && data->tx_in_flight)) {
		set_bit(BTUSB_SUSPENDING, &data->flags);
		spin_unlock_irq(&data->txlock);
	} else {
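
All of these conversions rely on the PMSG_IS_AUTO() helper introduced alongside this series in include/linux/pm.h; its definition amounts to:

#define PMSG_IS_AUTO(msg)	(((msg).event & PM_EVENT_AUTO) != 0)

which also normalizes the test to a boolean, so compound conditions like the btusb one above read the same as before.
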
@@ -12,7 +12,7 @@
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/pm_qos_params.h>
#include <linux/pm_qos.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>

@@ -14,7 +14,7 @@

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/pm_qos_params.h>
#include <linux/pm_qos.h>
#include <linux/moduleparam.h>
#include <linux/jiffies.h>

@@ -12,7 +12,7 @@

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/pm_qos_params.h>
#include <linux/pm_qos.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
75
drivers/devfreq/Kconfig
Normal file
@@ -0,0 +1,75 @@
config ARCH_HAS_DEVFREQ
	bool
	depends on ARCH_HAS_OPP
	help
	  Denotes that the architecture supports DEVFREQ. If the architecture
	  supports multiple OPP entries per device and the frequency of the
	  devices with OPPs may be altered dynamically, the architecture
	  supports DEVFREQ.

menuconfig PM_DEVFREQ
	bool "Generic Dynamic Voltage and Frequency Scaling (DVFS) support"
	depends on PM_OPP && ARCH_HAS_DEVFREQ
	help
	  With OPP support, a device may have a list of frequencies and
	  voltages available. DEVFREQ, a generic DVFS framework, can be
	  registered for a device with OPP support in order to let the
	  governor provided to DEVFREQ choose an operating frequency
	  based on the OPP's list and the policy given with DEVFREQ.

	  Each device may have its own governor and policy. DEVFREQ can
	  reevaluate the device state periodically and/or based on the
	  OPP list changes (each frequency/voltage pair in OPP may be
	  disabled or enabled).

	  Like some CPUs with CPUFREQ, a device may have multiple clocks.
	  However, because the clock frequencies of a single device are
	  determined by the single device's state, an instance of DEVFREQ
	  is attached to a single device and returns a "representative"
	  clock frequency from the OPP of the device, which is also attached
	  to a device by 1-to-1. The device registering DEVFREQ takes the
	  responsibility to "interpret" the frequency listed in OPP and
	  to set every one of its clocks accordingly with the "target"
	  callback given to DEVFREQ.

if PM_DEVFREQ

comment "DEVFREQ Governors"

config DEVFREQ_GOV_SIMPLE_ONDEMAND
	bool "Simple Ondemand"
	help
	  Chooses frequency based on the recent load on the device. Works
	  similarly to the ONDEMAND governor of CPUFREQ. A device with
	  Simple-Ondemand should be able to provide busy/total counter
	  values that imply the usage rate. A device may provide tuned
	  values to the governor with the data field at devfreq_add_device().

config DEVFREQ_GOV_PERFORMANCE
	bool "Performance"
	help
	  Sets the frequency to the maximum available frequency.
	  This governor always returns UINT_MAX as frequency so that
	  the DEVFREQ framework returns the highest frequency available
	  at any time.

config DEVFREQ_GOV_POWERSAVE
	bool "Powersave"
	help
	  Sets the frequency to the minimum available frequency.
	  This governor always returns 0 as frequency so that
	  the DEVFREQ framework returns the lowest frequency available
	  at any time.

config DEVFREQ_GOV_USERSPACE
	bool "Userspace"
	help
	  Sets the frequency to the user-specified one.
	  This governor returns the user-configured frequency if there
	  has been an input to /sys/devices/.../power/devfreq_set_freq.
	  Otherwise, the governor does not change the frequency
	  given at initialization.

comment "DEVFREQ Drivers"

endif # PM_DEVFREQ
5
drivers/devfreq/Makefile
Normal file
@@ -0,0 +1,5 @@
obj-$(CONFIG_PM_DEVFREQ)	+= devfreq.o
obj-$(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)	+= governor_simpleondemand.o
obj-$(CONFIG_DEVFREQ_GOV_PERFORMANCE)	+= governor_performance.o
obj-$(CONFIG_DEVFREQ_GOV_POWERSAVE)	+= governor_powersave.o
obj-$(CONFIG_DEVFREQ_GOV_USERSPACE)	+= governor_userspace.o
601
drivers/devfreq/devfreq.c
Normal file
@@ -0,0 +1,601 @@
/*
 * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
 *	    for Non-CPU Devices.
 *
 * Copyright (C) 2011 Samsung Electronics
 *	MyungJoo Ham <myungjoo.ham@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/opp.h>
#include <linux/devfreq.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/list.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
#include "governor.h"

struct class *devfreq_class;

/*
 * devfreq_work periodically monitors every registered device.
 * The minimum polling interval is one jiffy. The polling interval is
 * determined by the minimum polling period among all polling devfreq
 * devices. The resolution of polling interval is one jiffy.
 */
static bool polling;
static struct workqueue_struct *devfreq_wq;
static struct delayed_work devfreq_work;

/* wait removing if this is to be removed */
static struct devfreq *wait_remove_device;

/* The list of all device-devfreq */
static LIST_HEAD(devfreq_list);
static DEFINE_MUTEX(devfreq_list_lock);

/**
 * find_device_devfreq() - find devfreq struct using device pointer
 * @dev: device pointer used to lookup device devfreq.
 *
 * Search the list of device devfreqs and return the matched device's
 * devfreq info. devfreq_list_lock should be held by the caller.
 */
static struct devfreq *find_device_devfreq(struct device *dev)
{
	struct devfreq *tmp_devfreq;

	if (unlikely(IS_ERR_OR_NULL(dev))) {
		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}
	WARN(!mutex_is_locked(&devfreq_list_lock),
	     "devfreq_list_lock must be locked.");

	list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
		if (tmp_devfreq->dev.parent == dev)
			return tmp_devfreq;
	}

	return ERR_PTR(-ENODEV);
}

/**
 * update_devfreq() - Reevaluate the device and configure frequency.
 * @devfreq: the devfreq instance.
 *
 * Note: Lock devfreq->lock before calling update_devfreq
 * This function is exported for governors.
 */
int update_devfreq(struct devfreq *devfreq)
{
	unsigned long freq;
	int err = 0;

	if (!mutex_is_locked(&devfreq->lock)) {
		WARN(true, "devfreq->lock must be locked by the caller.\n");
		return -EINVAL;
	}

	/* Reevaluate the proper frequency */
	err = devfreq->governor->get_target_freq(devfreq, &freq);
	if (err)
		return err;

	err = devfreq->profile->target(devfreq->dev.parent, &freq);
	if (err)
		return err;

	devfreq->previous_freq = freq;
	return err;
}

/**
 * devfreq_notifier_call() - Notify that the device frequency requirements
 * have been changed outside of the devfreq framework.
 * @nb: the notifier_block (supposed to be devfreq->nb)
 * @type: not used
 * @devp: not used
 *
 * Called by a notifier that uses devfreq->nb.
 */
static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
				 void *devp)
{
	struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
	int ret;

	mutex_lock(&devfreq->lock);
	ret = update_devfreq(devfreq);
	mutex_unlock(&devfreq->lock);

	return ret;
}

/**
 * _remove_devfreq() - Remove devfreq from the device.
 * @devfreq: the devfreq struct
 * @skip: skip calling device_unregister().
 *
 * Note that the caller should lock devfreq->lock before calling
 * this. _remove_devfreq() will unlock it and free devfreq
 * internally. devfreq_list_lock should be locked by the caller
 * as well (not released at return)
 *
 * Lock usage:
 * devfreq->lock: locked before call.
 *	unlocked at return (and freed)
 * devfreq_list_lock: locked before call.
 *	kept locked at return.
 *	if devfreq is centrally polled.
 *
 * Freed memory:
 * devfreq
 */
static void _remove_devfreq(struct devfreq *devfreq, bool skip)
{
	if (!mutex_is_locked(&devfreq->lock)) {
		WARN(true, "devfreq->lock must be locked by the caller.\n");
		return;
	}
	if (!devfreq->governor->no_central_polling &&
	    !mutex_is_locked(&devfreq_list_lock)) {
		WARN(true, "devfreq_list_lock must be locked by the caller.\n");
		return;
	}

	if (devfreq->being_removed)
		return;

	devfreq->being_removed = true;

	if (devfreq->profile->exit)
		devfreq->profile->exit(devfreq->dev.parent);

	if (devfreq->governor->exit)
		devfreq->governor->exit(devfreq);

	if (!skip && get_device(&devfreq->dev)) {
		device_unregister(&devfreq->dev);
		put_device(&devfreq->dev);
	}

	if (!devfreq->governor->no_central_polling)
		list_del(&devfreq->node);

	mutex_unlock(&devfreq->lock);
	mutex_destroy(&devfreq->lock);

	kfree(devfreq);
}

/**
 * devfreq_dev_release() - Callback for struct device to release the device.
 * @dev: the devfreq device
 *
 * This calls _remove_devfreq() if it has not been called already.
 * Note that devfreq_dev_release() could be called by _remove_devfreq() as
 * well as by others unregistering the device.
 */
static void devfreq_dev_release(struct device *dev)
{
	struct devfreq *devfreq = to_devfreq(dev);
	bool central_polling = !devfreq->governor->no_central_polling;

	/*
	 * If devfreq_dev_release() was called by device_unregister() of
	 * _remove_devfreq(), we cannot mutex_lock(&devfreq->lock) and
	 * being_removed is already set. This also partially checks the case
	 * where devfreq_dev_release() is called from a thread other than
	 * the one that called _remove_devfreq(); however, this case is
	 * dealt with completely by the second being_removed check below.
	 *
	 * Because being_removed is never unset, we do not need to worry
	 * about race conditions on being_removed.
	 */
	if (devfreq->being_removed)
		return;

	if (central_polling)
		mutex_lock(&devfreq_list_lock);

	mutex_lock(&devfreq->lock);

	/*
	 * Check being_removed flag again for the case where
	 * devfreq_dev_release() was called in a thread other than the one
	 * that possibly called _remove_devfreq().
	 */
	if (devfreq->being_removed) {
		mutex_unlock(&devfreq->lock);
		goto out;
	}

	/* devfreq->lock is unlocked and removed in _remove_devfreq() */
	_remove_devfreq(devfreq, true);

out:
	if (central_polling)
		mutex_unlock(&devfreq_list_lock);
}

/**
 * devfreq_monitor() - Periodically poll devfreq objects.
 * @work: the work struct used to run devfreq_monitor periodically.
 *
 */
static void devfreq_monitor(struct work_struct *work)
{
	static unsigned long last_polled_at;
	struct devfreq *devfreq, *tmp;
	int error;
	unsigned long jiffies_passed;
	unsigned long next_jiffies = ULONG_MAX, now = jiffies;
	struct device *dev;

	/* Initially last_polled_at = 0, polling every device at bootup */
	jiffies_passed = now - last_polled_at;
	last_polled_at = now;
	if (jiffies_passed == 0)
		jiffies_passed = 1;

	mutex_lock(&devfreq_list_lock);
	list_for_each_entry_safe(devfreq, tmp, &devfreq_list, node) {
		mutex_lock(&devfreq->lock);
		dev = devfreq->dev.parent;

		/* Do not remove tmp for a while */
		wait_remove_device = tmp;

		if (devfreq->governor->no_central_polling ||
		    devfreq->next_polling == 0) {
			mutex_unlock(&devfreq->lock);
			continue;
		}
		mutex_unlock(&devfreq_list_lock);

		/*
		 * Reduce next_polling by more if devfreq_wq took an extra
		 * delay (i.e., the CPU has been idle).
		 */
		if (devfreq->next_polling <= jiffies_passed) {
			error = update_devfreq(devfreq);

			/* Remove a devfreq with an error. */
			if (error && error != -EAGAIN) {

				dev_err(dev, "Due to update_devfreq error(%d), devfreq(%s) is removed from the device\n",
					error, devfreq->governor->name);

				/*
				 * Unlock devfreq before locking the list
				 * in order to avoid deadlock with
				 * find_device_devfreq or others
				 */
				mutex_unlock(&devfreq->lock);
				mutex_lock(&devfreq_list_lock);
				/* Check if devfreq is already removed */
				if (IS_ERR(find_device_devfreq(dev)))
					continue;
				mutex_lock(&devfreq->lock);
				/* This unlocks devfreq->lock and frees it */
				_remove_devfreq(devfreq, false);
				continue;
			}
			devfreq->next_polling = devfreq->polling_jiffies;
		} else {
			devfreq->next_polling -= jiffies_passed;
		}

		if (devfreq->next_polling)
			next_jiffies = (next_jiffies > devfreq->next_polling) ?
					devfreq->next_polling : next_jiffies;

		mutex_unlock(&devfreq->lock);
		mutex_lock(&devfreq_list_lock);
	}
	wait_remove_device = NULL;
	mutex_unlock(&devfreq_list_lock);

	if (next_jiffies > 0 && next_jiffies < ULONG_MAX) {
		polling = true;
		queue_delayed_work(devfreq_wq, &devfreq_work, next_jiffies);
	} else {
		polling = false;
	}
}
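
Worked example of the bookkeeping above (assuming HZ=100, so one jiffy is 10 ms): with two centrally polled devices whose polling_ms are 100 and 300, polling_jiffies are 10 and 30. Each pass updates the 100 ms device and resets its next_polling to 10, while the other device's next_polling is only decremented by the jiffies actually elapsed; next_jiffies becomes the minimum of the two, so the work requeues itself 10 jiffies out and the 300 ms device fires on every third pass. If the workqueue ran late (e.g. the CPU was idle), jiffies_passed grows and the subtraction catches the slower device up automatically.
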

/**
 * devfreq_add_device() - Add devfreq feature to the device
 * @dev: the device to add devfreq feature.
 * @profile: device-specific profile to run devfreq.
 * @governor: the policy to choose frequency.
 * @data: private data for the governor. The devfreq framework does not
 *	  touch this value.
 */
struct devfreq *devfreq_add_device(struct device *dev,
				   struct devfreq_dev_profile *profile,
				   const struct devfreq_governor *governor,
				   void *data)
{
	struct devfreq *devfreq;
	int err = 0;

	if (!dev || !profile || !governor) {
		dev_err(dev, "%s: Invalid parameters.\n", __func__);
		return ERR_PTR(-EINVAL);
	}


	if (!governor->no_central_polling) {
		mutex_lock(&devfreq_list_lock);
		devfreq = find_device_devfreq(dev);
		mutex_unlock(&devfreq_list_lock);
		if (!IS_ERR(devfreq)) {
			dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
			err = -EINVAL;
			goto out;
		}
	}

	devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
	if (!devfreq) {
		dev_err(dev, "%s: Unable to create devfreq for the device\n",
			__func__);
		err = -ENOMEM;
		goto out;
	}

	mutex_init(&devfreq->lock);
	mutex_lock(&devfreq->lock);
	devfreq->dev.parent = dev;
	devfreq->dev.class = devfreq_class;
	devfreq->dev.release = devfreq_dev_release;
	devfreq->profile = profile;
	devfreq->governor = governor;
	devfreq->previous_freq = profile->initial_freq;
	devfreq->data = data;
	devfreq->next_polling = devfreq->polling_jiffies
			      = msecs_to_jiffies(devfreq->profile->polling_ms);
	devfreq->nb.notifier_call = devfreq_notifier_call;

	dev_set_name(&devfreq->dev, dev_name(dev));
	err = device_register(&devfreq->dev);
	if (err) {
		put_device(&devfreq->dev);
		goto err_dev;
	}

	if (governor->init)
		err = governor->init(devfreq);
	if (err)
		goto err_init;

	mutex_unlock(&devfreq->lock);

	if (governor->no_central_polling)
		goto out;

	mutex_lock(&devfreq_list_lock);

	list_add(&devfreq->node, &devfreq_list);

	if (devfreq_wq && devfreq->next_polling && !polling) {
		polling = true;
		queue_delayed_work(devfreq_wq, &devfreq_work,
				   devfreq->next_polling);
	}
	mutex_unlock(&devfreq_list_lock);
	goto out;
err_init:
	device_unregister(&devfreq->dev);
err_dev:
	mutex_unlock(&devfreq->lock);
	kfree(devfreq);
out:
	if (err)
		return ERR_PTR(err);
	else
		return devfreq;
}
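
A sketch of how a driver would register itself (my_target(), my_status() and the numbers are made-up; the profile fields are the ones consumed by update_devfreq() above):

static int my_target(struct device *dev, unsigned long *freq)
{
	/* Program clocks/regulators for *freq, e.g. via
	 * devfreq_recommended_opp() further below. */
	return 0;
}

static int my_status(struct device *dev, struct devfreq_dev_status *stat)
{
	/* Fill stat->busy_time, stat->total_time and
	 * stat->current_frequency for the governor. */
	return 0;
}

static struct devfreq_dev_profile my_profile = {
	.initial_freq	= 200000000,
	.polling_ms	= 100,
	.target		= my_target,
	.get_dev_status	= my_status,
};

/* In probe:
 *	struct devfreq *df = devfreq_add_device(dev, &my_profile,
 *						&devfreq_simple_ondemand, NULL);
 *	if (IS_ERR(df))
 *		return PTR_ERR(df);
 */
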

/**
 * devfreq_remove_device() - Remove devfreq feature from a device.
 * @devfreq: the devfreq instance to be removed
 */
int devfreq_remove_device(struct devfreq *devfreq)
{
	if (!devfreq)
		return -EINVAL;

	if (!devfreq->governor->no_central_polling) {
		mutex_lock(&devfreq_list_lock);
		while (wait_remove_device == devfreq) {
			mutex_unlock(&devfreq_list_lock);
			schedule();
			mutex_lock(&devfreq_list_lock);
		}
	}

	mutex_lock(&devfreq->lock);
	_remove_devfreq(devfreq, false); /* it unlocks devfreq->lock */

	if (!devfreq->governor->no_central_polling)
		mutex_unlock(&devfreq_list_lock);

	return 0;
}

static ssize_t show_governor(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
}

static ssize_t show_freq(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
}

static ssize_t show_polling_interval(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms);
}

static ssize_t store_polling_interval(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned int value;
	int ret;

	ret = sscanf(buf, "%u", &value);
	if (ret != 1)
		goto out;

	mutex_lock(&df->lock);
	df->profile->polling_ms = value;
	df->next_polling = df->polling_jiffies
			 = msecs_to_jiffies(value);
	mutex_unlock(&df->lock);

	ret = count;

	if (df->governor->no_central_polling)
		goto out;

	mutex_lock(&devfreq_list_lock);
	if (df->next_polling > 0 && !polling) {
		polling = true;
		queue_delayed_work(devfreq_wq, &devfreq_work,
				   df->next_polling);
	}
	mutex_unlock(&devfreq_list_lock);
out:
	return ret;
}

static ssize_t show_central_polling(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n",
		       !to_devfreq(dev)->governor->no_central_polling);
}

static struct device_attribute devfreq_attrs[] = {
	__ATTR(governor, S_IRUGO, show_governor, NULL),
	__ATTR(cur_freq, S_IRUGO, show_freq, NULL),
	__ATTR(central_polling, S_IRUGO, show_central_polling, NULL),
	__ATTR(polling_interval, S_IRUGO | S_IWUSR, show_polling_interval,
	       store_polling_interval),
	{ },
};

/**
 * devfreq_start_polling() - Initialize data structure for devfreq framework and
 *			     start polling registered devfreq devices.
 */
static int __init devfreq_start_polling(void)
{
	mutex_lock(&devfreq_list_lock);
	polling = false;
	devfreq_wq = create_freezable_workqueue("devfreq_wq");
	INIT_DELAYED_WORK_DEFERRABLE(&devfreq_work, devfreq_monitor);
	mutex_unlock(&devfreq_list_lock);

	devfreq_monitor(&devfreq_work.work);
	return 0;
}
late_initcall(devfreq_start_polling);

static int __init devfreq_init(void)
{
	devfreq_class = class_create(THIS_MODULE, "devfreq");
	if (IS_ERR(devfreq_class)) {
		pr_err("%s: couldn't create class\n", __FILE__);
		return PTR_ERR(devfreq_class);
	}
	devfreq_class->dev_attrs = devfreq_attrs;
	return 0;
}
subsys_initcall(devfreq_init);

static void __exit devfreq_exit(void)
{
	class_destroy(devfreq_class);
}
module_exit(devfreq_exit);

/*
 * The following are helper functions for devfreq user device drivers with
 * the OPP framework.
 */

/**
 * devfreq_recommended_opp() - Helper function to get proper OPP for the
 *			       freq value given to target callback.
 * @dev: The devfreq user device. (parent of devfreq)
 * @freq: The frequency given to target function
 *
 */
struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq)
{
	struct opp *opp = opp_find_freq_ceil(dev, freq);

	if (opp == ERR_PTR(-ENODEV))
		opp = opp_find_freq_floor(dev, freq);
	return opp;
}
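
Inside a driver's target() callback the helper is typically used like this (my_set_rate() is hypothetical; opp_get_freq()/opp_get_voltage() are the existing OPP accessors):

static int my_target(struct device *dev, unsigned long *freq)
{
	struct opp *opp = devfreq_recommended_opp(dev, freq);

	if (IS_ERR(opp))
		return PTR_ERR(opp);

	/* *freq now holds the chosen OPP's frequency. */
	return my_set_rate(dev, opp_get_freq(opp), opp_get_voltage(opp));
}
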

/**
 * devfreq_register_opp_notifier() - Helper function to get devfreq notified
 *				     of any changes in the OPP availability
 * @dev: The devfreq user device. (parent of devfreq)
 * @devfreq: The devfreq object.
 */
int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
	struct srcu_notifier_head *nh = opp_get_notifier(dev);

	if (IS_ERR(nh))
		return PTR_ERR(nh);
	return srcu_notifier_chain_register(nh, &devfreq->nb);
}

/**
 * devfreq_unregister_opp_notifier() - Helper function to stop devfreq from
 *				       getting notified of any changes in the
 *				       OPP availability
 * @dev: The devfreq user device. (parent of devfreq)
 * @devfreq: The devfreq object.
 *
 * This must be called from the exit() callback of devfreq_dev_profile if
 * devfreq_recommended_opp() is used.
 */
int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
	struct srcu_notifier_head *nh = opp_get_notifier(dev);

	if (IS_ERR(nh))
		return PTR_ERR(nh);
	return srcu_notifier_chain_unregister(nh, &devfreq->nb);
}

MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
MODULE_DESCRIPTION("devfreq class support");
MODULE_LICENSE("GPL");
24
drivers/devfreq/governor.h
Normal file
@@ -0,0 +1,24 @@
/*
 * governor.h - internal header for devfreq governors.
 *
 * Copyright (C) 2011 Samsung Electronics
 *	MyungJoo Ham <myungjoo.ham@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This header is for devfreq governors in drivers/devfreq/
 */

#ifndef _GOVERNOR_H
#define _GOVERNOR_H

#include <linux/devfreq.h>

#define to_devfreq(DEV)	container_of((DEV), struct devfreq, dev)

/* Caution: devfreq->lock must be locked before calling update_devfreq */
extern int update_devfreq(struct devfreq *devfreq);

#endif /* _GOVERNOR_H */
29
drivers/devfreq/governor_performance.c
Normal file
@@ -0,0 +1,29 @@
/*
 *  linux/drivers/devfreq/governor_performance.c
 *
 *  Copyright (C) 2011 Samsung Electronics
 *	MyungJoo Ham <myungjoo.ham@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/devfreq.h>

static int devfreq_performance_func(struct devfreq *df,
				    unsigned long *freq)
{
	/*
	 * target callback should be able to get floor value as
	 * said in devfreq.h
	 */
	*freq = UINT_MAX;
	return 0;
}

const struct devfreq_governor devfreq_performance = {
	.name = "performance",
	.get_target_freq = devfreq_performance_func,
	.no_central_polling = true,
};
29
drivers/devfreq/governor_powersave.c
Normal file
@@ -0,0 +1,29 @@
/*
 *  linux/drivers/devfreq/governor_powersave.c
 *
 *  Copyright (C) 2011 Samsung Electronics
 *	MyungJoo Ham <myungjoo.ham@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/devfreq.h>

static int devfreq_powersave_func(struct devfreq *df,
				  unsigned long *freq)
{
	/*
	 * target callback should be able to get ceiling value as
	 * said in devfreq.h
	 */
	*freq = 0;
	return 0;
}

const struct devfreq_governor devfreq_powersave = {
	.name = "powersave",
	.get_target_freq = devfreq_powersave_func,
	.no_central_polling = true,
};
88
drivers/devfreq/governor_simpleondemand.c
Normal file
@@ -0,0 +1,88 @@
/*
 *  linux/drivers/devfreq/governor_simpleondemand.c
 *
 *  Copyright (C) 2011 Samsung Electronics
 *	MyungJoo Ham <myungjoo.ham@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/errno.h>
#include <linux/devfreq.h>
#include <linux/math64.h>

/* Default constants for DevFreq-Simple-Ondemand (DFSO) */
#define DFSO_UPTHRESHOLD	(90)
#define DFSO_DOWNDIFFERENCTIAL	(5)
static int devfreq_simple_ondemand_func(struct devfreq *df,
					unsigned long *freq)
{
	struct devfreq_dev_status stat;
	int err = df->profile->get_dev_status(df->dev.parent, &stat);
	unsigned long long a, b;
	unsigned int dfso_upthreshold = DFSO_UPTHRESHOLD;
	unsigned int dfso_downdifferential = DFSO_DOWNDIFFERENCTIAL;
	struct devfreq_simple_ondemand_data *data = df->data;

	if (err)
		return err;

	if (data) {
		if (data->upthreshold)
			dfso_upthreshold = data->upthreshold;
		if (data->downdifferential)
			dfso_downdifferential = data->downdifferential;
	}
	if (dfso_upthreshold > 100 ||
	    dfso_upthreshold < dfso_downdifferential)
		return -EINVAL;

	/* Assume MAX if it is going to be divided by zero */
	if (stat.total_time == 0) {
		*freq = UINT_MAX;
		return 0;
	}

	/* Prevent overflow */
	if (stat.busy_time >= (1 << 24) || stat.total_time >= (1 << 24)) {
		stat.busy_time >>= 7;
		stat.total_time >>= 7;
	}

	/* Set MAX if it's busy enough */
	if (stat.busy_time * 100 >
	    stat.total_time * dfso_upthreshold) {
		*freq = UINT_MAX;
		return 0;
	}

	/* Set MAX if we do not know the initial frequency */
	if (stat.current_frequency == 0) {
		*freq = UINT_MAX;
		return 0;
	}

	/* Keep the current frequency */
	if (stat.busy_time * 100 >
	    stat.total_time * (dfso_upthreshold - dfso_downdifferential)) {
		*freq = stat.current_frequency;
		return 0;
	}

	/* Set the desired frequency based on the load */
	a = stat.busy_time;
	a *= stat.current_frequency;
	b = div_u64(a, stat.total_time);
	b *= 100;
	b = div_u64(b, (dfso_upthreshold - dfso_downdifferential / 2));
	*freq = (unsigned long) b;

	return 0;
}

const struct devfreq_governor devfreq_simple_ondemand = {
	.name = "simple_ondemand",
	.get_target_freq = devfreq_simple_ondemand_func,
};
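
Worked example of the final computation (made-up numbers): with busy_time = 60, total_time = 100, current_frequency = 200 MHz and the default 90/5 tuning, the keep-current test compares 60 * 100 = 6000 against 100 * (90 - 5) = 8500 and falls through; then b = 60 * 200 MHz / 100 = 120 MHz, times 100 gives 12000 MHz, divided by (90 - 5/2) = 88 (integer division) yields a target of roughly 136 MHz, which the device's target callback then snaps to a real OPP.
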
116
drivers/devfreq/governor_userspace.c
Normal file
@@ -0,0 +1,116 @@
/*
 *  linux/drivers/devfreq/governor_userspace.c
 *
 *  Copyright (C) 2011 Samsung Electronics
 *	MyungJoo Ham <myungjoo.ham@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/device.h>
#include <linux/devfreq.h>
#include <linux/pm.h>
#include <linux/mutex.h>
#include "governor.h"

struct userspace_data {
	unsigned long user_frequency;
	bool valid;
};

static int devfreq_userspace_func(struct devfreq *df, unsigned long *freq)
{
	struct userspace_data *data = df->data;

	if (!data->valid)
		*freq = df->previous_freq; /* No user freq specified yet */
	else
		*freq = data->user_frequency;
	return 0;
}

static ssize_t store_freq(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct devfreq *devfreq = to_devfreq(dev);
	struct userspace_data *data;
	unsigned long wanted;
	int err = 0;

	mutex_lock(&devfreq->lock);
	data = devfreq->data;

	sscanf(buf, "%lu", &wanted);
	data->user_frequency = wanted;
	data->valid = true;
	err = update_devfreq(devfreq);
	if (err == 0)
		err = count;
	mutex_unlock(&devfreq->lock);
	return err;
}

static ssize_t show_freq(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct devfreq *devfreq = to_devfreq(dev);
	struct userspace_data *data;
	int err = 0;

	mutex_lock(&devfreq->lock);
	data = devfreq->data;

	if (data->valid)
		err = sprintf(buf, "%lu\n", data->user_frequency);
	else
		err = sprintf(buf, "undefined\n");
	mutex_unlock(&devfreq->lock);
	return err;
}

static DEVICE_ATTR(set_freq, 0644, show_freq, store_freq);
static struct attribute *dev_entries[] = {
	&dev_attr_set_freq.attr,
	NULL,
};
static struct attribute_group dev_attr_group = {
	.name	= "userspace",
	.attrs	= dev_entries,
};

static int userspace_init(struct devfreq *devfreq)
{
	int err = 0;
	struct userspace_data *data = kzalloc(sizeof(struct userspace_data),
					      GFP_KERNEL);

	if (!data) {
		err = -ENOMEM;
		goto out;
	}
	data->valid = false;
	devfreq->data = data;

	err = sysfs_create_group(&devfreq->dev.kobj, &dev_attr_group);
out:
	return err;
}

static void userspace_exit(struct devfreq *devfreq)
{
	sysfs_remove_group(&devfreq->dev.kobj, &dev_attr_group);
	kfree(devfreq->data);
	devfreq->data = NULL;
}

const struct devfreq_governor devfreq_userspace = {
	.name = "userspace",
	.get_target_freq = devfreq_userspace_func,
	.init = userspace_init,
	.exit = userspace_exit,
	.no_central_polling = true,
};
@@ -2409,7 +2409,7 @@ static int picolcd_raw_event(struct hid_device *hdev,
 #ifdef CONFIG_PM
 static int picolcd_suspend(struct hid_device *hdev, pm_message_t message)
 {
-	if (message.event & PM_EVENT_AUTO)
+	if (PMSG_IS_AUTO(message))
 		return 0;
 
 	picolcd_suspend_backlight(hid_get_drvdata(hdev));
 
@@ -1332,7 +1332,7 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
 	struct usbhid_device *usbhid = hid->driver_data;
 	int status;
 
-	if (message.event & PM_EVENT_AUTO) {
+	if (PMSG_IS_AUTO(message)) {
 		spin_lock_irq(&usbhid->lock);	/* Sync with error handler */
 		if (!test_bit(HID_RESET_PENDING, &usbhid->iofl)
 		    && !test_bit(HID_CLEAR_HALT, &usbhid->iofl)
@@ -1367,7 +1367,7 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
 			return -EIO;
 	}
 
-	if (!ignoreled && (message.event & PM_EVENT_AUTO)) {
+	if (!ignoreled && PMSG_IS_AUTO(message)) {
 		spin_lock_irq(&usbhid->lock);
 		if (test_bit(HID_LED_ON, &usbhid->iofl)) {
 			spin_unlock_irq(&usbhid->lock);
@@ -1380,8 +1380,7 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
 	hid_cancel_delayed_stuff(usbhid);
 	hid_cease_io(usbhid);
 
-	if ((message.event & PM_EVENT_AUTO) &&
-			test_bit(HID_KEYS_PRESSED, &usbhid->iofl)) {
+	if (PMSG_IS_AUTO(message) && test_bit(HID_KEYS_PRESSED, &usbhid->iofl)) {
 		/* lost race against keypresses */
 		status = hid_start_in(hid);
 		if (status < 0)
@@ -21,7 +21,7 @@
 #include <media/videobuf-dma-sg.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
 #include <linux/via-core.h>
 #include <linux/via-gpio.h>
 #include <linux/via_i2c.h>
@@ -69,7 +69,7 @@ struct via_camera {
 	struct mutex lock;
 	enum viacam_opstate opstate;
 	unsigned long flags;
-	struct pm_qos_request_list qos_request;
+	struct pm_qos_request qos_request;
 	/*
 	 * GPIO info for power/reset management
 	 */
@@ -47,7 +47,7 @@
 #include <linux/if_vlan.h>
 #include <linux/cpu.h>
 #include <linux/smp.h>
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
 #include <linux/pm_runtime.h>
 #include <linux/aer.h>
 #include <linux/prefetch.h>
@@ -1476,7 +1476,7 @@ int usbnet_suspend (struct usb_interface *intf, pm_message_t message)
 	if (!dev->suspend_count++) {
 		spin_lock_irq(&dev->txq.lock);
 		/* don't autosuspend while transmitting */
-		if (dev->txq.qlen && (message.event & PM_EVENT_AUTO)) {
+		if (dev->txq.qlen && PMSG_IS_AUTO(message)) {
 			spin_unlock_irq(&dev->txq.lock);
 			return -EBUSY;
 		} else {
@@ -599,7 +599,7 @@ void i2400mu_disconnect(struct usb_interface *iface)
 *
 * As well, the device might refuse going to sleep for whichever
 * reason. In this case we just fail. For system suspend/hibernate,
- * we *can't* fail. We check PM_EVENT_AUTO to see if the
+ * we *can't* fail. We check PMSG_IS_AUTO to see if the
 * suspend call comes from the USB stack or from the system and act
 * in consequence.
 *
@@ -615,7 +615,7 @@ int i2400mu_suspend(struct usb_interface *iface, pm_message_t pm_msg)
 	struct i2400m *i2400m = &i2400mu->i2400m;
 
 #ifdef CONFIG_PM
-	if (pm_msg.event & PM_EVENT_AUTO)
+	if (PMSG_IS_AUTO(pm_msg))
 		is_autosuspend = 1;
 #endif
 
@@ -161,7 +161,7 @@ that only one external action is invoked at a time.
 #include <linux/firmware.h>
 #include <linux/acpi.h>
 #include <linux/ctype.h>
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
 
 #include <net/lib80211.h>
 
@@ -174,7 +174,7 @@ that only one external action is invoked at a time.
 #define DRV_DESCRIPTION	"Intel(R) PRO/Wireless 2100 Network Driver"
 #define DRV_COPYRIGHT	"Copyright(c) 2003-2006 Intel Corporation"
 
-static struct pm_qos_request_list ipw2100_pm_qos_req;
+static struct pm_qos_request ipw2100_pm_qos_req;
 
 /* Debugging stuff */
 #ifdef CONFIG_IPW2100_DEBUG
@@ -60,6 +60,10 @@ config VT_CONSOLE
 
 	  If unsure, say Y.
 
+config VT_CONSOLE_SLEEP
+	def_bool y
+	depends on VT_CONSOLE && PM_SLEEP
+
 config HW_CONSOLE
 	bool
 	depends on VT && !UML
@@ -1305,7 +1305,7 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
 	struct acm *acm = usb_get_intfdata(intf);
 	int cnt;
 
-	if (message.event & PM_EVENT_AUTO) {
+	if (PMSG_IS_AUTO(message)) {
 		int b;
 
 		spin_lock_irq(&acm->write_lock);
@@ -798,11 +798,11 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message)
 	dev_dbg(&desc->intf->dev, "wdm%d_suspend\n", intf->minor);
 
 	/* if this is an autosuspend the caller does the locking */
-	if (!(message.event & PM_EVENT_AUTO))
+	if (!PMSG_IS_AUTO(message))
 		mutex_lock(&desc->lock);
 	spin_lock_irq(&desc->iuspin);
 
-	if ((message.event & PM_EVENT_AUTO) &&
+	if (PMSG_IS_AUTO(message) &&
 	    (test_bit(WDM_IN_USE, &desc->flags)
 	    || test_bit(WDM_RESPONDING, &desc->flags))) {
 		spin_unlock_irq(&desc->iuspin);
@@ -815,7 +815,7 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message)
 		kill_urbs(desc);
 		cancel_work_sync(&desc->rxwork);
 	}
-	if (!(message.event & PM_EVENT_AUTO))
+	if (!PMSG_IS_AUTO(message))
 		mutex_unlock(&desc->lock);
 
 	return rv;
@@ -1046,8 +1046,7 @@ static int usb_resume_device(struct usb_device *udev, pm_message_t msg)
 	/* Non-root devices on a full/low-speed bus must wait for their
 	 * companion high-speed root hub, in case a handoff is needed.
 	 */
-	if (!(msg.event & PM_EVENT_AUTO) && udev->parent &&
-			udev->bus->hs_companion)
+	if (!PMSG_IS_AUTO(msg) && udev->parent && udev->bus->hs_companion)
 		device_pm_wait_for_dev(&udev->dev,
 				&udev->bus->hs_companion->root_hub->dev);
 
@@ -1075,7 +1074,7 @@ static int usb_suspend_interface(struct usb_device *udev,
 
 	if (driver->suspend) {
 		status = driver->suspend(intf, msg);
-		if (status && !(msg.event & PM_EVENT_AUTO))
+		if (status && !PMSG_IS_AUTO(msg))
 			dev_err(&intf->dev, "%s error %d\n",
 					"suspend", status);
 	} else {
@@ -1189,7 +1188,7 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
 		status = usb_suspend_interface(udev, intf, msg);
 
 		/* Ignore errors during system sleep transitions */
-		if (!(msg.event & PM_EVENT_AUTO))
+		if (!PMSG_IS_AUTO(msg))
 			status = 0;
 		if (status != 0)
 			break;
@@ -1199,7 +1198,7 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
 		status = usb_suspend_device(udev, msg);
 
 		/* Again, ignore errors during system sleep transitions */
-		if (!(msg.event & PM_EVENT_AUTO))
+		if (!PMSG_IS_AUTO(msg))
 			status = 0;
 	}
 
@@ -1975,8 +1975,9 @@ int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg)
 	int		status;
 	int		old_state = hcd->state;
 
-	dev_dbg(&rhdev->dev, "bus %s%s\n",
-			(msg.event & PM_EVENT_AUTO ? "auto-" : ""), "suspend");
+	dev_dbg(&rhdev->dev, "bus %ssuspend, wakeup %d\n",
+			(PMSG_IS_AUTO(msg) ? "auto-" : ""),
+			rhdev->do_remote_wakeup);
 	if (HCD_DEAD(hcd)) {
 		dev_dbg(&rhdev->dev, "skipped %s of dead bus\n", "suspend");
 		return 0;
@@ -2011,8 +2012,8 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg)
 	int		status;
 	int		old_state = hcd->state;
 
-	dev_dbg(&rhdev->dev, "usb %s%s\n",
-			(msg.event & PM_EVENT_AUTO ? "auto-" : ""), "resume");
+	dev_dbg(&rhdev->dev, "usb %sresume\n",
+			(PMSG_IS_AUTO(msg) ? "auto-" : ""));
 	if (HCD_DEAD(hcd)) {
 		dev_dbg(&rhdev->dev, "skipped %s of dead bus\n", "resume");
 		return 0;
@@ -2369,8 +2369,6 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
 	int		port1 = udev->portnum;
 	int		status;
 
-//	dev_dbg(hub->intfdev, "suspend port %d\n", port1);
-
 	/* enable remote wakeup when appropriate; this lets the device
 	 * wake up the upstream hub (including maybe the root hub).
 	 *
@@ -2387,7 +2385,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
 			dev_dbg(&udev->dev, "won't remote wakeup, status %d\n",
 					status);
 			/* bail if autosuspend is requested */
-			if (msg.event & PM_EVENT_AUTO)
+			if (PMSG_IS_AUTO(msg))
 				return status;
 		}
 	}
@@ -2416,12 +2414,13 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
 				USB_CTRL_SET_TIMEOUT);
 
 		/* System sleep transitions should never fail */
-		if (!(msg.event & PM_EVENT_AUTO))
+		if (!PMSG_IS_AUTO(msg))
 			status = 0;
 	} else {
 		/* device has up to 10 msec to fully suspend */
-		dev_dbg(&udev->dev, "usb %ssuspend\n",
-				(msg.event & PM_EVENT_AUTO ? "auto-" : ""));
+		dev_dbg(&udev->dev, "usb %ssuspend, wakeup %d\n",
+				(PMSG_IS_AUTO(msg) ? "auto-" : ""),
+				udev->do_remote_wakeup);
 		usb_set_device_state(udev, USB_STATE_SUSPENDED);
 		msleep(10);
 	}
@@ -2572,7 +2571,7 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
 	} else {
 		/* drive resume for at least 20 msec */
 		dev_dbg(&udev->dev, "usb %sresume\n",
-				(msg.event & PM_EVENT_AUTO ? "auto-" : ""));
+				(PMSG_IS_AUTO(msg) ? "auto-" : ""));
 		msleep(25);
 
 		/* Virtual root hubs can trigger on GET_PORT_STATUS to
@@ -2679,7 +2678,7 @@ static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
 		udev = hdev->children [port1-1];
 		if (udev && udev->can_submit) {
 			dev_warn(&intf->dev, "port %d nyet suspended\n", port1);
-			if (msg.event & PM_EVENT_AUTO)
+			if (PMSG_IS_AUTO(msg))
 				return -EBUSY;
 		}
 	}
@@ -1009,7 +1009,7 @@ static int sierra_suspend(struct usb_serial *serial, pm_message_t message)
 	struct sierra_intf_private *intfdata;
 	int b;
 
-	if (message.event & PM_EVENT_AUTO) {
+	if (PMSG_IS_AUTO(message)) {
 		intfdata = serial->private;
 		spin_lock_irq(&intfdata->susp_lock);
 		b = intfdata->in_flight;
@@ -651,7 +651,7 @@ int usb_wwan_suspend(struct usb_serial *serial, pm_message_t message)
 
 	dbg("%s entered", __func__);
 
-	if (message.event & PM_EVENT_AUTO) {
+	if (PMSG_IS_AUTO(message)) {
 		spin_lock_irq(&intfdata->susp_lock);
 		b = intfdata->in_flight;
 		spin_unlock_irq(&intfdata->susp_lock);
238	include/linux/devfreq.h	Normal file
@@ -0,0 +1,238 @@
+/*
+ * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
+ *	    for Non-CPU Devices.
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ *	MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_DEVFREQ_H__
+#define __LINUX_DEVFREQ_H__
+
+#include <linux/device.h>
+#include <linux/notifier.h>
+#include <linux/opp.h>
+
+#define DEVFREQ_NAME_LEN 16
+
+struct devfreq;
+
+/**
+ * struct devfreq_dev_status - Data given from devfreq user device to
+ *			       governors. Represents the performance
+ *			       statistics.
+ * @total_time:		The total time represented by this instance of
+ *			devfreq_dev_status
+ * @busy_time:		The time that the device was working among the
+ *			total_time.
+ * @current_frequency:	The operating frequency.
+ * @private_data:	An entry not specified by the devfreq framework.
+ *			A device and a specific governor may have their
+ *			own protocol with private_data. However, because
+ *			this is governor-specific, a governor using this
+ *			will only be compatible with devices aware of it.
+ */
+struct devfreq_dev_status {
+	/* both since the last measure */
+	unsigned long total_time;
+	unsigned long busy_time;
+	unsigned long current_frequency;
+	void *private_data;
+};
+
+/**
+ * struct devfreq_dev_profile - Devfreq's user device profile
+ * @initial_freq:	The operating frequency when devfreq_add_device() is
+ *			called.
+ * @polling_ms:		The polling interval in ms. 0 disables polling.
+ * @target:		The device should set its operating frequency at
+ *			freq or the lowest supported frequency above freq.
+ *			If freq is higher than any operable frequency, set
+ *			the maximum. Before returning, the target function
+ *			should set freq to the frequency actually set.
+ * @get_dev_status:	The device should provide the current performance
+ *			status to devfreq, which is used by governors.
+ * @exit:		An optional callback that is called when devfreq
+ *			is removing the devfreq object due to error or
+ *			from a devfreq_remove_device() call. If the user
+ *			has registered devfreq->nb at a notifier-head,
+ *			this is the time to unregister it.
+ */
+struct devfreq_dev_profile {
+	unsigned long initial_freq;
+	unsigned int polling_ms;
+
+	int (*target)(struct device *dev, unsigned long *freq);
+	int (*get_dev_status)(struct device *dev,
+			      struct devfreq_dev_status *stat);
+	void (*exit)(struct device *dev);
+};
+
+/**
+ * struct devfreq_governor - Devfreq policy governor
+ * @name:		Governor's name
+ * @get_target_freq:	Returns desired operating frequency for the device.
+ *			Basically, get_target_freq will run
+ *			devfreq_dev_profile.get_dev_status() to get the
+ *			status of the device (load = busy_time / total_time).
+ *			If no_central_polling is set, this callback is called
+ *			only from update_devfreq() when notified by OPP.
+ * @init:		Called when the devfreq is being attached to a device
+ * @exit:		Called when the devfreq is being removed from a
+ *			device. The governor should stop any internal routines
+ *			before returning because related data may be
+ *			freed after exit().
+ * @no_central_polling:	Do not use devfreq's central polling mechanism.
+ *			When this is set, devfreq will not call
+ *			get_target_freq from devfreq_monitor(). However,
+ *			devfreq will call get_target_freq from
+ *			update_devfreq() when notified by the OPP framework.
+ *
+ * Note that the callbacks are called with devfreq->lock locked by devfreq.
+ */
+struct devfreq_governor {
+	const char name[DEVFREQ_NAME_LEN];
+	int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
+	int (*init)(struct devfreq *this);
+	void (*exit)(struct devfreq *this);
+	const bool no_central_polling;
+};
+
+/**
+ * struct devfreq - Device devfreq structure
+ * @node:	list node - contains the devices with devfreq that have been
+ *		registered.
+ * @lock:	a mutex to protect accessing devfreq.
+ * @dev:	device registered by devfreq class. dev.parent is the device
+ *		using devfreq.
+ * @profile:	device-specific devfreq profile
+ * @governor:	method how to choose frequency based on the usage.
+ * @nb:		notifier block used to notify devfreq object that it should
+ *		reevaluate operable frequencies. Devfreq users may register
+ *		devfreq.nb on the corresponding notifier call chain.
+ * @polling_jiffies:	interval in jiffies.
+ * @previous_freq:	previously configured frequency value.
+ * @next_polling:	the number of remaining jiffies to poll with
+ *			"devfreq_monitor" executions to reevaluate
+ *			frequency/voltage of the device. Set by
+ *			profile's polling_ms interval.
+ * @data:	Private data of the governor. The devfreq framework does not
+ *		touch this.
+ * @being_removed:	a flag to mark that this object is being removed in
+ *			order to prevent trying to remove the object
+ *			multiple times.
+ *
+ * This structure stores the devfreq information for a given device.
+ *
+ * Note that when a governor accesses entries in struct devfreq in its
+ * functions other than the context of callbacks defined in struct
+ * devfreq_governor, the governor should protect its access with the
+ * struct mutex lock in struct devfreq. A governor may use this mutex
+ * to protect its own private data in void *data as well.
+ */
+struct devfreq {
+	struct list_head node;
+
+	struct mutex lock;
+	struct device dev;
+	struct devfreq_dev_profile *profile;
+	const struct devfreq_governor *governor;
+	struct notifier_block nb;
+
+	unsigned long polling_jiffies;
+	unsigned long previous_freq;
+	unsigned int next_polling;
+
+	void *data; /* private data for governors */
+
+	bool being_removed;
+};
+
+#if defined(CONFIG_PM_DEVFREQ)
+extern struct devfreq *devfreq_add_device(struct device *dev,
+				  struct devfreq_dev_profile *profile,
+				  const struct devfreq_governor *governor,
+				  void *data);
+extern int devfreq_remove_device(struct devfreq *devfreq);
+
+/* Helper functions for devfreq user device driver with OPP. */
+extern struct opp *devfreq_recommended_opp(struct device *dev,
+					   unsigned long *freq);
+extern int devfreq_register_opp_notifier(struct device *dev,
+					 struct devfreq *devfreq);
+extern int devfreq_unregister_opp_notifier(struct device *dev,
+					   struct devfreq *devfreq);
+
+#ifdef CONFIG_DEVFREQ_GOV_POWERSAVE
+extern const struct devfreq_governor devfreq_powersave;
+#endif
+#ifdef CONFIG_DEVFREQ_GOV_PERFORMANCE
+extern const struct devfreq_governor devfreq_performance;
+#endif
+#ifdef CONFIG_DEVFREQ_GOV_USERSPACE
+extern const struct devfreq_governor devfreq_userspace;
+#endif
+#ifdef CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND
+extern const struct devfreq_governor devfreq_simple_ondemand;
+/**
+ * struct devfreq_simple_ondemand_data - void *data fed to struct devfreq
+ *	and devfreq_add_device
+ * @upthreshold:	If the load is over this value, the frequency jumps.
+ *			Specify 0 to use the default. Valid value = 0 to 100.
+ * @downdifferential:	If the load is under upthreshold - downdifferential,
+ *			the governor may consider slowing the frequency down.
+ *			Specify 0 to use the default. Valid value = 0 to 100.
+ *			downdifferential < upthreshold must hold.
+ *
+ * If the devfreq_simple_ondemand_data pointer fed to the governor is NULL,
+ * the governor uses the default values.
+ */
+struct devfreq_simple_ondemand_data {
+	unsigned int upthreshold;
+	unsigned int downdifferential;
+};
+#endif
+
+#else /* !CONFIG_PM_DEVFREQ */
+static inline struct devfreq *devfreq_add_device(struct device *dev,
+					struct devfreq_dev_profile *profile,
+					const struct devfreq_governor *governor,
+					void *data)
+{
+	return NULL;
+}
+
+static inline int devfreq_remove_device(struct devfreq *devfreq)
+{
+	return 0;
+}
+
+static inline struct opp *devfreq_recommended_opp(struct device *dev,
+						  unsigned long *freq)
+{
+	return ERR_PTR(-EINVAL);
+}
+
+static inline int devfreq_register_opp_notifier(struct device *dev,
+						struct devfreq *devfreq)
+{
+	return -EINVAL;
+}
+
+static inline int devfreq_unregister_opp_notifier(struct device *dev,
+						  struct devfreq *devfreq)
+{
+	return -EINVAL;
+}
+
+#define devfreq_powersave	NULL
+#define devfreq_performance	NULL
+#define devfreq_userspace	NULL
+#define devfreq_simple_ondemand	NULL
+
+#endif /* CONFIG_PM_DEVFREQ */
+
+#endif /* __LINUX_DEVFREQ_H__ */
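For orientation, a driver hook-up with the API declared above might look like the minimal sketch below. It is illustrative only: the my_hw_*() helpers, device and frequencies are invented placeholders, not part of any real API, and error handling is trimmed; devfreq_add_device(), devfreq_recommended_opp() and the profile/governor types are the ones declared in this header.

/* Sketch: wiring a device into devfreq with the simple_ondemand governor. */
#include <linux/devfreq.h>
#include <linux/err.h>
#include <linux/opp.h>
#include <linux/platform_device.h>

/* Hypothetical hardware helpers, assumed to exist in the driver. */
extern int my_hw_set_rate(struct device *dev, unsigned long freq);
extern int my_hw_read_counters(struct device *dev, unsigned long *busy,
			       unsigned long *total, unsigned long *cur_freq);

static int my_target(struct device *dev, unsigned long *freq)
{
	struct opp *opp = devfreq_recommended_opp(dev, freq); /* lowest OPP >= *freq */

	if (IS_ERR(opp))
		return PTR_ERR(opp);
	*freq = opp_get_freq(opp);	/* report the frequency actually chosen */
	return my_hw_set_rate(dev, *freq);
}

static int my_get_status(struct device *dev, struct devfreq_dev_status *stat)
{
	/* Both counters are "since the last measure", as the header requires. */
	return my_hw_read_counters(dev, &stat->busy_time, &stat->total_time,
				   &stat->current_frequency);
}

static struct devfreq_dev_profile my_profile = {
	.initial_freq	= 200000000,	/* 200 MHz, purely illustrative */
	.polling_ms	= 100,		/* sampled by devfreq's central polling */
	.target		= my_target,
	.get_dev_status	= my_get_status,
};

static int my_probe(struct platform_device *pdev)
{
	struct devfreq *df = devfreq_add_device(&pdev->dev, &my_profile,
						&devfreq_simple_ondemand, NULL);
	return IS_ERR(df) ? PTR_ERR(df) : 0;
}

With the userspace governor instead, the target frequency would come from a value written to the set_freq attribute created by userspace_init() earlier in this merge.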
@@ -638,6 +638,11 @@ static inline void set_dev_node(struct device *dev, int node)
 }
 #endif
 
+static inline struct pm_subsys_data *dev_to_psd(struct device *dev)
+{
+	return dev ? dev->power.subsys_data : NULL;
+}
+
 static inline unsigned int dev_get_uevent_suppress(const struct device *dev)
 {
 	return dev->kobj.uevent_suppress;
@@ -49,6 +49,7 @@ extern int thaw_process(struct task_struct *p);
 
 extern void refrigerator(void);
 extern int freeze_processes(void);
+extern int freeze_kernel_threads(void);
 extern void thaw_processes(void);
 
 static inline int try_to_freeze(void)
@@ -171,7 +172,8 @@ static inline void clear_freeze_flag(struct task_struct *p) {}
 static inline int thaw_process(struct task_struct *p) { return 1; }
 
 static inline void refrigerator(void) {}
-static inline int freeze_processes(void) { BUG(); return 0; }
+static inline int freeze_processes(void) { return -ENOSYS; }
+static inline int freeze_kernel_threads(void) { return -ENOSYS; }
 static inline void thaw_processes(void) {}
 
 static inline int try_to_freeze(void) { return 0; }
@@ -31,7 +31,7 @@
 #include <linux/if_link.h>
 
 #ifdef __KERNEL__
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
 #include <linux/timer.h>
 #include <linux/delay.h>
 #include <linux/atomic.h>
@@ -969,7 +969,7 @@ struct net_device {
 	 */
 	char			name[IFNAMSIZ];
 
-	struct pm_qos_request_list pm_qos_req;
+	struct pm_qos_request	pm_qos_req;
 
 	/* device name hash chain */
 	struct hlist_node	name_hlist;
@@ -16,9 +16,14 @@
 
 #include <linux/err.h>
 #include <linux/cpufreq.h>
+#include <linux/notifier.h>
 
 struct opp;
 
+enum opp_event {
+	OPP_EVENT_ADD, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE,
+};
+
 #if defined(CONFIG_PM_OPP)
 
 unsigned long opp_get_voltage(struct opp *opp);
@@ -40,6 +45,8 @@ int opp_enable(struct device *dev, unsigned long freq);
 
 int opp_disable(struct device *dev, unsigned long freq);
 
+struct srcu_notifier_head *opp_get_notifier(struct device *dev);
+
 #else
 static inline unsigned long opp_get_voltage(struct opp *opp)
 {
@@ -89,6 +96,11 @@ static inline int opp_disable(struct device *dev, unsigned long freq)
 {
 	return 0;
 }
+
+static inline struct srcu_notifier_head *opp_get_notifier(struct device *dev)
+{
+	return ERR_PTR(-EINVAL);
+}
 #endif		/* CONFIG_PM */
 
 #if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP)
@@ -326,6 +326,7 @@ extern struct dev_pm_ops generic_subsys_pm_ops;
  * requested by a driver.
  */
 
+#define PM_EVENT_INVALID	(-1)
 #define PM_EVENT_ON		0x0000
 #define PM_EVENT_FREEZE		0x0001
 #define PM_EVENT_SUSPEND	0x0002
@@ -346,6 +347,7 @@ extern struct dev_pm_ops generic_subsys_pm_ops;
 #define PM_EVENT_AUTO_SUSPEND	(PM_EVENT_AUTO | PM_EVENT_SUSPEND)
 #define PM_EVENT_AUTO_RESUME	(PM_EVENT_AUTO | PM_EVENT_RESUME)
 
+#define PMSG_INVALID	((struct pm_message){ .event = PM_EVENT_INVALID, })
 #define PMSG_ON		((struct pm_message){ .event = PM_EVENT_ON, })
 #define PMSG_FREEZE	((struct pm_message){ .event = PM_EVENT_FREEZE, })
 #define PMSG_QUIESCE	((struct pm_message){ .event = PM_EVENT_QUIESCE, })
@@ -366,6 +368,8 @@ extern struct dev_pm_ops generic_subsys_pm_ops;
 #define PMSG_AUTO_RESUME	((struct pm_message) \
 					{ .event = PM_EVENT_AUTO_RESUME, })
 
+#define PMSG_IS_AUTO(msg)	(((msg).event & PM_EVENT_AUTO) != 0)
+
 /**
  * Device run-time power management status.
  *
@@ -421,6 +425,22 @@ enum rpm_request {
 
 struct wakeup_source;
 
+struct pm_domain_data {
+	struct list_head list_node;
+	struct device *dev;
+};
+
+struct pm_subsys_data {
+	spinlock_t lock;
+	unsigned int refcount;
+#ifdef CONFIG_PM_CLK
+	struct list_head clock_list;
+#endif
+#ifdef CONFIG_PM_GENERIC_DOMAINS
+	struct pm_domain_data *domain_data;
+#endif
+};
+
 struct dev_pm_info {
 	pm_message_t		power_state;
 	unsigned int		can_wakeup:1;
@@ -432,6 +452,7 @@ struct dev_pm_info {
 	struct list_head	entry;
 	struct completion	completion;
 	struct wakeup_source	*wakeup;
+	bool			wakeup_path:1;
 #else
 	unsigned int		should_wakeup:1;
 #endif
@@ -462,10 +483,13 @@ struct dev_pm_info {
 	unsigned long		suspended_jiffies;
 	unsigned long		accounting_timestamp;
 #endif
-	void			*subsys_data;  /* Owned by the subsystem. */
+	struct pm_subsys_data	*subsys_data;  /* Owned by the subsystem. */
+	struct pm_qos_constraints *constraints;
 };
 
 extern void update_pm_runtime_accounting(struct device *dev);
+extern int dev_pm_get_subsys_data(struct device *dev);
+extern int dev_pm_put_subsys_data(struct device *dev);
 
 /*
  * Power domains provide callbacks that are executed during system suspend,
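Every driver conversion in this merge reduces to the same idiom around the new PMSG_IS_AUTO() macro. As a hedged sketch (the my_*() helpers are invented placeholders, not a real driver):

/* Sketch: a suspend callback distinguishing runtime (auto) suspend
 * from a system sleep transition after this change. */
#include <linux/pm.h>
#include <linux/usb.h>

extern bool my_device_is_busy(struct usb_interface *intf);	/* placeholder */
extern void my_quiesce_hardware(struct usb_interface *intf);	/* placeholder */

static int my_suspend(struct usb_interface *intf, pm_message_t message)
{
	if (PMSG_IS_AUTO(message) && my_device_is_busy(intf))
		return -EBUSY;	/* refusing is allowed only for autosuspend */

	my_quiesce_hardware(intf);	/* system sleep must not fail */
	return 0;
}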
71	include/linux/pm_clock.h	Normal file
@@ -0,0 +1,71 @@
+/*
+ * pm_clock.h - Definitions and headers related to device clocks.
+ *
+ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#ifndef _LINUX_PM_CLOCK_H
+#define _LINUX_PM_CLOCK_H
+
+#include <linux/device.h>
+#include <linux/notifier.h>
+
+struct pm_clk_notifier_block {
+	struct notifier_block nb;
+	struct dev_pm_domain *pm_domain;
+	char *con_ids[];
+};
+
+#ifdef CONFIG_PM_CLK
+static inline bool pm_clk_no_clocks(struct device *dev)
+{
+	return dev && dev->power.subsys_data
+		&& list_empty(&dev->power.subsys_data->clock_list);
+}
+
+extern void pm_clk_init(struct device *dev);
+extern int pm_clk_create(struct device *dev);
+extern void pm_clk_destroy(struct device *dev);
+extern int pm_clk_add(struct device *dev, const char *con_id);
+extern void pm_clk_remove(struct device *dev, const char *con_id);
+extern int pm_clk_suspend(struct device *dev);
+extern int pm_clk_resume(struct device *dev);
+#else
+static inline bool pm_clk_no_clocks(struct device *dev)
+{
+	return true;
+}
+static inline void pm_clk_init(struct device *dev)
+{
+}
+static inline int pm_clk_create(struct device *dev)
+{
+	return -EINVAL;
+}
+static inline void pm_clk_destroy(struct device *dev)
+{
+}
+static inline int pm_clk_add(struct device *dev, const char *con_id)
+{
+	return -EINVAL;
+}
+static inline void pm_clk_remove(struct device *dev, const char *con_id)
+{
+}
+#define pm_clk_suspend	NULL
+#define pm_clk_resume	NULL
+#endif
+
+#ifdef CONFIG_HAVE_CLK
+extern void pm_clk_add_notifier(struct bus_type *bus,
+				struct pm_clk_notifier_block *clknb);
+#else
+static inline void pm_clk_add_notifier(struct bus_type *bus,
+				       struct pm_clk_notifier_block *clknb)
+{
+}
+#endif
+
+#endif
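A plausible consumer of these helpers, assuming a platform device whose primary clock is looked up with a NULL con_id (function and device names invented for illustration):

/* Sketch: put the device's clock on the PM clock list so the subsystem
 * (for example a PM domain) can gate it via pm_clk_suspend()/pm_clk_resume(). */
#include <linux/platform_device.h>
#include <linux/pm_clock.h>

static int my_probe(struct platform_device *pdev)
{
	int ret = pm_clk_create(&pdev->dev);	/* allocates pm_subsys_data + clock_list */

	if (ret)
		return ret;

	ret = pm_clk_add(&pdev->dev, NULL);	/* NULL con_id: the default clock */
	if (ret)
		pm_clk_destroy(&pdev->dev);
	return ret;
}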
@@ -13,6 +13,7 @@
 
 enum gpd_status {
 	GPD_STATE_ACTIVE = 0,	/* PM domain is active */
+	GPD_STATE_WAIT_MASTER,	/* PM domain's master is being waited for */
 	GPD_STATE_BUSY,		/* Something is happening to the PM domain */
 	GPD_STATE_REPEAT,	/* Power off in progress, to be repeated */
 	GPD_STATE_POWER_OFF,	/* PM domain is off */
@@ -25,15 +26,14 @@ struct dev_power_governor {
 struct generic_pm_domain {
 	struct dev_pm_domain domain;	/* PM domain operations */
 	struct list_head gpd_list_node;	/* Node in the global PM domains list */
-	struct list_head sd_node;	/* Node in the parent's subdomain list */
-	struct generic_pm_domain *parent;	/* Parent PM domain */
-	struct list_head sd_list;	/* List of dubdomains */
+	struct list_head master_links;	/* Links with PM domain as a master */
+	struct list_head slave_links;	/* Links with PM domain as a slave */
 	struct list_head dev_list;	/* List of devices */
 	struct mutex lock;
 	struct dev_power_governor *gov;
 	struct work_struct power_off_work;
 	unsigned int in_progress;	/* Number of devices being suspended now */
-	unsigned int sd_count;	/* Number of subdomains with power "on" */
+	atomic_t sd_count;	/* Number of subdomains with power "on" */
 	enum gpd_status status;	/* Current state of the domain */
 	wait_queue_head_t status_wait_queue;
 	struct task_struct *poweroff_task;	/* Powering off task */
@@ -42,6 +42,7 @@ struct generic_pm_domain {
 	unsigned int suspended_count;	/* System suspend device counter */
 	unsigned int prepared_count;	/* Suspend counter of prepared devices */
 	bool suspend_power_off;	/* Power status before system suspend */
+	bool dev_irq_safe;	/* Device callbacks are IRQ-safe */
 	int (*power_off)(struct generic_pm_domain *domain);
 	int (*power_on)(struct generic_pm_domain *domain);
 	int (*start_device)(struct device *dev);
@@ -54,12 +55,23 @@ static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
 	return container_of(pd, struct generic_pm_domain, domain);
 }
 
-struct dev_list_entry {
-	struct list_head node;
-	struct device *dev;
+struct gpd_link {
+	struct generic_pm_domain *master;
+	struct list_head master_node;
+	struct generic_pm_domain *slave;
+	struct list_head slave_node;
 };
 
+struct generic_pm_domain_data {
+	struct pm_domain_data base;
+	bool need_restore;
+};
+
+static inline struct generic_pm_domain_data *to_gpd_data(struct pm_domain_data *pdd)
+{
+	return container_of(pdd, struct generic_pm_domain_data, base);
+}
+
 #ifdef CONFIG_PM_GENERIC_DOMAINS
 extern int pm_genpd_add_device(struct generic_pm_domain *genpd,
 			       struct device *dev);
155	include/linux/pm_qos.h	Normal file
@@ -0,0 +1,155 @@
+#ifndef _LINUX_PM_QOS_H
+#define _LINUX_PM_QOS_H
+/* interface for the pm_qos_power infrastructure of the linux kernel.
+ *
+ * Mark Gross <mgross@linux.intel.com>
+ */
+#include <linux/plist.h>
+#include <linux/notifier.h>
+#include <linux/miscdevice.h>
+#include <linux/device.h>
+
+#define PM_QOS_RESERVED 0
+#define PM_QOS_CPU_DMA_LATENCY 1
+#define PM_QOS_NETWORK_LATENCY 2
+#define PM_QOS_NETWORK_THROUGHPUT 3
+
+#define PM_QOS_NUM_CLASSES 4
+#define PM_QOS_DEFAULT_VALUE -1
+
+#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE	(2000 * USEC_PER_SEC)
+#define PM_QOS_NETWORK_LAT_DEFAULT_VALUE	(2000 * USEC_PER_SEC)
+#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE	0
+#define PM_QOS_DEV_LAT_DEFAULT_VALUE		0
+
+struct pm_qos_request {
+	struct plist_node node;
+	int pm_qos_class;
+};
+
+struct dev_pm_qos_request {
+	struct plist_node node;
+	struct device *dev;
+};
+
+enum pm_qos_type {
+	PM_QOS_UNITIALIZED,
+	PM_QOS_MAX,		/* return the largest value */
+	PM_QOS_MIN		/* return the smallest value */
+};
+
+/*
+ * Note: The lockless read path depends on the CPU accessing
+ * target_value atomically. Atomic access is only guaranteed on all CPU
+ * types Linux supports for 32-bit quantities.
+ */
+struct pm_qos_constraints {
+	struct plist_head list;
+	s32 target_value;	/* Do not change to 64 bit */
+	s32 default_value;
+	enum pm_qos_type type;
+	struct blocking_notifier_head *notifiers;
+};
+
+/* Action requested to pm_qos_update_target */
+enum pm_qos_req_action {
+	PM_QOS_ADD_REQ,		/* Add a new request */
+	PM_QOS_UPDATE_REQ,	/* Update an existing request */
+	PM_QOS_REMOVE_REQ	/* Remove an existing request */
+};
+
+static inline int dev_pm_qos_request_active(struct dev_pm_qos_request *req)
+{
+	return req->dev != NULL;
+}
+
+#ifdef CONFIG_PM
+int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
+			 enum pm_qos_req_action action, int value);
+void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class,
+			s32 value);
+void pm_qos_update_request(struct pm_qos_request *req,
+			   s32 new_value);
+void pm_qos_remove_request(struct pm_qos_request *req);
+
+int pm_qos_request(int pm_qos_class);
+int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier);
+int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
+int pm_qos_request_active(struct pm_qos_request *req);
+s32 pm_qos_read_value(struct pm_qos_constraints *c);
+
+s32 dev_pm_qos_read_value(struct device *dev);
+int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
+			   s32 value);
+int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value);
+int dev_pm_qos_remove_request(struct dev_pm_qos_request *req);
+int dev_pm_qos_add_notifier(struct device *dev,
+			    struct notifier_block *notifier);
+int dev_pm_qos_remove_notifier(struct device *dev,
+			       struct notifier_block *notifier);
+int dev_pm_qos_add_global_notifier(struct notifier_block *notifier);
+int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier);
+void dev_pm_qos_constraints_init(struct device *dev);
+void dev_pm_qos_constraints_destroy(struct device *dev);
+#else
+static inline int pm_qos_update_target(struct pm_qos_constraints *c,
+				       struct plist_node *node,
+				       enum pm_qos_req_action action,
+				       int value)
+			{ return 0; }
+static inline void pm_qos_add_request(struct pm_qos_request *req,
+				      int pm_qos_class, s32 value)
+			{ return; }
+static inline void pm_qos_update_request(struct pm_qos_request *req,
+					 s32 new_value)
+			{ return; }
+static inline void pm_qos_remove_request(struct pm_qos_request *req)
+			{ return; }
+
+static inline int pm_qos_request(int pm_qos_class)
+			{ return 0; }
+static inline int pm_qos_add_notifier(int pm_qos_class,
+				      struct notifier_block *notifier)
+			{ return 0; }
+static inline int pm_qos_remove_notifier(int pm_qos_class,
+					 struct notifier_block *notifier)
+			{ return 0; }
+static inline int pm_qos_request_active(struct pm_qos_request *req)
+			{ return 0; }
+static inline s32 pm_qos_read_value(struct pm_qos_constraints *c)
+			{ return 0; }
+
+static inline s32 dev_pm_qos_read_value(struct device *dev)
+			{ return 0; }
+static inline int dev_pm_qos_add_request(struct device *dev,
+					 struct dev_pm_qos_request *req,
+					 s32 value)
+			{ return 0; }
+static inline int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
+					    s32 new_value)
+			{ return 0; }
+static inline int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
+			{ return 0; }
+static inline int dev_pm_qos_add_notifier(struct device *dev,
+					  struct notifier_block *notifier)
+			{ return 0; }
+static inline int dev_pm_qos_remove_notifier(struct device *dev,
+					     struct notifier_block *notifier)
+			{ return 0; }
+static inline int dev_pm_qos_add_global_notifier(
+					struct notifier_block *notifier)
+			{ return 0; }
+static inline int dev_pm_qos_remove_global_notifier(
+					struct notifier_block *notifier)
+			{ return 0; }
+static inline void dev_pm_qos_constraints_init(struct device *dev)
+{
+	dev->power.power_state = PMSG_ON;
+}
+static inline void dev_pm_qos_constraints_destroy(struct device *dev)
+{
+	dev->power.power_state = PMSG_INVALID;
+}
+#endif
+
+#endif
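The classic system-wide side of the API keeps its semantics across the rename; only the request type changes. A minimal sketch of a driver pinning CPU DMA latency while I/O is active (the 20 usec bound and the function names are illustrative, not from any real driver):

#include <linux/pm_qos.h>

static struct pm_qos_request my_req;	/* formerly struct pm_qos_request_list */

static void my_io_start(void)
{
	/* Ask cpuidle to keep wakeup latency at or below 20 usec. */
	pm_qos_add_request(&my_req, PM_QOS_CPU_DMA_LATENCY, 20);
}

static void my_io_stop(void)
{
	pm_qos_remove_request(&my_req);	/* drop the constraint */
}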
@@ -1,38 +0,0 @@
-#ifndef _LINUX_PM_QOS_PARAMS_H
-#define _LINUX_PM_QOS_PARAMS_H
-/* interface for the pm_qos_power infrastructure of the linux kernel.
- *
- * Mark Gross <mgross@linux.intel.com>
- */
-#include <linux/plist.h>
-#include <linux/notifier.h>
-#include <linux/miscdevice.h>
-
-#define PM_QOS_RESERVED 0
-#define PM_QOS_CPU_DMA_LATENCY 1
-#define PM_QOS_NETWORK_LATENCY 2
-#define PM_QOS_NETWORK_THROUGHPUT 3
-
-#define PM_QOS_NUM_CLASSES 4
-#define PM_QOS_DEFAULT_VALUE -1
-
-#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE	(2000 * USEC_PER_SEC)
-#define PM_QOS_NETWORK_LAT_DEFAULT_VALUE	(2000 * USEC_PER_SEC)
-#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE	0
-
-struct pm_qos_request_list {
-	struct plist_node list;
-	int pm_qos_class;
-};
-
-void pm_qos_add_request(struct pm_qos_request_list *l, int pm_qos_class, s32 value);
-void pm_qos_update_request(struct pm_qos_request_list *pm_qos_req,
-		s32 new_value);
-void pm_qos_remove_request(struct pm_qos_request_list *pm_qos_req);
-
-int pm_qos_request(int pm_qos_class);
-int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier);
-int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
-int pm_qos_request_active(struct pm_qos_request_list *req);
-
-#endif
@@ -251,46 +251,4 @@ static inline void pm_runtime_dont_use_autosuspend(struct device *dev)
 	__pm_runtime_use_autosuspend(dev, false);
 }
 
-struct pm_clk_notifier_block {
-	struct notifier_block nb;
-	struct dev_pm_domain *pm_domain;
-	char *con_ids[];
-};
-
-#ifdef CONFIG_PM_CLK
-extern int pm_clk_init(struct device *dev);
-extern void pm_clk_destroy(struct device *dev);
-extern int pm_clk_add(struct device *dev, const char *con_id);
-extern void pm_clk_remove(struct device *dev, const char *con_id);
-extern int pm_clk_suspend(struct device *dev);
-extern int pm_clk_resume(struct device *dev);
-#else
-static inline int pm_clk_init(struct device *dev)
-{
-	return -EINVAL;
-}
-static inline void pm_clk_destroy(struct device *dev)
-{
-}
-static inline int pm_clk_add(struct device *dev, const char *con_id)
-{
-	return -EINVAL;
-}
-static inline void pm_clk_remove(struct device *dev, const char *con_id)
-{
-}
-#define pm_clk_suspend	NULL
-#define pm_clk_resume	NULL
-#endif
-
-#ifdef CONFIG_HAVE_CLK
-extern void pm_clk_add_notifier(struct bus_type *bus,
-				struct pm_clk_notifier_block *clknb);
-#else
-static inline void pm_clk_add_notifier(struct bus_type *bus,
-				       struct pm_clk_notifier_block *clknb)
-{
-}
-#endif
-
 #endif
@@ -8,15 +8,18 @@
 #include <linux/mm.h>
 #include <asm/errno.h>
 
-#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE)
+#ifdef CONFIG_VT
 extern void pm_set_vt_switch(int);
-extern int pm_prepare_console(void);
-extern void pm_restore_console(void);
 #else
 static inline void pm_set_vt_switch(int do_switch)
 {
 }
+#endif
+
+#ifdef CONFIG_VT_CONSOLE_SLEEP
+extern int pm_prepare_console(void);
+extern void pm_restore_console(void);
+#else
 static inline int pm_prepare_console(void)
 {
 	return 0;
@@ -34,6 +37,58 @@ typedef int __bitwise suspend_state_t;
 #define PM_SUSPEND_MEM		((__force suspend_state_t) 3)
 #define PM_SUSPEND_MAX		((__force suspend_state_t) 4)
 
+enum suspend_stat_step {
+	SUSPEND_FREEZE = 1,
+	SUSPEND_PREPARE,
+	SUSPEND_SUSPEND,
+	SUSPEND_SUSPEND_NOIRQ,
+	SUSPEND_RESUME_NOIRQ,
+	SUSPEND_RESUME
+};
+
+struct suspend_stats {
+	int	success;
+	int	fail;
+	int	failed_freeze;
+	int	failed_prepare;
+	int	failed_suspend;
+	int	failed_suspend_noirq;
+	int	failed_resume;
+	int	failed_resume_noirq;
+#define	REC_FAILED_NUM	2
+	int	last_failed_dev;
+	char	failed_devs[REC_FAILED_NUM][40];
+	int	last_failed_errno;
+	int	errno[REC_FAILED_NUM];
+	int	last_failed_step;
+	enum suspend_stat_step	failed_steps[REC_FAILED_NUM];
+};
+
+extern struct suspend_stats suspend_stats;
+
+static inline void dpm_save_failed_dev(const char *name)
+{
+	strlcpy(suspend_stats.failed_devs[suspend_stats.last_failed_dev],
+		name,
+		sizeof(suspend_stats.failed_devs[0]));
+	suspend_stats.last_failed_dev++;
+	suspend_stats.last_failed_dev %= REC_FAILED_NUM;
+}
+
+static inline void dpm_save_failed_errno(int err)
+{
+	suspend_stats.errno[suspend_stats.last_failed_errno] = err;
+	suspend_stats.last_failed_errno++;
+	suspend_stats.last_failed_errno %= REC_FAILED_NUM;
+}
+
+static inline void dpm_save_failed_step(enum suspend_stat_step step)
+{
+	suspend_stats.failed_steps[suspend_stats.last_failed_step] = step;
+	suspend_stats.last_failed_step++;
+	suspend_stats.last_failed_step %= REC_FAILED_NUM;
+}
+
 /**
  * struct platform_suspend_ops - Callbacks for managing platform dependent
  *	system sleep states.
@@ -334,4 +389,38 @@ static inline void unlock_system_sleep(void)
 }
 #endif
 
+#ifdef CONFIG_ARCH_SAVE_PAGE_KEYS
+/*
+ * The ARCH_SAVE_PAGE_KEYS functions can be used by an architecture
+ * to save/restore additional information to/from the array of page
+ * frame numbers in the hibernation image. For s390 this is used to
+ * save and restore the storage key for each page that is included
+ * in the hibernation image.
+ */
+unsigned long page_key_additional_pages(unsigned long pages);
+int page_key_alloc(unsigned long pages);
+void page_key_free(void);
+void page_key_read(unsigned long *pfn);
+void page_key_memorize(unsigned long *pfn);
+void page_key_write(void *address);
+
+#else /* !CONFIG_ARCH_SAVE_PAGE_KEYS */
+
+static inline unsigned long page_key_additional_pages(unsigned long pages)
+{
+	return 0;
+}
+
+static inline int page_key_alloc(unsigned long pages)
+{
+	return 0;
+}
+
+static inline void page_key_free(void) {}
+static inline void page_key_read(unsigned long *pfn) {}
+static inline void page_key_memorize(unsigned long *pfn) {}
+static inline void page_key_write(void *address) {}
+
+#endif /* !CONFIG_ARCH_SAVE_PAGE_KEYS */
+
 #endif /* _LINUX_SUSPEND_H */
@@ -29,7 +29,7 @@
 #include <linux/poll.h>
 #include <linux/mm.h>
 #include <linux/bitops.h>
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
 
 #define snd_pcm_substream_chip(substream) ((substream)->private_data)
 #define snd_pcm_chip(pcm) ((pcm)->private_data)
@@ -373,7 +373,7 @@ struct snd_pcm_substream {
 	int number;
 	char name[32];			/* substream name */
 	int stream;			/* stream (direction) */
-	struct pm_qos_request_list latency_pm_qos_req; /* pm_qos request */
+	struct pm_qos_request latency_pm_qos_req; /* pm_qos request */
 	size_t buffer_bytes_max;	/* limit ring buffer size */
 	struct snd_dma_buffer dma_buffer;
 	unsigned int dma_buf_id;
99	include/trace/events/rpm.h	Normal file
@@ -0,0 +1,99 @@
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rpm
+
+#if !defined(_TRACE_RUNTIME_POWER_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RUNTIME_POWER_H
+
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+#include <linux/device.h>
+
+/*
+ * The rpm_internal events are used for tracing some important
+ * runtime pm internal functions.
+ */
+DECLARE_EVENT_CLASS(rpm_internal,
+
+	TP_PROTO(struct device *dev, int flags),
+
+	TP_ARGS(dev, flags),
+
+	TP_STRUCT__entry(
+		__string(	name,		dev_name(dev)	)
+		__field(	int,		flags		)
+		__field(	int,		usage_count	)
+		__field(	int,		disable_depth	)
+		__field(	int,		runtime_auto	)
+		__field(	int,		request_pending	)
+		__field(	int,		irq_safe	)
+		__field(	int,		child_count	)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, dev_name(dev));
+		__entry->flags = flags;
+		__entry->usage_count = atomic_read(
+			&dev->power.usage_count);
+		__entry->disable_depth = dev->power.disable_depth;
+		__entry->runtime_auto = dev->power.runtime_auto;
+		__entry->request_pending = dev->power.request_pending;
+		__entry->irq_safe = dev->power.irq_safe;
+		__entry->child_count = atomic_read(
+			&dev->power.child_count);
+	),
+
+	TP_printk("%s flags-%x cnt-%-2d dep-%-2d auto-%-1d p-%-1d"
+			" irq-%-1d child-%d",
+			__get_str(name), __entry->flags,
+			__entry->usage_count,
+			__entry->disable_depth,
+			__entry->runtime_auto,
+			__entry->request_pending,
+			__entry->irq_safe,
+			__entry->child_count
+		 )
+);
+DEFINE_EVENT(rpm_internal, rpm_suspend,
+
+	TP_PROTO(struct device *dev, int flags),
+
+	TP_ARGS(dev, flags)
+);
+DEFINE_EVENT(rpm_internal, rpm_resume,
+
+	TP_PROTO(struct device *dev, int flags),
+
+	TP_ARGS(dev, flags)
+);
+DEFINE_EVENT(rpm_internal, rpm_idle,
+
+	TP_PROTO(struct device *dev, int flags),
+
+	TP_ARGS(dev, flags)
+);
+
+TRACE_EVENT(rpm_return_int,
+	TP_PROTO(struct device *dev, unsigned long ip, int ret),
+	TP_ARGS(dev, ip, ret),
+
+	TP_STRUCT__entry(
+		__string(	name,		dev_name(dev))
+		__field(	unsigned long,	ip	)
+		__field(	int,		ret	)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, dev_name(dev));
+		__entry->ip = ip;
+		__entry->ret = ret;
+	),
+
+	TP_printk("%pS:%s ret=%d", (void *)__entry->ip, __get_str(name),
+		__entry->ret)
+);
+
+#endif /* _TRACE_RUNTIME_POWER_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
@@ -9,7 +9,7 @@ obj-y     = sched.o fork.o exec_domain.o panic.o printk.o \
 	    rcupdate.o extable.o params.o posix-timers.o \
 	    kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
 	    hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
-	    notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o \
+	    notifier.o ksysfs.o sched_clock.o cred.o \
 	    async.o range.o
 obj-y += groups.o
 
@@ -67,7 +67,7 @@ static void fake_signal_wake_up(struct task_struct *p)
 	unsigned long flags;
 
 	spin_lock_irqsave(&p->sighand->siglock, flags);
-	signal_wake_up(p, 0);
+	signal_wake_up(p, 1);
 	spin_unlock_irqrestore(&p->sighand->siglock, flags);
 }
 
@@ -27,6 +27,7 @@ config HIBERNATION
 	select HIBERNATE_CALLBACKS
 	select LZO_COMPRESS
 	select LZO_DECOMPRESS
+	select CRC32
 	---help---
 	  Enable the suspend to disk (STD) functionality, which is usually
 	  called "hibernation" in user interfaces. STD checkpoints the
@@ -65,6 +66,9 @@ config HIBERNATION
 
 	  For more information take a look at <file:Documentation/power/swsusp.txt>.
 
+config ARCH_SAVE_PAGE_KEYS
+	bool
+
 config PM_STD_PARTITION
 	string "Default resume partition"
 	depends on HIBERNATION
@@ -1,8 +1,8 @@
 
 ccflags-$(CONFIG_PM_DEBUG)	:= -DDEBUG
 
-obj-$(CONFIG_PM)		+= main.o
-obj-$(CONFIG_PM_SLEEP)		+= console.o
+obj-$(CONFIG_PM)		+= main.o qos.o
+obj-$(CONFIG_VT_CONSOLE_SLEEP)	+= console.o
 obj-$(CONFIG_FREEZER)		+= process.o
 obj-$(CONFIG_SUSPEND)		+= suspend.o
 obj-$(CONFIG_PM_TEST_SUSPEND)	+= suspend_test.o
@@ -1,5 +1,5 @@
 /*
- * drivers/power/process.c - Functions for saving/restoring console.
+ * Functions for saving/restoring console.
 *
 * Originally from swsusp.
 */
@@ -10,7 +10,6 @@
 #include <linux/module.h>
 #include "power.h"
 
-#if defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE)
 #define SUSPEND_CONSOLE	(MAX_NR_CONSOLES-1)
 
 static int orig_fgconsole, orig_kmsg;
@@ -32,4 +31,3 @@ void pm_restore_console(void)
 		vt_kmsg_redirect(orig_kmsg);
 	}
 }
-#endif
@@ -14,6 +14,7 @@
 #include <linux/reboot.h>
 #include <linux/string.h>
 #include <linux/device.h>
+#include <linux/async.h>
 #include <linux/kmod.h>
 #include <linux/delay.h>
 #include <linux/fs.h>
@@ -29,12 +30,14 @@
 #include "power.h"
 
 
-static int nocompress = 0;
-static int noresume = 0;
+static int nocompress;
+static int noresume;
+static int resume_wait;
+static int resume_delay;
 static char resume_file[256] = CONFIG_PM_STD_PARTITION;
 dev_t swsusp_resume_device;
 sector_t swsusp_resume_block;
-int in_suspend __nosavedata = 0;
+int in_suspend __nosavedata;
 
 enum {
 	HIBERNATION_INVALID,
@@ -334,12 +337,16 @@ int hibernation_snapshot(int platform_mode)
 	if (error)
 		goto Close;
 
-	error = dpm_prepare(PMSG_FREEZE);
-	if (error)
-		goto Complete_devices;
-
 	/* Preallocate image memory before shutting down devices. */
 	error = hibernate_preallocate_memory();
 	if (error)
 		goto Close;
 
+	error = freeze_kernel_threads();
+	if (error)
+		goto Close;
+
+	error = dpm_prepare(PMSG_FREEZE);
+	if (error)
+		goto Complete_devices;
+
@@ -463,7 +470,7 @@ static int resume_target_kernel(bool platform_mode)
 * @platform_mode: If set, use platform driver to prepare for the transition.
 *
 * This routine must be called with pm_mutex held. If it is successful, control
- * reappears in the restored target kernel in hibernation_snaphot().
+ * reappears in the restored target kernel in hibernation_snapshot().
 */
 int hibernation_restore(int platform_mode)
 {
@@ -650,6 +657,9 @@ int hibernate(void)
 			flags |= SF_PLATFORM_MODE;
 		if (nocompress)
 			flags |= SF_NOCOMPRESS_MODE;
+		else
+			flags |= SF_CRC32_MODE;
+
 		pr_debug("PM: writing image.\n");
 		error = swsusp_write(flags);
 		swsusp_free();
@@ -724,6 +734,12 @@ static int software_resume(void)
 
 	pr_debug("PM: Checking hibernation image partition %s\n", resume_file);
 
+	if (resume_delay) {
+		printk(KERN_INFO "Waiting %dsec before reading resume device...\n",
+			resume_delay);
+		ssleep(resume_delay);
+	}
+
 	/* Check if the device is there */
 	swsusp_resume_device = name_to_dev_t(resume_file);
 	if (!swsusp_resume_device) {
@@ -732,6 +748,13 @@ static int software_resume(void)
 		 * to wait for this to finish.
 		 */
 		wait_for_device_probe();
+
+		if (resume_wait) {
+			while ((swsusp_resume_device = name_to_dev_t(resume_file)) == 0)
+				msleep(10);
+			async_synchronize_full();
+		}
+
 		/*
 		 * We can't depend on SCSI devices being available after loading
 		 * one of their modules until scsi_complete_async_scans() is
@@ -1060,7 +1083,21 @@ static int __init noresume_setup(char *str)
 	return 1;
 }
 
+static int __init resumewait_setup(char *str)
+{
+	resume_wait = 1;
+	return 1;
+}
+
+static int __init resumedelay_setup(char *str)
+{
+	resume_delay = simple_strtoul(str, NULL, 0);
+	return 1;
+}
+
 __setup("noresume", noresume_setup);
 __setup("resume_offset=", resume_offset_setup);
 __setup("resume=", resume_setup);
 __setup("hibernate=", hibernate_setup);
+__setup("resumewait", resumewait_setup);
+__setup("resumedelay=", resumedelay_setup);
@ -12,6 +12,8 @@
|
||||
#include <linux/string.h>
|
||||
#include <linux/resume-trace.h>
|
||||
#include <linux/workqueue.h>
|
||||
#include <linux/debugfs.h>
|
||||
#include <linux/seq_file.h>
|
||||
|
||||
#include "power.h"
|
||||
|
||||
@@ -131,6 +133,101 @@ static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
 power_attr(pm_test);
 #endif /* CONFIG_PM_DEBUG */
 
+#ifdef CONFIG_DEBUG_FS
+static char *suspend_step_name(enum suspend_stat_step step)
+{
+	switch (step) {
+	case SUSPEND_FREEZE:
+		return "freeze";
+	case SUSPEND_PREPARE:
+		return "prepare";
+	case SUSPEND_SUSPEND:
+		return "suspend";
+	case SUSPEND_SUSPEND_NOIRQ:
+		return "suspend_noirq";
+	case SUSPEND_RESUME_NOIRQ:
+		return "resume_noirq";
+	case SUSPEND_RESUME:
+		return "resume";
+	default:
+		return "";
+	}
+}
+
+static int suspend_stats_show(struct seq_file *s, void *unused)
+{
+	int i, index, last_dev, last_errno, last_step;
+
+	last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
+	last_dev %= REC_FAILED_NUM;
+	last_errno = suspend_stats.last_failed_errno + REC_FAILED_NUM - 1;
+	last_errno %= REC_FAILED_NUM;
+	last_step = suspend_stats.last_failed_step + REC_FAILED_NUM - 1;
+	last_step %= REC_FAILED_NUM;
+	seq_printf(s, "%s: %d\n%s: %d\n%s: %d\n%s: %d\n"
+			"%s: %d\n%s: %d\n%s: %d\n%s: %d\n",
+			"success", suspend_stats.success,
+			"fail", suspend_stats.fail,
+			"failed_freeze", suspend_stats.failed_freeze,
+			"failed_prepare", suspend_stats.failed_prepare,
+			"failed_suspend", suspend_stats.failed_suspend,
+			"failed_suspend_noirq",
+				suspend_stats.failed_suspend_noirq,
+			"failed_resume", suspend_stats.failed_resume,
+			"failed_resume_noirq",
+				suspend_stats.failed_resume_noirq);
+	seq_printf(s,	"failures:\n  last_failed_dev:\t%-s\n",
+			suspend_stats.failed_devs[last_dev]);
+	for (i = 1; i < REC_FAILED_NUM; i++) {
+		index = last_dev + REC_FAILED_NUM - i;
+		index %= REC_FAILED_NUM;
+		seq_printf(s, "\t\t\t%-s\n",
+			suspend_stats.failed_devs[index]);
+	}
+	seq_printf(s,	"  last_failed_errno:\t%-d\n",
+			suspend_stats.errno[last_errno]);
+	for (i = 1; i < REC_FAILED_NUM; i++) {
+		index = last_errno + REC_FAILED_NUM - i;
+		index %= REC_FAILED_NUM;
+		seq_printf(s, "\t\t\t%-d\n",
+			suspend_stats.errno[index]);
+	}
+	seq_printf(s,	"  last_failed_step:\t%-s\n",
+			suspend_step_name(
+				suspend_stats.failed_steps[last_step]));
+	for (i = 1; i < REC_FAILED_NUM; i++) {
+		index = last_step + REC_FAILED_NUM - i;
+		index %= REC_FAILED_NUM;
+		seq_printf(s, "\t\t\t%-s\n",
+			suspend_step_name(
+				suspend_stats.failed_steps[index]));
+	}
+
+	return 0;
+}
+
+static int suspend_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, suspend_stats_show, NULL);
+}
+
+static const struct file_operations suspend_stats_operations = {
+	.open		= suspend_stats_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int __init pm_debugfs_init(void)
+{
+	debugfs_create_file("suspend_stats", S_IFREG | S_IRUGO,
+			NULL, NULL, &suspend_stats_operations);
+	return 0;
+}
+
+late_initcall(pm_debugfs_init);
+#endif /* CONFIG_DEBUG_FS */
+
 #endif /* CONFIG_PM_SLEEP */
 
 struct kobject *power_kobj;
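The failed_devs[], errno[] and failed_steps[] arrays above are ring buffers of REC_FAILED_NUM entries: last_failed_* points one past the most recent record, so the show routine steps backwards modulo REC_FAILED_NUM. A minimal user-space sketch for dumping the resulting file, assuming debugfs is mounted at /sys/kernel/debug:

	/* Dump the suspend_stats debugfs file to stdout. */
	#include <stdio.h>

	int main(void)
	{
		char buf[4096];
		size_t n;
		FILE *f = fopen("/sys/kernel/debug/suspend_stats", "r");

		if (!f) {
			perror("fopen");
			return 1;
		}
		while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
			fwrite(buf, 1, n, stdout);
		fclose(f);
		return 0;
	}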
@@ -194,6 +291,11 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
 	}
 	if (state < PM_SUSPEND_MAX && *s)
 		error = enter_state(state);
+	if (error) {
+		suspend_stats.fail++;
+		dpm_save_failed_errno(error);
+	} else
+		suspend_stats.success++;
 #endif
 
  Exit:
@@ -146,6 +146,7 @@ extern int swsusp_swap_in_use(void);
  */
 #define SF_PLATFORM_MODE	1
 #define SF_NOCOMPRESS_MODE	2
+#define SF_CRC32_MODE		4
 
 /* kernel/power/hibernate.c */
 extern int swsusp_check(void);
@@ -228,7 +229,8 @@ extern int pm_test_level;
 #ifdef CONFIG_SUSPEND_FREEZER
 static inline int suspend_freeze_processes(void)
 {
-	return freeze_processes();
+	int error = freeze_processes();
+	return error ? : freeze_kernel_threads();
 }
 
 static inline void suspend_thaw_processes(void)
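The "error ? : freeze_kernel_threads()" form relies on GCC's conditional expression with an omitted middle operand: it yields error when error is nonzero, and only otherwise evaluates freeze_kernel_threads(). A standalone plain-C sketch of the same control flow, with stub functions standing in for the kernel ones:

	/* Sketch of the "x ? : y" idiom: x if nonzero, else y (GCC extension). */
	#include <stdio.h>

	static int freeze_user_space(void) { return 0; }	/* stub: success */
	static int freeze_kthreads(void)   { return -16; }	/* stub: -EBUSY */

	static int freeze_both(void)
	{
		int error = freeze_user_space();

		return error ? : freeze_kthreads();	/* GCC extension form */
	}

	int main(void)
	{
		printf("freeze_both() = %d\n", freeze_both());	/* prints -16 */
		return 0;
	}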
@@ -135,7 +135,7 @@ static int try_to_freeze_tasks(bool sig_only)
 }
 
 /**
- * freeze_processes - tell processes to enter the refrigerator
+ * freeze_processes - Signal user space processes to enter the refrigerator.
  */
 int freeze_processes(void)
 {
@@ -143,20 +143,30 @@ int freeze_processes(void)
 
 	printk("Freezing user space processes ... ");
 	error = try_to_freeze_tasks(true);
-	if (error)
-		goto Exit;
-	printk("done.\n");
+	if (!error) {
+		printk("done.");
+		oom_killer_disable();
+	}
+	printk("\n");
+	BUG_ON(in_atomic());
+
+	return error;
+}
+
+/**
+ * freeze_kernel_threads - Make freezable kernel threads go to the refrigerator.
+ */
+int freeze_kernel_threads(void)
+{
+	int error;
 
 	printk("Freezing remaining freezable tasks ... ");
 	error = try_to_freeze_tasks(false);
-	if (error)
-		goto Exit;
-	printk("done.");
+	if (!error)
+		printk("done.");
 
-	oom_killer_disable();
- Exit:
-	BUG_ON(in_atomic());
 	printk("\n");
+	BUG_ON(in_atomic());
 
 	return error;
 }
@@ -29,7 +29,7 @@
 
 /*#define DEBUG*/
 
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
 #include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <linux/slab.h>
@@ -45,62 +45,57 @@
 #include <linux/uaccess.h>
 
 /*
- * locking rule: all changes to requests or notifiers lists
+ * locking rule: all changes to constraints or notifiers lists
  * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock
  * held, taken with _irqsave.  One lock to rule them all
  */
-enum pm_qos_type {
-	PM_QOS_MAX,		/* return the largest value */
-	PM_QOS_MIN		/* return the smallest value */
-};
-
-/*
- * Note: The lockless read path depends on the CPU accessing
- * target_value atomically.  Atomic access is only guaranteed on all CPU
- * types linux supports for 32 bit quantites
- */
 struct pm_qos_object {
-	struct plist_head requests;
-	struct blocking_notifier_head *notifiers;
+	struct pm_qos_constraints *constraints;
 	struct miscdevice pm_qos_power_miscdev;
 	char *name;
-	s32 target_value;	/* Do not change to 64 bit */
-	s32 default_value;
-	enum pm_qos_type type;
 };
 
 static DEFINE_SPINLOCK(pm_qos_lock);
 
 static struct pm_qos_object null_pm_qos;
+
 static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier);
-static struct pm_qos_object cpu_dma_pm_qos = {
-	.requests = PLIST_HEAD_INIT(cpu_dma_pm_qos.requests),
-	.notifiers = &cpu_dma_lat_notifier,
-	.name = "cpu_dma_latency",
+static struct pm_qos_constraints cpu_dma_constraints = {
+	.list = PLIST_HEAD_INIT(cpu_dma_constraints.list),
 	.target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
 	.default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
 	.type = PM_QOS_MIN,
+	.notifiers = &cpu_dma_lat_notifier,
+};
+static struct pm_qos_object cpu_dma_pm_qos = {
+	.constraints = &cpu_dma_constraints,
+	.name = "cpu_dma_latency",
 };
 
 static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
-static struct pm_qos_object network_lat_pm_qos = {
-	.requests = PLIST_HEAD_INIT(network_lat_pm_qos.requests),
-	.notifiers = &network_lat_notifier,
-	.name = "network_latency",
+static struct pm_qos_constraints network_lat_constraints = {
+	.list = PLIST_HEAD_INIT(network_lat_constraints.list),
 	.target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
 	.default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
-	.type = PM_QOS_MIN
+	.type = PM_QOS_MIN,
+	.notifiers = &network_lat_notifier,
+};
+static struct pm_qos_object network_lat_pm_qos = {
+	.constraints = &network_lat_constraints,
+	.name = "network_latency",
 };
 
 
 static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
-static struct pm_qos_object network_throughput_pm_qos = {
-	.requests = PLIST_HEAD_INIT(network_throughput_pm_qos.requests),
-	.notifiers = &network_throughput_notifier,
-	.name = "network_throughput",
+static struct pm_qos_constraints network_tput_constraints = {
+	.list = PLIST_HEAD_INIT(network_tput_constraints.list),
 	.target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
 	.default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
 	.type = PM_QOS_MAX,
+	.notifiers = &network_throughput_notifier,
+};
+static struct pm_qos_object network_throughput_pm_qos = {
+	.constraints = &network_tput_constraints,
+	.name = "network_throughput",
 };
 
 
@@ -127,17 +122,17 @@ static const struct file_operations pm_qos_power_fops = {
 };
 
 /* unlocked internal variant */
-static inline int pm_qos_get_value(struct pm_qos_object *o)
+static inline int pm_qos_get_value(struct pm_qos_constraints *c)
 {
-	if (plist_head_empty(&o->requests))
-		return o->default_value;
+	if (plist_head_empty(&c->list))
+		return c->default_value;
 
-	switch (o->type) {
+	switch (c->type) {
 	case PM_QOS_MIN:
-		return plist_first(&o->requests)->prio;
+		return plist_first(&c->list)->prio;
 
 	case PM_QOS_MAX:
-		return plist_last(&o->requests)->prio;
+		return plist_last(&c->list)->prio;
 
 	default:
 		/* runtime check for not using enum */
@@ -145,49 +140,217 @@ static inline int pm_qos_get_value(struct pm_qos_object *o)
 	}
 }
 
-static inline s32 pm_qos_read_value(struct pm_qos_object *o)
+s32 pm_qos_read_value(struct pm_qos_constraints *c)
 {
-	return o->target_value;
+	return c->target_value;
 }
 
-static inline void pm_qos_set_value(struct pm_qos_object *o, s32 value)
+static inline void pm_qos_set_value(struct pm_qos_constraints *c, s32 value)
 {
-	o->target_value = value;
+	c->target_value = value;
 }
 
-static void update_target(struct pm_qos_object *o, struct plist_node *node,
-			  int del, int value)
+/**
+ * pm_qos_update_target - manages the constraints list and calls the notifiers
+ *  if needed
+ * @c: constraints data struct
+ * @node: request to add to the list, to update or to remove
+ * @action: action to take on the constraints list
+ * @value: value of the request to add or update
+ *
+ * This function returns 1 if the aggregated constraint value has changed, 0
+ *  otherwise.
+ */
+int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
+			 enum pm_qos_req_action action, int value)
 {
 	unsigned long flags;
-	int prev_value, curr_value;
+	int prev_value, curr_value, new_value;
 
 	spin_lock_irqsave(&pm_qos_lock, flags);
-	prev_value = pm_qos_get_value(o);
-	/* PM_QOS_DEFAULT_VALUE is a signal that the value is unchanged */
-	if (value != PM_QOS_DEFAULT_VALUE) {
+	prev_value = pm_qos_get_value(c);
+	if (value == PM_QOS_DEFAULT_VALUE)
+		new_value = c->default_value;
+	else
+		new_value = value;
+
+	switch (action) {
+	case PM_QOS_REMOVE_REQ:
+		plist_del(node, &c->list);
+		break;
+	case PM_QOS_UPDATE_REQ:
 		/*
 		 * to change the list, we atomically remove, reinit
 		 * with new value and add, then see if the extremal
 		 * changed
 		 */
-		plist_del(node, &o->requests);
-		plist_node_init(node, value);
-		plist_add(node, &o->requests);
-	} else if (del) {
-		plist_del(node, &o->requests);
-	} else {
-		plist_add(node, &o->requests);
+		plist_del(node, &c->list);
+	case PM_QOS_ADD_REQ:
+		plist_node_init(node, new_value);
+		plist_add(node, &c->list);
+		break;
+	default:
+		/* no action */
+		;
 	}
-	curr_value = pm_qos_get_value(o);
-	pm_qos_set_value(o, curr_value);
+
+	curr_value = pm_qos_get_value(c);
+	pm_qos_set_value(c, curr_value);
+
 	spin_unlock_irqrestore(&pm_qos_lock, flags);
 
-	if (prev_value != curr_value)
-		blocking_notifier_call_chain(o->notifiers,
+	if (prev_value != curr_value) {
+		blocking_notifier_call_chain(c->notifiers,
 					     (unsigned long)curr_value,
 					     NULL);
+		return 1;
+	} else {
+		return 0;
+	}
 }
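For intuition, each PM QoS class keeps its outstanding requests on a sorted plist and the target is simply the extremal value: the smallest for PM_QOS_MIN classes (latencies), the largest for PM_QOS_MAX classes (throughput). A standalone illustration, not kernel code, of that aggregation rule:

	/* How PM_QOS_MIN/MAX aggregation picks the target value. */
	#include <stdio.h>

	static int aggregate(const int *vals, int n, int want_min)
	{
		int i, best = vals[0];

		for (i = 1; i < n; i++)
			if (want_min ? vals[i] < best : vals[i] > best)
				best = vals[i];
		return best;
	}

	int main(void)
	{
		int latencies[] = { 100, 20, 50 };	/* three requests, usec */

		/* Latency classes are PM_QOS_MIN: the strictest value wins. */
		printf("target: %d\n", aggregate(latencies, 3, 1));	/* 20 */
		return 0;
	}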
+/**
+ * pm_qos_request - returns current system wide qos expectation
+ * @pm_qos_class: identification of which qos value is requested
+ *
+ * This function returns the current target value.
+ */
+int pm_qos_request(int pm_qos_class)
+{
+	return pm_qos_read_value(pm_qos_array[pm_qos_class]->constraints);
+}
+EXPORT_SYMBOL_GPL(pm_qos_request);
+
+int pm_qos_request_active(struct pm_qos_request *req)
+{
+	return req->pm_qos_class != 0;
+}
+EXPORT_SYMBOL_GPL(pm_qos_request_active);
+
+/**
+ * pm_qos_add_request - inserts new qos request into the list
+ * @req: pointer to a preallocated handle
+ * @pm_qos_class: identifies which list of qos request to use
+ * @value: defines the qos request
+ *
+ * This function inserts a new entry in the pm_qos_class list of requested qos
+ * performance characteristics.  It recomputes the aggregate QoS expectations
+ * for the pm_qos_class of parameters and initializes the pm_qos_request
+ * handle.  Caller needs to save this handle for later use in updates and
+ * removal.
+ */
+
+void pm_qos_add_request(struct pm_qos_request *req,
+			int pm_qos_class, s32 value)
+{
+	if (!req) /*guard against callers passing in null */
+		return;
+
+	if (pm_qos_request_active(req)) {
+		WARN(1, KERN_ERR "pm_qos_add_request() called for already added request\n");
+		return;
+	}
+	req->pm_qos_class = pm_qos_class;
+	pm_qos_update_target(pm_qos_array[pm_qos_class]->constraints,
+			     &req->node, PM_QOS_ADD_REQ, value);
+}
+EXPORT_SYMBOL_GPL(pm_qos_add_request);
+
+/**
+ * pm_qos_update_request - modifies an existing qos request
+ * @req : handle to list element holding a pm_qos request to use
+ * @value: defines the qos request
+ *
+ * Updates an existing qos request for the pm_qos_class of parameters along
+ * with updating the target pm_qos_class value.
+ *
+ * Attempts are made to make this code callable on hot code paths.
+ */
+void pm_qos_update_request(struct pm_qos_request *req,
+			   s32 new_value)
+{
+	if (!req) /*guard against callers passing in null */
+		return;
+
+	if (!pm_qos_request_active(req)) {
+		WARN(1, KERN_ERR "pm_qos_update_request() called for unknown object\n");
+		return;
+	}
+
+	if (new_value != req->node.prio)
+		pm_qos_update_target(
+			pm_qos_array[req->pm_qos_class]->constraints,
+			&req->node, PM_QOS_UPDATE_REQ, new_value);
+}
+EXPORT_SYMBOL_GPL(pm_qos_update_request);
+
+/**
+ * pm_qos_remove_request - modifies an existing qos request
+ * @req: handle to request list element
+ *
+ * Will remove pm qos request from the list of constraints and
+ * recompute the current target value for the pm_qos_class.  Call this
+ * on slow code paths.
+ */
+void pm_qos_remove_request(struct pm_qos_request *req)
+{
+	if (!req) /*guard against callers passing in null */
+		return;
+		/* silent return to keep pcm code cleaner */
+
+	if (!pm_qos_request_active(req)) {
+		WARN(1, KERN_ERR "pm_qos_remove_request() called for unknown object\n");
+		return;
+	}
+
+	pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints,
+			     &req->node, PM_QOS_REMOVE_REQ,
+			     PM_QOS_DEFAULT_VALUE);
+	memset(req, 0, sizeof(*req));
+}
+EXPORT_SYMBOL_GPL(pm_qos_remove_request);
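A hedged sketch of how a driver would use this add/update/remove API after the rename to struct pm_qos_request; the function names and the 50 usec value are illustrative, only the pm_qos_* calls and PM_QOS_CPU_DMA_LATENCY come from the code above:

	#include <linux/pm_qos.h>

	static struct pm_qos_request my_latency_req;

	static void my_driver_start_burst(void)
	{
		/* demand no more than 50 usec wakeup latency */
		pm_qos_add_request(&my_latency_req, PM_QOS_CPU_DMA_LATENCY, 50);
	}

	static void my_driver_end_burst(void)
	{
		/* drop the constraint; the aggregate target is recomputed */
		pm_qos_remove_request(&my_latency_req);
	}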
+/**
+ * pm_qos_add_notifier - sets notification entry for changes to target value
+ * @pm_qos_class: identifies which qos target changes should be notified.
+ * @notifier: notifier block managed by caller.
+ *
+ * will register the notifier into a notification chain that gets called
+ * upon changes to the pm_qos_class target value.
+ */
+int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
+{
+	int retval;
+
+	retval = blocking_notifier_chain_register(
+			pm_qos_array[pm_qos_class]->constraints->notifiers,
+			notifier);
+
+	return retval;
+}
+EXPORT_SYMBOL_GPL(pm_qos_add_notifier);
+
+/**
+ * pm_qos_remove_notifier - deletes notification entry from chain.
+ * @pm_qos_class: identifies which qos target changes are notified.
+ * @notifier: notifier block to be removed.
+ *
+ * will remove the notifier from the notification chain that gets called
+ * upon changes to the pm_qos_class target value.
+ */
+int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier)
+{
+	int retval;
+
+	retval = blocking_notifier_chain_unregister(
+			pm_qos_array[pm_qos_class]->constraints->notifiers,
+			notifier);
+
+	return retval;
+}
+EXPORT_SYMBOL_GPL(pm_qos_remove_notifier);
+
 /* User space interface to PM QoS classes via misc devices */
 static int register_pm_qos_misc(struct pm_qos_object *qos)
 {
 	qos->pm_qos_power_miscdev.minor = MISC_DYNAMIC_MINOR;
@@ -210,165 +373,13 @@ static int find_pm_qos_object_by_minor(int minor)
 	return -1;
 }
 
-/**
- * pm_qos_request - returns current system wide qos expectation
- * @pm_qos_class: identification of which qos value is requested
- *
- * This function returns the current target value.
- */
-int pm_qos_request(int pm_qos_class)
-{
-	return pm_qos_read_value(pm_qos_array[pm_qos_class]);
-}
-EXPORT_SYMBOL_GPL(pm_qos_request);
-
-int pm_qos_request_active(struct pm_qos_request_list *req)
-{
-	return req->pm_qos_class != 0;
-}
-EXPORT_SYMBOL_GPL(pm_qos_request_active);
-
-/**
- * pm_qos_add_request - inserts new qos request into the list
- * @dep: pointer to a preallocated handle
- * @pm_qos_class: identifies which list of qos request to use
- * @value: defines the qos request
- *
- * This function inserts a new entry in the pm_qos_class list of requested qos
- * performance characteristics.  It recomputes the aggregate QoS expectations
- * for the pm_qos_class of parameters and initializes the pm_qos_request_list
- * handle.  Caller needs to save this handle for later use in updates and
- * removal.
- */
-
-void pm_qos_add_request(struct pm_qos_request_list *dep,
-			int pm_qos_class, s32 value)
-{
-	struct pm_qos_object *o = pm_qos_array[pm_qos_class];
-	int new_value;
-
-	if (pm_qos_request_active(dep)) {
-		WARN(1, KERN_ERR "pm_qos_add_request() called for already added request\n");
-		return;
-	}
-	if (value == PM_QOS_DEFAULT_VALUE)
-		new_value = o->default_value;
-	else
-		new_value = value;
-	plist_node_init(&dep->list, new_value);
-	dep->pm_qos_class = pm_qos_class;
-	update_target(o, &dep->list, 0, PM_QOS_DEFAULT_VALUE);
-}
-EXPORT_SYMBOL_GPL(pm_qos_add_request);
-
-/**
- * pm_qos_update_request - modifies an existing qos request
- * @pm_qos_req : handle to list element holding a pm_qos request to use
- * @value: defines the qos request
- *
- * Updates an existing qos request for the pm_qos_class of parameters along
- * with updating the target pm_qos_class value.
- *
- * Attempts are made to make this code callable on hot code paths.
- */
-void pm_qos_update_request(struct pm_qos_request_list *pm_qos_req,
-			   s32 new_value)
-{
-	s32 temp;
-	struct pm_qos_object *o;
-
-	if (!pm_qos_req) /*guard against callers passing in null */
-		return;
-
-	if (!pm_qos_request_active(pm_qos_req)) {
-		WARN(1, KERN_ERR "pm_qos_update_request() called for unknown object\n");
-		return;
-	}
-
-	o = pm_qos_array[pm_qos_req->pm_qos_class];
-
-	if (new_value == PM_QOS_DEFAULT_VALUE)
-		temp = o->default_value;
-	else
-		temp = new_value;
-
-	if (temp != pm_qos_req->list.prio)
-		update_target(o, &pm_qos_req->list, 0, temp);
-}
-EXPORT_SYMBOL_GPL(pm_qos_update_request);
-
-/**
- * pm_qos_remove_request - modifies an existing qos request
- * @pm_qos_req: handle to request list element
- *
- * Will remove pm qos request from the list of requests and
- * recompute the current target value for the pm_qos_class.  Call this
- * on slow code paths.
- */
-void pm_qos_remove_request(struct pm_qos_request_list *pm_qos_req)
-{
-	struct pm_qos_object *o;
-
-	if (pm_qos_req == NULL)
-		return;
-		/* silent return to keep pcm code cleaner */
-
-	if (!pm_qos_request_active(pm_qos_req)) {
-		WARN(1, KERN_ERR "pm_qos_remove_request() called for unknown object\n");
-		return;
-	}
-
-	o = pm_qos_array[pm_qos_req->pm_qos_class];
-	update_target(o, &pm_qos_req->list, 1, PM_QOS_DEFAULT_VALUE);
-	memset(pm_qos_req, 0, sizeof(*pm_qos_req));
-}
-EXPORT_SYMBOL_GPL(pm_qos_remove_request);
-
-/**
- * pm_qos_add_notifier - sets notification entry for changes to target value
- * @pm_qos_class: identifies which qos target changes should be notified.
- * @notifier: notifier block managed by caller.
- *
- * will register the notifier into a notification chain that gets called
- * upon changes to the pm_qos_class target value.
- */
-int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
-{
-	int retval;
-
-	retval = blocking_notifier_chain_register(
-			pm_qos_array[pm_qos_class]->notifiers, notifier);
-
-	return retval;
-}
-EXPORT_SYMBOL_GPL(pm_qos_add_notifier);
-
-/**
- * pm_qos_remove_notifier - deletes notification entry from chain.
- * @pm_qos_class: identifies which qos target changes are notified.
- * @notifier: notifier block to be removed.
- *
- * will remove the notifier from the notification chain that gets called
- * upon changes to the pm_qos_class target value.
- */
-int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier)
-{
-	int retval;
-
-	retval = blocking_notifier_chain_unregister(
-			pm_qos_array[pm_qos_class]->notifiers, notifier);
-
-	return retval;
-}
-EXPORT_SYMBOL_GPL(pm_qos_remove_notifier);
-
 static int pm_qos_power_open(struct inode *inode, struct file *filp)
 {
 	long pm_qos_class;
 
 	pm_qos_class = find_pm_qos_object_by_minor(iminor(inode));
 	if (pm_qos_class >= 0) {
-		struct pm_qos_request_list *req = kzalloc(sizeof(*req), GFP_KERNEL);
+		struct pm_qos_request *req = kzalloc(sizeof(*req), GFP_KERNEL);
 		if (!req)
 			return -ENOMEM;
 
@@ -383,7 +394,7 @@ static int pm_qos_power_open(struct inode *inode, struct file *filp)
 
 static int pm_qos_power_release(struct inode *inode, struct file *filp)
 {
-	struct pm_qos_request_list *req;
+	struct pm_qos_request *req;
 
 	req = filp->private_data;
 	pm_qos_remove_request(req);
@@ -398,17 +409,15 @@ static ssize_t pm_qos_power_read(struct file *filp, char __user *buf,
 {
 	s32 value;
 	unsigned long flags;
-	struct pm_qos_object *o;
-	struct pm_qos_request_list *pm_qos_req = filp->private_data;
+	struct pm_qos_request *req = filp->private_data;
 
-	if (!pm_qos_req)
+	if (!req)
 		return -EINVAL;
-	if (!pm_qos_request_active(pm_qos_req))
+	if (!pm_qos_request_active(req))
 		return -EINVAL;
 
-	o = pm_qos_array[pm_qos_req->pm_qos_class];
 	spin_lock_irqsave(&pm_qos_lock, flags);
-	value = pm_qos_get_value(o);
+	value = pm_qos_get_value(pm_qos_array[req->pm_qos_class]->constraints);
 	spin_unlock_irqrestore(&pm_qos_lock, flags);
 
 	return simple_read_from_buffer(buf, count, f_pos, &value, sizeof(s32));
@@ -418,7 +427,7 @@ static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
 		size_t count, loff_t *f_pos)
 {
 	s32 value;
-	struct pm_qos_request_list *pm_qos_req;
+	struct pm_qos_request *req;
 
 	if (count == sizeof(s32)) {
 		if (copy_from_user(&value, buf, sizeof(s32)))
@@ -449,8 +458,8 @@ static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
 		return -EINVAL;
 	}
 
-	pm_qos_req = filp->private_data;
-	pm_qos_update_request(pm_qos_req, value);
+	req = filp->private_data;
+	pm_qos_update_request(req, value);
 
 	return count;
 }
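The misc-device path above gives user space the same request lifecycle: open() allocates a request, write() of a raw s32 updates it, and release() removes it, so a constraint lives exactly as long as the file descriptor. A user-space sketch against /dev/cpu_dma_latency:

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int32_t target_us = 0;	/* request the lowest possible latency */
		int fd = open("/dev/cpu_dma_latency", O_RDWR);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, &target_us, sizeof(target_us)) != sizeof(target_us)) {
			perror("write");
			close(fd);
			return 1;
		}
		sleep(10);	/* constraint held while the fd stays open */
		close(fd);	/* releasing the fd drops the request */
		return 0;
	}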
@@ -1339,6 +1339,9 @@ int hibernate_preallocate_memory(void)
 	count += highmem;
 	count -= totalreserve_pages;
 
+	/* Add number of pages required for page keys (s390 only). */
+	size += page_key_additional_pages(saveable);
+
 	/* Compute the maximum number of saveable pages to leave in memory. */
 	max_size = (count - (size + PAGES_FOR_IO)) / 2
 			- 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
@@ -1662,6 +1665,8 @@ pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
 		buf[j] = memory_bm_next_pfn(bm);
 		if (unlikely(buf[j] == BM_END_OF_MAP))
 			break;
+		/* Save page key for data page (s390 only). */
+		page_key_read(buf + j);
 	}
 }
 
@@ -1821,6 +1826,9 @@ static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
 		if (unlikely(buf[j] == BM_END_OF_MAP))
 			break;
 
+		/* Extract and buffer page key for data page (s390 only). */
+		page_key_memorize(buf + j);
+
 		if (memory_bm_pfn_present(bm, buf[j]))
 			memory_bm_set_bit(bm, buf[j]);
 		else
@@ -2223,6 +2231,11 @@ int snapshot_write_next(struct snapshot_handle *handle)
 		if (error)
 			return error;
 
+		/* Allocate buffer for page keys. */
+		error = page_key_alloc(nr_copy_pages);
+		if (error)
+			return error;
+
 	} else if (handle->cur <= nr_meta_pages + 1) {
 		error = unpack_orig_pfns(buffer, &copy_bm);
 		if (error)
@@ -2243,6 +2256,8 @@ int snapshot_write_next(struct snapshot_handle *handle)
 		}
 	} else {
 		copy_last_highmem_page();
+		/* Restore page key for data page (s390 only). */
+		page_key_write(handle->buffer);
 		handle->buffer = get_buffer(&orig_bm, &ca);
 		if (IS_ERR(handle->buffer))
 			return PTR_ERR(handle->buffer);
@@ -2264,6 +2279,9 @@ int snapshot_write_next(struct snapshot_handle *handle)
 void snapshot_write_finalize(struct snapshot_handle *handle)
 {
 	copy_last_highmem_page();
+	/* Restore page key for data page (s390 only). */
+	page_key_write(handle->buffer);
+	page_key_free();
 	/* Free only if we have loaded the image entirely */
 	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
 		memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR);
@@ -104,7 +104,10 @@ static int suspend_prepare(void)
 		goto Finish;
 
 	error = suspend_freeze_processes();
-	if (!error)
+	if (error) {
+		suspend_stats.failed_freeze++;
+		dpm_save_failed_step(SUSPEND_FREEZE);
+	} else
 		return 0;
 
 	suspend_thaw_processes();
@@ -315,8 +318,16 @@ int enter_state(suspend_state_t state)
  */
 int pm_suspend(suspend_state_t state)
 {
-	if (state > PM_SUSPEND_ON && state <= PM_SUSPEND_MAX)
-		return enter_state(state);
+	int ret;
+	if (state > PM_SUSPEND_ON && state < PM_SUSPEND_MAX) {
+		ret = enter_state(state);
+		if (ret) {
+			suspend_stats.fail++;
+			dpm_save_failed_errno(ret);
+		} else
+			suspend_stats.success++;
+		return ret;
+	}
+	return -EINVAL;
 }
 EXPORT_SYMBOL(pm_suspend);
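The change from "<=" to "<" is the "off by one" fix listed in the merge summary: PM_SUSPEND_MAX terminates the state enum and is not itself an enterable state, so the range check must exclude it. A standalone illustration with hypothetical ex_* names mirroring that sentinel pattern:

	/* Illustrative only: the *_MAX enumerator is a sentinel, one past
	 * the last valid state, so the upper bound check is exclusive. */
	enum ex_suspend_state {
		EX_SUSPEND_ON = 0,
		EX_SUSPEND_STANDBY = 1,
		EX_SUSPEND_MEM = 2,
		EX_SUSPEND_MAX,		/* sentinel, never entered */
	};

	static int ex_state_valid(int state)
	{
		return state > EX_SUSPEND_ON && state < EX_SUSPEND_MAX;
	}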
File diff suppressed because it is too large
@@ -53,6 +53,9 @@ endif
 obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
 obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o
 obj-$(CONFIG_TRACEPOINTS) += power-traces.o
+ifeq ($(CONFIG_PM_RUNTIME),y)
+obj-$(CONFIG_TRACEPOINTS) += rpm-traces.o
+endif
 ifeq ($(CONFIG_TRACING),y)
 obj-$(CONFIG_KGDB_KDB) += trace_kdb.o
 endif
20
kernel/trace/rpm-traces.c
Normal file
@@ -0,0 +1,20 @@
+/*
+ * Power trace points
+ *
+ * Copyright (C) 2009 Ming Lei <ming.lei@canonical.com>
+ */
+
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/rpm.h>
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(rpm_return_int);
+EXPORT_TRACEPOINT_SYMBOL_GPL(rpm_idle);
+EXPORT_TRACEPOINT_SYMBOL_GPL(rpm_suspend);
+EXPORT_TRACEPOINT_SYMBOL_GPL(rpm_resume);
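Since trace/events/rpm.h declares these points under the "rpm" trace system, they should be switchable from user space through the usual tracing events directory. A small sketch, assuming debugfs/tracefs is mounted at /sys/kernel/debug:

	/* Enable the runtime-PM trace events added by this file. */
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/kernel/debug/tracing/events/rpm/enable", "w");

		if (!f) {
			perror("fopen");
			return 1;
		}
		fputs("1\n", f);	/* writing "0" disables them again */
		fclose(f);
		return 0;
	}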
@@ -19,7 +19,7 @@
 #include <linux/if_arp.h>
 #include <linux/rtnetlink.h>
 #include <linux/bitmap.h>
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
 #include <linux/inetdevice.h>
 #include <net/net_namespace.h>
 #include <net/cfg80211.h>
@@ -17,7 +17,7 @@
 #include <linux/if_arp.h>
 #include <linux/etherdevice.h>
 #include <linux/rtnetlink.h>
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
 #include <linux/crc32.h>
 #include <linux/slab.h>
 #include <net/mac80211.h>
@@ -14,7 +14,7 @@
 
 #include <linux/if_arp.h>
 #include <linux/rtnetlink.h>
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
 #include <net/sch_generic.h>
 #include <linux/slab.h>
 #include <net/mac80211.h>
@@ -23,7 +23,7 @@
 #include <linux/file.h>
 #include <linux/slab.h>
 #include <linux/time.h>
-#include <linux/pm_qos_params.h>
+#include <linux/pm_qos.h>
 #include <linux/uio.h>
 #include <linux/dma-mapping.h>
 #include <sound/core.h>
@@ -631,7 +631,7 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message)
 	if (chip == (void *)-1L)
 		return 0;
 
-	if (!(message.event & PM_EVENT_AUTO)) {
+	if (!PMSG_IS_AUTO(message)) {
 		snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot);
 		if (!chip->num_suspended_intf++) {
 			list_for_each(p, &chip->pcm_list) {