Merge branch 'pm-sleep' into pm-for-linus
* pm-sleep: (51 commits)
  PM: Drop generic_subsys_pm_ops
  PM / Sleep: Remove forward-only callbacks from AMBA bus type
  PM / Sleep: Remove forward-only callbacks from platform bus type
  PM: Run the driver callback directly if the subsystem one is not there
  PM / Sleep: Make pm_op() and pm_noirq_op() return callback pointers
  PM / Sleep: Merge internal functions in generic_ops.c
  PM / Sleep: Simplify generic system suspend callbacks
  PM / Hibernate: Remove deprecated hibernation snapshot ioctls
  PM / Sleep: Fix freezer failures due to racy usermodehelper_is_disabled()
  PM / Sleep: Recommend [un]lock_system_sleep() over using pm_mutex directly
  PM / Sleep: Replace mutex_[un]lock(&pm_mutex) with [un]lock_system_sleep()
  PM / Sleep: Make [un]lock_system_sleep() generic
  PM / Sleep: Use the freezer_count() functions in [un]lock_system_sleep() APIs
  PM / Freezer: Remove the "userspace only" constraint from freezer[_do_not]_count()
  PM / Hibernate: Replace unintuitive 'if' condition in kernel/power/user.c with 'else'
  Freezer / sunrpc / NFS: don't allow TASK_KILLABLE sleeps to block the freezer
  PM / Sleep: Unify diagnostic messages from device suspend/resume
  ACPI / PM: Do not save/restore NVS on Asus K54C/K54HR
  PM / Hibernate: Remove deprecated hibernation test modes
  PM / Hibernate: Thaw processes in SNAPSHOT_CREATE_IMAGE ioctl test path
  ...

Conflicts:
	kernel/kmod.c
commit b7ba68c4a0
@@ -85,17 +85,6 @@ Who: Robin Getz <rgetz@blackfin.uclinux.org> & Matt Mackall <mpm@selenic.com>

---------------------------

What:	Deprecated snapshot ioctls
When:	2.6.36

Why:	The ioctls in kernel/power/user.c were marked as deprecated long time
	ago. Now they notify users about that so that they need to replace
	their userspace. After some more time, remove them completely.

Who:	Jiri Slaby <jirislaby@gmail.com>

---------------------------

What:	The ieee80211_regdom module parameter
When:	March 2010 / desktop catchup
@@ -126,7 +126,9 @@ The core methods to suspend and resume devices reside in struct dev_pm_ops

pointed to by the ops member of struct dev_pm_domain, or by the pm member of
struct bus_type, struct device_type and struct class. They are mostly of
interest to the people writing infrastructure for platforms and buses, like PCI
or USB, or device type and device class drivers.
or USB, or device type and device class drivers. They also are relevant to the
writers of device drivers whose subsystems (PM domains, device types, device
classes and bus types) don't provide all power management methods.

Bus drivers implement these methods as appropriate for the hardware and the
drivers using it; PCI works differently from USB, and so on. Not many people

@@ -268,32 +270,35 @@ various phases always run after tasks have been frozen and before they are

unfrozen. Furthermore, the *_noirq phases run at a time when IRQ handlers have
been disabled (except for those marked with the IRQF_NO_SUSPEND flag).

All phases use PM domain, bus, type, or class callbacks (that is, methods
defined in dev->pm_domain->ops, dev->bus->pm, dev->type->pm, or dev->class->pm).
These callbacks are regarded by the PM core as mutually exclusive. Moreover,
PM domain callbacks always take precedence over bus, type and class callbacks,
while type callbacks take precedence over bus and class callbacks, and class
callbacks take precedence over bus callbacks. To be precise, the following
rules are used to determine which callback to execute in the given phase:
All phases use PM domain, bus, type, class or driver callbacks (that is, methods
defined in dev->pm_domain->ops, dev->bus->pm, dev->type->pm, dev->class->pm or
dev->driver->pm). These callbacks are regarded by the PM core as mutually
exclusive. Moreover, PM domain callbacks always take precedence over all of the
other callbacks and, for example, type callbacks take precedence over bus, class
and driver callbacks. To be precise, the following rules are used to determine
which callback to execute in the given phase:

  1. If dev->pm_domain is present, the PM core will attempt to execute the
     callback included in dev->pm_domain->ops. If that callback is not
     present, no action will be carried out for the given device.
  1. If dev->pm_domain is present, the PM core will choose the callback
     included in dev->pm_domain->ops for execution

  2. Otherwise, if both dev->type and dev->type->pm are present, the callback
     included in dev->type->pm will be executed.
     included in dev->type->pm will be chosen for execution.

  3. Otherwise, if both dev->class and dev->class->pm are present, the
     callback included in dev->class->pm will be executed.
     callback included in dev->class->pm will be chosen for execution.

  4. Otherwise, if both dev->bus and dev->bus->pm are present, the callback
     included in dev->bus->pm will be executed.
     included in dev->bus->pm will be chosen for execution.

This allows PM domains and device types to override callbacks provided by bus
types or device classes if necessary.

These callbacks may in turn invoke device- or driver-specific methods stored in
dev->driver->pm, but they don't have to.
The PM domain, type, class and bus callbacks may in turn invoke device- or
driver-specific methods stored in dev->driver->pm, but they don't have to do
that.

If the subsystem callback chosen for execution is not present, the PM core will
execute the corresponding method from dev->driver->pm instead if there is one.
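For illustration only, a minimal sketch of the selection rules above for a
single phase (the function name is made up; the PM core's real implementation
lives in drivers/base/power/main.c and covers every phase, not just ->suspend):

#include <linux/device.h>
#include <linux/pm.h>

/*
 * Illustrative sketch, not the PM core's actual code: pick the subsystem-level
 * dev_pm_ops in the documented order, and if the chosen ops do not provide the
 * ->suspend callback, fall back to the driver's own one.
 */
static int call_suspend_like_callback(struct device *dev)
{
	const struct dev_pm_ops *ops = NULL;
	int (*cb)(struct device *) = NULL;

	if (dev->pm_domain)
		ops = &dev->pm_domain->ops;
	else if (dev->type && dev->type->pm)
		ops = dev->type->pm;
	else if (dev->class && dev->class->pm)
		ops = dev->class->pm;
	else if (dev->bus && dev->bus->pm)
		ops = dev->bus->pm;

	if (ops)
		cb = ops->suspend;
	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->suspend;	/* driver fallback */

	return cb ? cb(dev) : 0;
}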
Entering System Suspend
@@ -21,7 +21,7 @@ freeze_processes() (defined in kernel/power/process.c) is called. It executes

try_to_freeze_tasks() that sets TIF_FREEZE for all of the freezable tasks and
either wakes them up, if they are kernel threads, or sends fake signals to them,
if they are user space processes. A task that has TIF_FREEZE set, should react
to it by calling the function called refrigerator() (defined in
to it by calling the function called __refrigerator() (defined in
kernel/freezer.c), which sets the task's PF_FROZEN flag, changes its state
to TASK_UNINTERRUPTIBLE and makes it loop until PF_FROZEN is cleared for it.
Then, we say that the task is 'frozen' and therefore the set of functions

@@ -29,10 +29,10 @@ handling this mechanism is referred to as 'the freezer' (these functions are

defined in kernel/power/process.c, kernel/freezer.c & include/linux/freezer.h).
User space processes are generally frozen before kernel threads.

It is not recommended to call refrigerator() directly. Instead, it is
recommended to use the try_to_freeze() function (defined in
include/linux/freezer.h), that checks the task's TIF_FREEZE flag and makes the
task enter refrigerator() if the flag is set.
__refrigerator() must not be called directly. Instead, use the
try_to_freeze() function (defined in include/linux/freezer.h), that checks
the task's TIF_FREEZE flag and makes the task enter __refrigerator() if the
flag is set.

For user space processes try_to_freeze() is called automatically from the
signal-handling code, but the freezable kernel threads need to call it

@@ -61,13 +61,13 @@ wait_event_freezable() and wait_event_freezable_timeout() macros.

After the system memory state has been restored from a hibernation image and
devices have been reinitialized, the function thaw_processes() is called in
order to clear the PF_FROZEN flag for each frozen task. Then, the tasks that
have been frozen leave refrigerator() and continue running.
have been frozen leave __refrigerator() and continue running.

III. Which kernel threads are freezable?

Kernel threads are not freezable by default. However, a kernel thread may clear
PF_NOFREEZE for itself by calling set_freezable() (the resetting of PF_NOFREEZE
directly is strongly discouraged). From this point it is regarded as freezable
directly is not allowed). From this point it is regarded as freezable
and must call try_to_freeze() in a suitable place.
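As a usage illustration of the rules above, a minimal sketch of a freezable
kernel thread (the thread function and the work it does are placeholders):

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sched.h>

/* Illustrative sketch: opt in with set_freezable(), then call try_to_freeze()
 * regularly so the freezer can park this thread in __refrigerator().
 */
static int my_thread_fn(void *data)
{
	set_freezable();		/* clear PF_NOFREEZE for this thread */

	while (!kthread_should_stop()) {
		try_to_freeze();	/* enter __refrigerator() if freezing */

		/* ... do one unit of work here ... */

		schedule_timeout_interruptible(HZ);
	}
	return 0;
}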
IV. Why do we do that?

@@ -176,3 +176,28 @@ tasks, since it generally exists anyway.

A driver must have all firmwares it may need in RAM before suspend() is called.
If keeping them is not practical, for example due to their size, they must be
requested early enough using the suspend notifier API described in notifiers.txt.

VI. Are there any precautions to be taken to prevent freezing failures?

Yes, there are.

First of all, grabbing the 'pm_mutex' lock to mutually exclude a piece of code
from system-wide sleep such as suspend/hibernation is not encouraged.
If possible, that piece of code must instead hook onto the suspend/hibernation
notifiers to achieve mutual exclusion. Look at the CPU-Hotplug code
(kernel/cpu.c) for an example.

However, if that is not feasible, and grabbing 'pm_mutex' is deemed necessary,
it is strongly discouraged to directly call mutex_[un]lock(&pm_mutex) since
that could lead to freezing failures, because if the suspend/hibernate code
successfully acquired the 'pm_mutex' lock, and hence that other entity failed
to acquire the lock, then that task would get blocked in TASK_UNINTERRUPTIBLE
state. As a consequence, the freezer would not be able to freeze that task,
leading to freezing failure.

However, the [un]lock_system_sleep() APIs are safe to use in this scenario,
since they ask the freezer to skip freezing this task, since it is anyway
"frozen enough" as it is blocked on 'pm_mutex', which will be released
only after the entire suspend/hibernation sequence is complete.
So, to summarize, use [un]lock_system_sleep() instead of directly using
mutex_[un]lock(&pm_mutex). That would prevent freezing failures.
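A minimal usage sketch of that recommendation (the function name is made up;
lock_system_sleep()/unlock_system_sleep() are declared in include/linux/suspend.h):

#include <linux/suspend.h>

static void my_sleep_sensitive_operation(void)
{
	lock_system_sleep();	/* safe: the freezer will skip this task */

	/* ... code that must not race with suspend/hibernation ... */

	unlock_system_sleep();
}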
@@ -57,6 +57,10 @@ the following:

 4. Bus type of the device, if both dev->bus and dev->bus->pm are present.

If the subsystem chosen by applying the above rules doesn't provide the relevant
callback, the PM core will invoke the corresponding driver callback stored in
dev->driver->pm directly (if present).

The PM core always checks which callback to use in the order given above, so the
priority order of callbacks from high to low is: PM domain, device type, class
and bus type. Moreover, the high-priority one will always take precedence over

@@ -64,86 +68,88 @@ a low-priority one. The PM domain, bus type, device type and class callbacks

are referred to as subsystem-level callbacks in what follows.

By default, the callbacks are always invoked in process context with interrupts
enabled. However, subsystems can use the pm_runtime_irq_safe() helper function
to tell the PM core that their ->runtime_suspend(), ->runtime_resume() and
->runtime_idle() callbacks may be invoked in atomic context with interrupts
disabled for a given device. This implies that the callback routines in
question must not block or sleep, but it also means that the synchronous helper
functions listed at the end of Section 4 may be used for that device within an
interrupt handler or generally in an atomic context.
enabled. However, the pm_runtime_irq_safe() helper function can be used to tell
the PM core that it is safe to run the ->runtime_suspend(), ->runtime_resume()
and ->runtime_idle() callbacks for the given device in atomic context with
interrupts disabled. This implies that the callback routines in question must
not block or sleep, but it also means that the synchronous helper functions
listed at the end of Section 4 may be used for that device within an interrupt
handler or generally in an atomic context.

The subsystem-level suspend callback is _entirely_ _responsible_ for handling
the suspend of the device as appropriate, which may, but need not include
executing the device driver's own ->runtime_suspend() callback (from the
The subsystem-level suspend callback, if present, is _entirely_ _responsible_
for handling the suspend of the device as appropriate, which may, but need not
include executing the device driver's own ->runtime_suspend() callback (from the
PM core's point of view it is not necessary to implement a ->runtime_suspend()
callback in a device driver as long as the subsystem-level suspend callback
knows what to do to handle the device).

  * Once the subsystem-level suspend callback has completed successfully
    for given device, the PM core regards the device as suspended, which need
    not mean that the device has been put into a low power state. It is
    supposed to mean, however, that the device will not process data and will
    not communicate with the CPU(s) and RAM until the subsystem-level resume
    callback is executed for it. The runtime PM status of a device after
    successful execution of the subsystem-level suspend callback is 'suspended'.
  * Once the subsystem-level suspend callback (or the driver suspend callback,
    if invoked directly) has completed successfully for the given device, the PM
    core regards the device as suspended, which need not mean that it has been
    put into a low power state. It is supposed to mean, however, that the
    device will not process data and will not communicate with the CPU(s) and
    RAM until the appropriate resume callback is executed for it. The runtime
    PM status of a device after successful execution of the suspend callback is
    'suspended'.

  * If the subsystem-level suspend callback returns -EBUSY or -EAGAIN,
    the device's runtime PM status is 'active', which means that the device
    _must_ be fully operational afterwards.
  * If the suspend callback returns -EBUSY or -EAGAIN, the device's runtime PM
    status remains 'active', which means that the device _must_ be fully
    operational afterwards.

  * If the subsystem-level suspend callback returns an error code different
    from -EBUSY or -EAGAIN, the PM core regards this as a fatal error and will
    refuse to run the helper functions described in Section 4 for the device,
    until the status of it is directly set either to 'active', or to 'suspended'
    (the PM core provides special helper functions for this purpose).
  * If the suspend callback returns an error code different from -EBUSY and
    -EAGAIN, the PM core regards this as a fatal error and will refuse to run
    the helper functions described in Section 4 for the device until its status
    is directly set to either 'active', or 'suspended' (the PM core provides
    special helper functions for this purpose).

In particular, if the driver requires remote wake-up capability (i.e. hardware
In particular, if the driver requires remote wakeup capability (i.e. hardware
mechanism allowing the device to request a change of its power state, such as
PCI PME) for proper functioning and device_run_wake() returns 'false' for the
device, then ->runtime_suspend() should return -EBUSY. On the other hand, if
device_run_wake() returns 'true' for the device and the device is put into a low
power state during the execution of the subsystem-level suspend callback, it is
expected that remote wake-up will be enabled for the device. Generally, remote
wake-up should be enabled for all input devices put into a low power state at
run time.
device_run_wake() returns 'true' for the device and the device is put into a
low-power state during the execution of the suspend callback, it is expected
that remote wakeup will be enabled for the device. Generally, remote wakeup
should be enabled for all input devices put into low-power states at run time.
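To illustrate the -EBUSY rule above, a hedged sketch of a driver
->runtime_suspend() for a device that needs remote wakeup (the my_hw_* helpers
are placeholders, not a real API):

#include <linux/pm_runtime.h>

/* Illustrative sketch: refuse to suspend when remote wakeup is required but
 * unavailable, otherwise enable wakeup and enter a low-power state.
 */
static int my_runtime_suspend(struct device *dev)
{
	if (!device_run_wake(dev))
		return -EBUSY;		/* stay 'active': no remote wakeup */

	my_hw_enable_wakeup(dev);	/* placeholder */
	my_hw_enter_low_power(dev);	/* placeholder */
	return 0;
}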
The subsystem-level resume callback is _entirely_ _responsible_ for handling the
resume of the device as appropriate, which may, but need not include executing
the device driver's own ->runtime_resume() callback (from the PM core's point of
view it is not necessary to implement a ->runtime_resume() callback in a device
driver as long as the subsystem-level resume callback knows what to do to handle
the device).
The subsystem-level resume callback, if present, is _entirely_ _responsible_ for
handling the resume of the device as appropriate, which may, but need not
include executing the device driver's own ->runtime_resume() callback (from the
PM core's point of view it is not necessary to implement a ->runtime_resume()
callback in a device driver as long as the subsystem-level resume callback knows
what to do to handle the device).

  * Once the subsystem-level resume callback has completed successfully, the PM
    core regards the device as fully operational, which means that the device
    _must_ be able to complete I/O operations as needed. The runtime PM status
    of the device is then 'active'.
  * Once the subsystem-level resume callback (or the driver resume callback, if
    invoked directly) has completed successfully, the PM core regards the device
    as fully operational, which means that the device _must_ be able to complete
    I/O operations as needed. The runtime PM status of the device is then
    'active'.

  * If the subsystem-level resume callback returns an error code, the PM core
    regards this as a fatal error and will refuse to run the helper functions
    described in Section 4 for the device, until its status is directly set
    either to 'active' or to 'suspended' (the PM core provides special helper
    functions for this purpose).
  * If the resume callback returns an error code, the PM core regards this as a
    fatal error and will refuse to run the helper functions described in Section
    4 for the device, until its status is directly set to either 'active', or
    'suspended' (by means of special helper functions provided by the PM core
    for this purpose).

The subsystem-level idle callback is executed by the PM core whenever the device
appears to be idle, which is indicated to the PM core by two counters, the
device's usage counter and the counter of 'active' children of the device.
The idle callback (a subsystem-level one, if present, or the driver one) is
executed by the PM core whenever the device appears to be idle, which is
indicated to the PM core by two counters, the device's usage counter and the
counter of 'active' children of the device.

  * If any of these counters is decreased using a helper function provided by
    the PM core and it turns out to be equal to zero, the other counter is
    checked. If that counter also is equal to zero, the PM core executes the
    subsystem-level idle callback with the device as an argument.
    idle callback with the device as its argument.

The action performed by a subsystem-level idle callback is totally dependent on
the subsystem in question, but the expected and recommended action is to check
The action performed by the idle callback is totally dependent on the subsystem
(or driver) in question, but the expected and recommended action is to check
if the device can be suspended (i.e. if all of the conditions necessary for
suspending the device are satisfied) and to queue up a suspend request for the
device in that case. The value returned by this callback is ignored by the PM
core.
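A sketch of an idle callback following the recommendation above (the quiescence
check is a placeholder; pm_schedule_suspend() with a zero delay simply queues up
a suspend request):

#include <linux/pm_runtime.h>

static int my_runtime_idle(struct device *dev)
{
	if (my_device_is_quiescent(dev))	/* placeholder driver check */
		pm_schedule_suspend(dev, 0);	/* queue up a suspend request */

	return 0;	/* return value is ignored by the PM core */
}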
The helper functions provided by the PM core, described in Section 4, guarantee
that the following constraints are met with respect to the bus type's runtime
PM callbacks:
that the following constraints are met with respect to runtime PM callbacks for
one device:

 (1) The callbacks are mutually exclusive (e.g. it is forbidden to execute
     ->runtime_suspend() in parallel with ->runtime_resume() or with another
@ -79,7 +79,6 @@ register struct thread_info *__current_thread_info __asm__("$8");
|
||||
#define TIF_UAC_SIGBUS 12 /* ! userspace part of 'osf_sysinfo' */
|
||||
#define TIF_MEMDIE 13 /* is terminating due to OOM killer */
|
||||
#define TIF_RESTORE_SIGMASK 14 /* restore signal mask in do_signal */
|
||||
#define TIF_FREEZE 16 /* is freezing for suspend */
|
||||
|
||||
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
|
||||
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
|
||||
@ -87,7 +86,6 @@ register struct thread_info *__current_thread_info __asm__("$8");
|
||||
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
|
||||
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
|
||||
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
|
||||
#define _TIF_FREEZE (1<<TIF_FREEZE)
|
||||
|
||||
/* Work to do on interrupt/exception return. */
|
||||
#define _TIF_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
|
||||
|
@ -142,7 +142,6 @@ extern void vfp_flush_hwstate(struct thread_info *);
|
||||
#define TIF_POLLING_NRFLAG 16
|
||||
#define TIF_USING_IWMMXT 17
|
||||
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
|
||||
#define TIF_FREEZE 19
|
||||
#define TIF_RESTORE_SIGMASK 20
|
||||
#define TIF_SECCOMP 21
|
||||
|
||||
@ -152,7 +151,6 @@ extern void vfp_flush_hwstate(struct thread_info *);
|
||||
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
|
||||
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
|
||||
#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
|
||||
#define _TIF_FREEZE (1 << TIF_FREEZE)
|
||||
#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
|
||||
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
|
||||
|
||||
|
@ -85,7 +85,6 @@ static inline struct thread_info *current_thread_info(void)
|
||||
#define TIF_RESTORE_SIGMASK 7 /* restore signal mask in do_signal */
|
||||
#define TIF_CPU_GOING_TO_SLEEP 8 /* CPU is entering sleep 0 mode */
|
||||
#define TIF_NOTIFY_RESUME 9 /* callback before returning to user */
|
||||
#define TIF_FREEZE 29
|
||||
#define TIF_DEBUG 30 /* debugging enabled */
|
||||
#define TIF_USERSPACE 31 /* true if FS sets userspace */
|
||||
|
||||
@ -98,7 +97,6 @@ static inline struct thread_info *current_thread_info(void)
|
||||
#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
|
||||
#define _TIF_CPU_GOING_TO_SLEEP (1 << TIF_CPU_GOING_TO_SLEEP)
|
||||
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
|
||||
#define _TIF_FREEZE (1 << TIF_FREEZE)
|
||||
|
||||
/* Note: The masks below must never span more than 16 bits! */
|
||||
|
||||
|
@ -100,7 +100,6 @@ static inline struct thread_info *current_thread_info(void)
|
||||
TIF_NEED_RESCHED */
|
||||
#define TIF_MEMDIE 4 /* is terminating due to OOM killer */
|
||||
#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
|
||||
#define TIF_FREEZE 6 /* is freezing for suspend */
|
||||
#define TIF_IRQ_SYNC 7 /* sync pipeline stage */
|
||||
#define TIF_NOTIFY_RESUME 8 /* callback before returning to user */
|
||||
#define TIF_SINGLESTEP 9
|
||||
@ -111,7 +110,6 @@ static inline struct thread_info *current_thread_info(void)
|
||||
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
|
||||
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
|
||||
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
|
||||
#define _TIF_FREEZE (1<<TIF_FREEZE)
|
||||
#define _TIF_IRQ_SYNC (1<<TIF_IRQ_SYNC)
|
||||
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
|
||||
#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
|
||||
|
@ -86,7 +86,6 @@ struct thread_info {
|
||||
#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
|
||||
#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
|
||||
#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
|
||||
#define TIF_FREEZE 18 /* is freezing for suspend */
|
||||
|
||||
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
|
||||
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
|
||||
@ -94,7 +93,6 @@ struct thread_info {
|
||||
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
|
||||
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
|
||||
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
|
||||
#define _TIF_FREEZE (1<<TIF_FREEZE)
|
||||
|
||||
#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
|
||||
#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
|
||||
|
@ -111,7 +111,6 @@ register struct thread_info *__current_thread_info asm("gr15");
|
||||
#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
|
||||
#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
|
||||
#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
|
||||
#define TIF_FREEZE 18 /* freezing for suspend */
|
||||
|
||||
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
|
||||
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
|
||||
@ -120,7 +119,6 @@ register struct thread_info *__current_thread_info asm("gr15");
|
||||
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
|
||||
#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
|
||||
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
|
||||
#define _TIF_FREEZE (1 << TIF_FREEZE)
|
||||
|
||||
#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
|
||||
#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
|
||||
|
@ -90,7 +90,6 @@ static inline struct thread_info *current_thread_info(void)
|
||||
#define TIF_MEMDIE 4 /* is terminating due to OOM killer */
|
||||
#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
|
||||
#define TIF_NOTIFY_RESUME 6 /* callback before returning to user */
|
||||
#define TIF_FREEZE 16 /* is freezing for suspend */
|
||||
|
||||
/* as above, but as bit values */
|
||||
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
|
||||
@ -99,7 +98,6 @@ static inline struct thread_info *current_thread_info(void)
|
||||
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
|
||||
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
|
||||
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
|
||||
#define _TIF_FREEZE (1<<TIF_FREEZE)
|
||||
|
||||
#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
|
||||
|
||||
|
@ -113,7 +113,6 @@ struct thread_info {
|
||||
#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
|
||||
#define TIF_MCA_INIT 18 /* this task is processing MCA or INIT */
|
||||
#define TIF_DB_DISABLED 19 /* debug trap disabled for fsyscall */
|
||||
#define TIF_FREEZE 20 /* is freezing for suspend */
|
||||
#define TIF_RESTORE_RSE 21 /* user RBS is newer than kernel RBS */
|
||||
|
||||
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
|
||||
@ -126,7 +125,6 @@ struct thread_info {
|
||||
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
|
||||
#define _TIF_MCA_INIT (1 << TIF_MCA_INIT)
|
||||
#define _TIF_DB_DISABLED (1 << TIF_DB_DISABLED)
|
||||
#define _TIF_FREEZE (1 << TIF_FREEZE)
|
||||
#define _TIF_RESTORE_RSE (1 << TIF_RESTORE_RSE)
|
||||
|
||||
/* "work to do on user-return" bits */
|
||||
|
@ -138,7 +138,6 @@ static inline unsigned int get_thread_fault_code(void)
|
||||
#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
|
||||
#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
|
||||
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
|
||||
#define TIF_FREEZE 19 /* is freezing for suspend */
|
||||
|
||||
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
|
||||
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
|
||||
@ -149,7 +148,6 @@ static inline unsigned int get_thread_fault_code(void)
|
||||
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
|
||||
#define _TIF_USEDFPU (1<<TIF_USEDFPU)
|
||||
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
|
||||
#define _TIF_FREEZE (1<<TIF_FREEZE)
|
||||
|
||||
#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
|
||||
#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
|
||||
|
@ -103,7 +103,6 @@ static inline struct thread_info *current_thread_info(void)
|
||||
#define TIF_DELAYED_TRACE 14 /* single step a syscall */
|
||||
#define TIF_SYSCALL_TRACE 15 /* syscall trace active */
|
||||
#define TIF_MEMDIE 16 /* is terminating due to OOM killer */
|
||||
#define TIF_FREEZE 17 /* thread is freezing for suspend */
|
||||
#define TIF_RESTORE_SIGMASK 18 /* restore signal mask in do_signal */
|
||||
|
||||
#endif /* _ASM_M68K_THREAD_INFO_H */
|
||||
|
@ -125,7 +125,6 @@ static inline struct thread_info *current_thread_info(void)
|
||||
#define TIF_MEMDIE 6 /* is terminating due to OOM killer */
|
||||
#define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */
|
||||
#define TIF_SECCOMP 10 /* secure computing */
|
||||
#define TIF_FREEZE 14 /* Freezing for suspend */
|
||||
|
||||
/* true if poll_idle() is polling TIF_NEED_RESCHED */
|
||||
#define TIF_POLLING_NRFLAG 16
|
||||
@ -137,7 +136,6 @@ static inline struct thread_info *current_thread_info(void)
|
||||
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
|
||||
#define _TIF_IRET (1 << TIF_IRET)
|
||||
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
|
||||
#define _TIF_FREEZE (1 << TIF_FREEZE)
|
||||
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
|
||||
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
|
||||
|
||||
|
@ -117,7 +117,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
|
||||
#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
|
||||
#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
|
||||
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
|
||||
#define TIF_FREEZE 19
|
||||
#define TIF_FIXADE 20 /* Fix address errors in software */
|
||||
#define TIF_LOGADE 21 /* Log address errors to syslog */
|
||||
#define TIF_32BIT_REGS 22 /* also implies 16/32 fprs */
|
||||
@ -141,7 +140,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
|
||||
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
|
||||
#define _TIF_USEDFPU (1<<TIF_USEDFPU)
|
||||
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
|
||||
#define _TIF_FREEZE (1<<TIF_FREEZE)
|
||||
#define _TIF_FIXADE (1<<TIF_FIXADE)
|
||||
#define _TIF_LOGADE (1<<TIF_LOGADE)
|
||||
#define _TIF_32BIT_REGS (1<<TIF_32BIT_REGS)
|
||||
|
@ -165,7 +165,6 @@ extern void free_thread_info(struct thread_info *);
|
||||
#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
|
||||
#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
|
||||
#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
|
||||
#define TIF_FREEZE 18 /* freezing for suspend */
|
||||
|
||||
#define _TIF_SYSCALL_TRACE +(1 << TIF_SYSCALL_TRACE)
|
||||
#define _TIF_NOTIFY_RESUME +(1 << TIF_NOTIFY_RESUME)
|
||||
@ -174,7 +173,6 @@ extern void free_thread_info(struct thread_info *);
|
||||
#define _TIF_SINGLESTEP +(1 << TIF_SINGLESTEP)
|
||||
#define _TIF_RESTORE_SIGMASK +(1 << TIF_RESTORE_SIGMASK)
|
||||
#define _TIF_POLLING_NRFLAG +(1 << TIF_POLLING_NRFLAG)
|
||||
#define _TIF_FREEZE +(1 << TIF_FREEZE)
|
||||
|
||||
#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
|
||||
#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
|
||||
|
@ -58,7 +58,6 @@ struct thread_info {
|
||||
#define TIF_32BIT 4 /* 32 bit binary */
|
||||
#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
|
||||
#define TIF_RESTORE_SIGMASK 6 /* restore saved signal mask */
|
||||
#define TIF_FREEZE 7 /* is freezing for suspend */
|
||||
#define TIF_NOTIFY_RESUME 8 /* callback before returning to user */
|
||||
#define TIF_SINGLESTEP 9 /* single stepping? */
|
||||
#define TIF_BLOCKSTEP 10 /* branch stepping? */
|
||||
@ -69,7 +68,6 @@ struct thread_info {
|
||||
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
|
||||
#define _TIF_32BIT (1 << TIF_32BIT)
|
||||
#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
|
||||
#define _TIF_FREEZE (1 << TIF_FREEZE)
|
||||
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
|
||||
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
|
||||
#define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
|
||||
|
@ -109,7 +109,6 @@ static inline struct thread_info *current_thread_info(void)
|
||||
#define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
|
||||
#define TIF_NOERROR 12 /* Force successful syscall return */
|
||||
#define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
|
||||
#define TIF_FREEZE 14 /* Freezing for suspend */
|
||||
#define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
|
||||
#define TIF_RUNLATCH 16 /* Is the runlatch enabled? */
|
||||
|
||||
@ -127,7 +126,6 @@ static inline struct thread_info *current_thread_info(void)
|
||||
#define _TIF_RESTOREALL (1<<TIF_RESTOREALL)
|
||||
#define _TIF_NOERROR (1<<TIF_NOERROR)
|
||||
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
|
||||
#define _TIF_FREEZE (1<<TIF_FREEZE)
|
||||
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
|
||||
#define _TIF_RUNLATCH (1<<TIF_RUNLATCH)
|
||||
#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
|
||||
|
@ -1406,7 +1406,6 @@ static struct bus_type vio_bus_type = {
|
||||
.match = vio_bus_match,
|
||||
.probe = vio_bus_probe,
|
||||
.remove = vio_bus_remove,
|
||||
.pm = GENERIC_SUBSYS_PM_OPS,
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -102,7 +102,6 @@ static inline struct thread_info *current_thread_info(void)
|
||||
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
|
||||
#define TIF_RESTORE_SIGMASK 19 /* restore signal mask in do_signal() */
|
||||
#define TIF_SINGLE_STEP 20 /* This task is single stepped */
|
||||
#define TIF_FREEZE 21 /* thread is freezing for suspend */
|
||||
|
||||
#define _TIF_SYSCALL (1<<TIF_SYSCALL)
|
||||
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
|
||||
@ -119,7 +118,6 @@ static inline struct thread_info *current_thread_info(void)
|
||||
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
|
||||
#define _TIF_31BIT (1<<TIF_31BIT)
|
||||
#define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP)
|
||||
#define _TIF_FREEZE (1<<TIF_FREEZE)
|
||||
|
||||
#ifdef CONFIG_64BIT
|
||||
#define is_32bit_task() (test_thread_flag(TIF_31BIT))
|
||||
|
@ -122,7 +122,6 @@ extern void init_thread_xstate(void);
|
||||
#define TIF_SYSCALL_TRACEPOINT 8 /* for ftrace syscall instrumentation */
|
||||
#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
|
||||
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
|
||||
#define TIF_FREEZE 19 /* Freezing for suspend */
|
||||
|
||||
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
|
||||
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
|
||||
@ -133,7 +132,6 @@ extern void init_thread_xstate(void);
|
||||
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
|
||||
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
|
||||
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
|
||||
#define _TIF_FREEZE (1 << TIF_FREEZE)
|
||||
|
||||
/*
|
||||
* _TIF_ALLWORK_MASK and _TIF_WORK_MASK need to fit within 2 bytes, or we
|
||||
|
@ -133,7 +133,6 @@ BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
|
||||
#define TIF_POLLING_NRFLAG 9 /* true if poll_idle() is polling
|
||||
* TIF_NEED_RESCHED */
|
||||
#define TIF_MEMDIE 10 /* is terminating due to OOM killer */
|
||||
#define TIF_FREEZE 11 /* is freezing for suspend */
|
||||
|
||||
/* as above, but as bit values */
|
||||
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
|
||||
@ -147,7 +146,6 @@ BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
|
||||
#define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | \
|
||||
_TIF_SIGPENDING | \
|
||||
_TIF_RESTORE_SIGMASK)
|
||||
#define _TIF_FREEZE (1<<TIF_FREEZE)
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
|
@ -225,7 +225,6 @@ register struct thread_info *current_thread_info_reg asm("g6");
|
||||
/* flag bit 12 is available */
|
||||
#define TIF_MEMDIE 13 /* is terminating due to OOM killer */
|
||||
#define TIF_POLLING_NRFLAG 14
|
||||
#define TIF_FREEZE 15 /* is freezing for suspend */
|
||||
|
||||
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
|
||||
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
|
||||
@ -237,7 +236,6 @@ register struct thread_info *current_thread_info_reg asm("g6");
|
||||
#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
|
||||
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
|
||||
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
|
||||
#define _TIF_FREEZE (1<<TIF_FREEZE)
|
||||
|
||||
#define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
|
||||
_TIF_DO_NOTIFY_RESUME_MASK | \
|
||||
|
@ -71,7 +71,6 @@ static inline struct thread_info *current_thread_info(void)
|
||||
#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
|
||||
#define TIF_SYSCALL_AUDIT 6
|
||||
#define TIF_RESTORE_SIGMASK 7
|
||||
#define TIF_FREEZE 16 /* is freezing for suspend */
|
||||
|
||||
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
|
||||
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
|
||||
@ -80,6 +79,5 @@ static inline struct thread_info *current_thread_info(void)
|
||||
#define _TIF_MEMDIE (1 << TIF_MEMDIE)
|
||||
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
|
||||
#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
|
||||
#define _TIF_FREEZE (1 << TIF_FREEZE)
|
||||
|
||||
#endif
|
||||
|
@ -135,14 +135,12 @@ static inline struct thread_info *current_thread_info(void)
|
||||
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
|
||||
#define TIF_SYSCALL_TRACE 8
|
||||
#define TIF_MEMDIE 18
|
||||
#define TIF_FREEZE 19
|
||||
#define TIF_RESTORE_SIGMASK 20
|
||||
|
||||
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
|
||||
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
|
||||
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
|
||||
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
|
||||
#define _TIF_FREEZE (1 << TIF_FREEZE)
|
||||
#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
|
||||
|
||||
/*
|
||||
|
@ -90,7 +90,6 @@ struct thread_info {
|
||||
#define TIF_MEMDIE 20 /* is terminating due to OOM killer */
|
||||
#define TIF_DEBUG 21 /* uses debug registers */
|
||||
#define TIF_IO_BITMAP 22 /* uses I/O bitmap */
|
||||
#define TIF_FREEZE 23 /* is freezing for suspend */
|
||||
#define TIF_FORCED_TF 24 /* true if TF in eflags artificially */
|
||||
#define TIF_BLOCKSTEP 25 /* set when we want DEBUGCTLMSR_BTF */
|
||||
#define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */
|
||||
@ -112,7 +111,6 @@ struct thread_info {
|
||||
#define _TIF_FORK (1 << TIF_FORK)
|
||||
#define _TIF_DEBUG (1 << TIF_DEBUG)
|
||||
#define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP)
|
||||
#define _TIF_FREEZE (1 << TIF_FREEZE)
|
||||
#define _TIF_FORCED_TF (1 << TIF_FORCED_TF)
|
||||
#define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
|
||||
#define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES)
|
||||
|
@ -132,7 +132,6 @@ static inline struct thread_info *current_thread_info(void)
|
||||
#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
|
||||
#define TIF_RESTORE_SIGMASK 6 /* restore signal mask in do_signal() */
|
||||
#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
|
||||
#define TIF_FREEZE 17 /* is freezing for suspend */
|
||||
|
||||
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
|
||||
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
|
||||
@ -141,7 +140,6 @@ static inline struct thread_info *current_thread_info(void)
|
||||
#define _TIF_IRET (1<<TIF_IRET)
|
||||
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
|
||||
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
|
||||
#define _TIF_FREEZE (1<<TIF_FREEZE)
|
||||
|
||||
#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
|
||||
#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
|
||||
|
@ -476,6 +476,22 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"),
|
||||
},
|
||||
},
|
||||
{
|
||||
.callback = init_nvs_nosave,
|
||||
.ident = "Asus K54C",
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "K54C"),
|
||||
},
|
||||
},
|
||||
{
|
||||
.callback = init_nvs_nosave,
|
||||
.ident = "Asus K54HR",
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"),
|
||||
},
|
||||
},
|
||||
{},
|
||||
};
|
||||
#endif /* CONFIG_SUSPEND */
|
||||
|
@ -109,31 +109,7 @@ static int amba_legacy_resume(struct device *dev)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int amba_pm_prepare(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
int ret = 0;
|
||||
|
||||
if (drv && drv->pm && drv->pm->prepare)
|
||||
ret = drv->pm->prepare(dev);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void amba_pm_complete(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
|
||||
if (drv && drv->pm && drv->pm->complete)
|
||||
drv->pm->complete(dev);
|
||||
}
|
||||
|
||||
#else /* !CONFIG_PM_SLEEP */
|
||||
|
||||
#define amba_pm_prepare NULL
|
||||
#define amba_pm_complete NULL
|
||||
|
||||
#endif /* !CONFIG_PM_SLEEP */
|
||||
#endif /* CONFIG_PM_SLEEP */
|
||||
|
||||
#ifdef CONFIG_SUSPEND
|
||||
|
||||
@ -155,22 +131,6 @@ static int amba_pm_suspend(struct device *dev)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int amba_pm_suspend_noirq(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
int ret = 0;
|
||||
|
||||
if (!drv)
|
||||
return 0;
|
||||
|
||||
if (drv->pm) {
|
||||
if (drv->pm->suspend_noirq)
|
||||
ret = drv->pm->suspend_noirq(dev);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int amba_pm_resume(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
@ -189,28 +149,10 @@ static int amba_pm_resume(struct device *dev)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int amba_pm_resume_noirq(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
int ret = 0;
|
||||
|
||||
if (!drv)
|
||||
return 0;
|
||||
|
||||
if (drv->pm) {
|
||||
if (drv->pm->resume_noirq)
|
||||
ret = drv->pm->resume_noirq(dev);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
#else /* !CONFIG_SUSPEND */
|
||||
|
||||
#define amba_pm_suspend NULL
|
||||
#define amba_pm_resume NULL
|
||||
#define amba_pm_suspend_noirq NULL
|
||||
#define amba_pm_resume_noirq NULL
|
||||
|
||||
#endif /* !CONFIG_SUSPEND */
|
||||
|
||||
@ -234,22 +176,6 @@ static int amba_pm_freeze(struct device *dev)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int amba_pm_freeze_noirq(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
int ret = 0;
|
||||
|
||||
if (!drv)
|
||||
return 0;
|
||||
|
||||
if (drv->pm) {
|
||||
if (drv->pm->freeze_noirq)
|
||||
ret = drv->pm->freeze_noirq(dev);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int amba_pm_thaw(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
@ -268,22 +194,6 @@ static int amba_pm_thaw(struct device *dev)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int amba_pm_thaw_noirq(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
int ret = 0;
|
||||
|
||||
if (!drv)
|
||||
return 0;
|
||||
|
||||
if (drv->pm) {
|
||||
if (drv->pm->thaw_noirq)
|
||||
ret = drv->pm->thaw_noirq(dev);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int amba_pm_poweroff(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
@ -302,22 +212,6 @@ static int amba_pm_poweroff(struct device *dev)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int amba_pm_poweroff_noirq(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
int ret = 0;
|
||||
|
||||
if (!drv)
|
||||
return 0;
|
||||
|
||||
if (drv->pm) {
|
||||
if (drv->pm->poweroff_noirq)
|
||||
ret = drv->pm->poweroff_noirq(dev);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int amba_pm_restore(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
@ -336,32 +230,12 @@ static int amba_pm_restore(struct device *dev)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int amba_pm_restore_noirq(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
int ret = 0;
|
||||
|
||||
if (!drv)
|
||||
return 0;
|
||||
|
||||
if (drv->pm) {
|
||||
if (drv->pm->restore_noirq)
|
||||
ret = drv->pm->restore_noirq(dev);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
#else /* !CONFIG_HIBERNATE_CALLBACKS */
|
||||
|
||||
#define amba_pm_freeze NULL
|
||||
#define amba_pm_thaw NULL
|
||||
#define amba_pm_poweroff NULL
|
||||
#define amba_pm_restore NULL
|
||||
#define amba_pm_freeze_noirq NULL
|
||||
#define amba_pm_thaw_noirq NULL
|
||||
#define amba_pm_poweroff_noirq NULL
|
||||
#define amba_pm_restore_noirq NULL
|
||||
|
||||
#endif /* !CONFIG_HIBERNATE_CALLBACKS */
|
||||
|
||||
@ -402,20 +276,12 @@ static int amba_pm_runtime_resume(struct device *dev)
|
||||
#ifdef CONFIG_PM
|
||||
|
||||
static const struct dev_pm_ops amba_pm = {
|
||||
.prepare = amba_pm_prepare,
|
||||
.complete = amba_pm_complete,
|
||||
.suspend = amba_pm_suspend,
|
||||
.resume = amba_pm_resume,
|
||||
.freeze = amba_pm_freeze,
|
||||
.thaw = amba_pm_thaw,
|
||||
.poweroff = amba_pm_poweroff,
|
||||
.restore = amba_pm_restore,
|
||||
.suspend_noirq = amba_pm_suspend_noirq,
|
||||
.resume_noirq = amba_pm_resume_noirq,
|
||||
.freeze_noirq = amba_pm_freeze_noirq,
|
||||
.thaw_noirq = amba_pm_thaw_noirq,
|
||||
.poweroff_noirq = amba_pm_poweroff_noirq,
|
||||
.restore_noirq = amba_pm_restore_noirq,
|
||||
SET_RUNTIME_PM_OPS(
|
||||
amba_pm_runtime_suspend,
|
||||
amba_pm_runtime_resume,
|
||||
|
@ -534,6 +534,8 @@ static int _request_firmware(const struct firmware **firmware_p,
|
||||
return 0;
|
||||
}
|
||||
|
||||
read_lock_usermodehelper();
|
||||
|
||||
if (WARN_ON(usermodehelper_is_disabled())) {
|
||||
dev_err(device, "firmware: %s will not be loaded\n", name);
|
||||
retval = -EBUSY;
|
||||
@ -572,6 +574,8 @@ static int _request_firmware(const struct firmware **firmware_p,
|
||||
fw_destroy_instance(fw_priv);
|
||||
|
||||
out:
|
||||
read_unlock_usermodehelper();
|
||||
|
||||
if (retval) {
|
||||
release_firmware(firmware);
|
||||
*firmware_p = NULL;
|
||||
|
@ -700,25 +700,6 @@ static int platform_legacy_resume(struct device *dev)
|
||||
return ret;
|
||||
}
|
||||
|
||||
int platform_pm_prepare(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
int ret = 0;
|
||||
|
||||
if (drv && drv->pm && drv->pm->prepare)
|
||||
ret = drv->pm->prepare(dev);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void platform_pm_complete(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
|
||||
if (drv && drv->pm && drv->pm->complete)
|
||||
drv->pm->complete(dev);
|
||||
}
|
||||
|
||||
#endif /* CONFIG_PM_SLEEP */
|
||||
|
||||
#ifdef CONFIG_SUSPEND
|
||||
@ -741,22 +722,6 @@ int platform_pm_suspend(struct device *dev)
|
||||
return ret;
|
||||
}
|
||||
|
||||
int platform_pm_suspend_noirq(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
int ret = 0;
|
||||
|
||||
if (!drv)
|
||||
return 0;
|
||||
|
||||
if (drv->pm) {
|
||||
if (drv->pm->suspend_noirq)
|
||||
ret = drv->pm->suspend_noirq(dev);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int platform_pm_resume(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
@ -775,22 +740,6 @@ int platform_pm_resume(struct device *dev)
|
||||
return ret;
|
||||
}
|
||||
|
||||
int platform_pm_resume_noirq(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
int ret = 0;
|
||||
|
||||
if (!drv)
|
||||
return 0;
|
||||
|
||||
if (drv->pm) {
|
||||
if (drv->pm->resume_noirq)
|
||||
ret = drv->pm->resume_noirq(dev);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_SUSPEND */
|
||||
|
||||
#ifdef CONFIG_HIBERNATE_CALLBACKS
|
||||
@ -813,22 +762,6 @@ int platform_pm_freeze(struct device *dev)
|
||||
return ret;
|
||||
}
|
||||
|
||||
int platform_pm_freeze_noirq(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
int ret = 0;
|
||||
|
||||
if (!drv)
|
||||
return 0;
|
||||
|
||||
if (drv->pm) {
|
||||
if (drv->pm->freeze_noirq)
|
||||
ret = drv->pm->freeze_noirq(dev);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int platform_pm_thaw(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
@ -847,22 +780,6 @@ int platform_pm_thaw(struct device *dev)
|
||||
return ret;
|
||||
}
|
||||
|
||||
int platform_pm_thaw_noirq(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
int ret = 0;
|
||||
|
||||
if (!drv)
|
||||
return 0;
|
||||
|
||||
if (drv->pm) {
|
||||
if (drv->pm->thaw_noirq)
|
||||
ret = drv->pm->thaw_noirq(dev);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int platform_pm_poweroff(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
@ -881,22 +798,6 @@ int platform_pm_poweroff(struct device *dev)
|
||||
return ret;
|
||||
}
|
||||
|
||||
int platform_pm_poweroff_noirq(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
int ret = 0;
|
||||
|
||||
if (!drv)
|
||||
return 0;
|
||||
|
||||
if (drv->pm) {
|
||||
if (drv->pm->poweroff_noirq)
|
||||
ret = drv->pm->poweroff_noirq(dev);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int platform_pm_restore(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
@ -915,22 +816,6 @@ int platform_pm_restore(struct device *dev)
|
||||
return ret;
|
||||
}
|
||||
|
||||
int platform_pm_restore_noirq(struct device *dev)
|
||||
{
|
||||
struct device_driver *drv = dev->driver;
|
||||
int ret = 0;
|
||||
|
||||
if (!drv)
|
||||
return 0;
|
||||
|
||||
if (drv->pm) {
|
||||
if (drv->pm->restore_noirq)
|
||||
ret = drv->pm->restore_noirq(dev);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_HIBERNATE_CALLBACKS */
|
||||
|
||||
static const struct dev_pm_ops platform_dev_pm_ops = {
|
||||
|
@ -97,16 +97,16 @@ int pm_generic_prepare(struct device *dev)
|
||||
* @event: PM transition of the system under way.
|
||||
* @bool: Whether or not this is the "noirq" stage.
|
||||
*
|
||||
* If the device has not been suspended at run time, execute the
|
||||
* suspend/freeze/poweroff/thaw callback provided by its driver, if defined, and
|
||||
* return its error code. Otherwise, return zero.
|
||||
* Execute the PM callback corresponding to @event provided by the driver of
|
||||
* @dev, if defined, and return its error code. Return 0 if the callback is
|
||||
* not present.
|
||||
*/
|
||||
static int __pm_generic_call(struct device *dev, int event, bool noirq)
|
||||
{
|
||||
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
|
||||
int (*callback)(struct device *);
|
||||
|
||||
if (!pm || pm_runtime_suspended(dev))
|
||||
if (!pm)
|
||||
return 0;
|
||||
|
||||
switch (event) {
|
||||
@ -119,9 +119,15 @@ static int __pm_generic_call(struct device *dev, int event, bool noirq)
|
||||
case PM_EVENT_HIBERNATE:
|
||||
callback = noirq ? pm->poweroff_noirq : pm->poweroff;
|
||||
break;
|
||||
case PM_EVENT_RESUME:
|
||||
callback = noirq ? pm->resume_noirq : pm->resume;
|
||||
break;
|
||||
case PM_EVENT_THAW:
|
||||
callback = noirq ? pm->thaw_noirq : pm->thaw;
|
||||
break;
|
||||
case PM_EVENT_RESTORE:
|
||||
callback = noirq ? pm->restore_noirq : pm->restore;
|
||||
break;
|
||||
default:
|
||||
callback = NULL;
|
||||
break;
|
||||
@ -210,57 +216,13 @@ int pm_generic_thaw(struct device *dev)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(pm_generic_thaw);
|
||||
|
||||
/**
|
||||
* __pm_generic_resume - Generic resume/restore callback for subsystems.
|
||||
* @dev: Device to handle.
|
||||
* @event: PM transition of the system under way.
|
||||
* @bool: Whether or not this is the "noirq" stage.
|
||||
*
|
||||
* Execute the resume/resotre callback provided by the @dev's driver, if
|
||||
* defined. If it returns 0, change the device's runtime PM status to 'active'.
|
||||
* Return the callback's error code.
|
||||
*/
|
||||
static int __pm_generic_resume(struct device *dev, int event, bool noirq)
|
||||
{
|
||||
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
|
||||
int (*callback)(struct device *);
|
||||
int ret;
|
||||
|
||||
if (!pm)
|
||||
return 0;
|
||||
|
||||
switch (event) {
|
||||
case PM_EVENT_RESUME:
|
||||
callback = noirq ? pm->resume_noirq : pm->resume;
|
||||
break;
|
||||
case PM_EVENT_RESTORE:
|
||||
callback = noirq ? pm->restore_noirq : pm->restore;
|
||||
break;
|
||||
default:
|
||||
callback = NULL;
|
||||
break;
|
||||
}
|
||||
|
||||
if (!callback)
|
||||
return 0;
|
||||
|
||||
ret = callback(dev);
|
||||
if (!ret && !noirq && pm_runtime_enabled(dev)) {
|
||||
pm_runtime_disable(dev);
|
||||
pm_runtime_set_active(dev);
|
||||
pm_runtime_enable(dev);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
 * pm_generic_resume_noirq - Generic resume_noirq callback for subsystems.
 * @dev: Device to resume.
 */
int pm_generic_resume_noirq(struct device *dev)
{
        return __pm_generic_resume(dev, PM_EVENT_RESUME, true);
        return __pm_generic_call(dev, PM_EVENT_RESUME, true);
}
EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);

@@ -270,7 +232,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
 */
int pm_generic_resume(struct device *dev)
{
        return __pm_generic_resume(dev, PM_EVENT_RESUME, false);
        return __pm_generic_call(dev, PM_EVENT_RESUME, false);
}
EXPORT_SYMBOL_GPL(pm_generic_resume);

@@ -280,7 +242,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume);
 */
int pm_generic_restore_noirq(struct device *dev)
{
        return __pm_generic_resume(dev, PM_EVENT_RESTORE, true);
        return __pm_generic_call(dev, PM_EVENT_RESTORE, true);
}
EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);

@@ -290,7 +252,7 @@ EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
 */
int pm_generic_restore(struct device *dev)
{
        return __pm_generic_resume(dev, PM_EVENT_RESTORE, false);
        return __pm_generic_call(dev, PM_EVENT_RESTORE, false);
}
EXPORT_SYMBOL_GPL(pm_generic_restore);

@@ -314,28 +276,3 @@ void pm_generic_complete(struct device *dev)
        pm_runtime_idle(dev);
}
#endif /* CONFIG_PM_SLEEP */

struct dev_pm_ops generic_subsys_pm_ops = {
#ifdef CONFIG_PM_SLEEP
        .prepare = pm_generic_prepare,
        .suspend = pm_generic_suspend,
        .suspend_noirq = pm_generic_suspend_noirq,
        .resume = pm_generic_resume,
        .resume_noirq = pm_generic_resume_noirq,
        .freeze = pm_generic_freeze,
        .freeze_noirq = pm_generic_freeze_noirq,
        .thaw = pm_generic_thaw,
        .thaw_noirq = pm_generic_thaw_noirq,
        .poweroff = pm_generic_poweroff,
        .poweroff_noirq = pm_generic_poweroff_noirq,
        .restore = pm_generic_restore,
        .restore_noirq = pm_generic_restore_noirq,
        .complete = pm_generic_complete,
#endif
#ifdef CONFIG_PM_RUNTIME
        .runtime_suspend = pm_generic_runtime_suspend,
        .runtime_resume = pm_generic_runtime_resume,
        .runtime_idle = pm_generic_runtime_idle,
#endif
};
EXPORT_SYMBOL_GPL(generic_subsys_pm_ops);
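Editor's note: with generic_subsys_pm_ops dropped by this series, subsystems that want the generic behaviour point their dev_pm_ops members at the pm_generic_* helpers directly (or simply rely on the PM core's new fallback to the driver callbacks). The sketch below shows one plausible way a bus type could do that wiring; the bus name and structure are illustrative assumptions, not part of this commit.

#include <linux/device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

/* Hypothetical bus type; only the PM wiring is the point here. */
static const struct dev_pm_ops example_bus_pm_ops = {
        .prepare         = pm_generic_prepare,
        .suspend         = pm_generic_suspend,
        .resume          = pm_generic_resume,
        .freeze          = pm_generic_freeze,
        .thaw            = pm_generic_thaw,
        .poweroff        = pm_generic_poweroff,
        .restore         = pm_generic_restore,
        .runtime_suspend = pm_generic_runtime_suspend,
        .runtime_resume  = pm_generic_runtime_resume,
        .runtime_idle    = pm_generic_runtime_idle,
};

static struct bus_type example_bus_type = {
        .name = "example-bus",
        .pm   = &example_bus_pm_ops,
};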
@@ -32,6 +32,8 @@
#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
@@ -164,8 +166,9 @@ static ktime_t initcall_debug_start(struct device *dev)
        ktime_t calltime = ktime_set(0, 0);

        if (initcall_debug) {
                pr_info("calling %s+ @ %i\n",
                        dev_name(dev), task_pid_nr(current));
                pr_info("calling %s+ @ %i, parent: %s\n",
                        dev_name(dev), task_pid_nr(current),
                        dev->parent ? dev_name(dev->parent) : "none");
                calltime = ktime_get();
        }

@@ -211,151 +214,69 @@ static void dpm_wait_for_children(struct device *dev, bool async)
}

/**
 * pm_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static int pm_op(struct device *dev,
                 const struct dev_pm_ops *ops,
                 pm_message_t state)
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        int error = 0;
        ktime_t calltime;

        calltime = initcall_debug_start(dev);

        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                if (ops->suspend) {
                        error = ops->suspend(dev);
                        suspend_report_result(ops->suspend, error);
                }
                break;
                return ops->suspend;
        case PM_EVENT_RESUME:
                if (ops->resume) {
                        error = ops->resume(dev);
                        suspend_report_result(ops->resume, error);
                }
                break;
                return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                if (ops->freeze) {
                        error = ops->freeze(dev);
                        suspend_report_result(ops->freeze, error);
                }
                break;
                return ops->freeze;
        case PM_EVENT_HIBERNATE:
                if (ops->poweroff) {
                        error = ops->poweroff(dev);
                        suspend_report_result(ops->poweroff, error);
                }
                break;
                return ops->poweroff;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                if (ops->thaw) {
                        error = ops->thaw(dev);
                        suspend_report_result(ops->thaw, error);
                }
                return ops->thaw;
                break;
        case PM_EVENT_RESTORE:
                if (ops->restore) {
                        error = ops->restore(dev);
                        suspend_report_result(ops->restore, error);
                }
                break;
                return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        default:
                error = -EINVAL;
        }

        initcall_debug_report(dev, calltime, error);

        return error;
        return NULL;
}

/**
 * pm_noirq_op - Execute the PM operation appropriate for given PM event.
 * @dev: Device to handle.
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int pm_noirq_op(struct device *dev,
                       const struct dev_pm_ops *ops,
                       pm_message_t state)
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        int error = 0;
        ktime_t calltime = ktime_set(0, 0), delta, rettime;

        if (initcall_debug) {
                pr_info("calling %s+ @ %i, parent: %s\n",
                        dev_name(dev), task_pid_nr(current),
                        dev->parent ? dev_name(dev->parent) : "none");
                calltime = ktime_get();
        }

        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                if (ops->suspend_noirq) {
                        error = ops->suspend_noirq(dev);
                        suspend_report_result(ops->suspend_noirq, error);
                }
                break;
                return ops->suspend_noirq;
        case PM_EVENT_RESUME:
                if (ops->resume_noirq) {
                        error = ops->resume_noirq(dev);
                        suspend_report_result(ops->resume_noirq, error);
                }
                break;
                return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                if (ops->freeze_noirq) {
                        error = ops->freeze_noirq(dev);
                        suspend_report_result(ops->freeze_noirq, error);
                }
                break;
                return ops->freeze_noirq;
        case PM_EVENT_HIBERNATE:
                if (ops->poweroff_noirq) {
                        error = ops->poweroff_noirq(dev);
                        suspend_report_result(ops->poweroff_noirq, error);
                }
                break;
                return ops->poweroff_noirq;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                if (ops->thaw_noirq) {
                        error = ops->thaw_noirq(dev);
                        suspend_report_result(ops->thaw_noirq, error);
                }
                break;
                return ops->thaw_noirq;
        case PM_EVENT_RESTORE:
                if (ops->restore_noirq) {
                        error = ops->restore_noirq(dev);
                        suspend_report_result(ops->restore_noirq, error);
                }
                break;
                return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        default:
                error = -EINVAL;
        }

        if (initcall_debug) {
                rettime = ktime_get();
                delta = ktime_sub(rettime, calltime);
                printk("initcall %s_i+ returned %d after %Ld usecs\n",
                        dev_name(dev), error,
                        (unsigned long long)ktime_to_ns(delta) >> 10);
        }

        return error;
        return NULL;
}

static char *pm_verb(int event)
@@ -413,6 +334,26 @@ static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
                usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
                            pm_message_t state, char *info)
{
        ktime_t calltime;
        int error;

        if (!cb)
                return 0;

        calltime = initcall_debug_start(dev);

        pm_dev_dbg(dev, state, info);
        error = cb(dev);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error);

        return error;
}
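Editor's note: after this change, pm_op() and pm_noirq_op() only pick the callback pointer and dpm_run_callback() does the debug/timing/reporting around a single invocation. The standalone plain-C program below restates that split in miniature; the types and names are simplified stand-ins, not the kernel's.

/* Plain C model of the "select a callback, then run it once" split. */
#include <stdio.h>

struct device;
typedef int (*pm_callback_t)(struct device *);

struct dev_pm_ops { pm_callback_t suspend, resume; };

enum pm_event { EV_SUSPEND, EV_RESUME };

static pm_callback_t pm_op(const struct dev_pm_ops *ops, enum pm_event ev)
{
        switch (ev) {
        case EV_SUSPEND: return ops->suspend;
        case EV_RESUME:  return ops->resume;
        }
        return NULL;
}

static int run_callback(pm_callback_t cb, struct device *dev, const char *info)
{
        if (!cb)
                return 0;               /* nothing to do is not an error */
        printf("%s\n", info);           /* stands in for pm_dev_dbg()/timing */
        return cb(dev);
}

static int demo_resume(struct device *dev) { (void)dev; return 0; }

int main(void)
{
        struct dev_pm_ops ops = { .resume = demo_resume };
        return run_callback(pm_op(&ops, EV_RESUME), NULL, "resume example-dev");
}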
|
||||
/*------------------------- Resume routines -------------------------*/
|
||||
|
||||
/**
|
||||
@ -425,25 +366,34 @@ static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
|
||||
*/
|
||||
static int device_resume_noirq(struct device *dev, pm_message_t state)
|
||||
{
|
||||
pm_callback_t callback = NULL;
|
||||
char *info = NULL;
|
||||
int error = 0;
|
||||
|
||||
TRACE_DEVICE(dev);
|
||||
TRACE_RESUME(0);
|
||||
|
||||
if (dev->pm_domain) {
|
||||
pm_dev_dbg(dev, state, "EARLY power domain ");
|
||||
error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
|
||||
info = "EARLY power domain ";
|
||||
callback = pm_noirq_op(&dev->pm_domain->ops, state);
|
||||
} else if (dev->type && dev->type->pm) {
|
||||
pm_dev_dbg(dev, state, "EARLY type ");
|
||||
error = pm_noirq_op(dev, dev->type->pm, state);
|
||||
info = "EARLY type ";
|
||||
callback = pm_noirq_op(dev->type->pm, state);
|
||||
} else if (dev->class && dev->class->pm) {
|
||||
pm_dev_dbg(dev, state, "EARLY class ");
|
||||
error = pm_noirq_op(dev, dev->class->pm, state);
|
||||
info = "EARLY class ";
|
||||
callback = pm_noirq_op(dev->class->pm, state);
|
||||
} else if (dev->bus && dev->bus->pm) {
|
||||
pm_dev_dbg(dev, state, "EARLY ");
|
||||
error = pm_noirq_op(dev, dev->bus->pm, state);
|
||||
info = "EARLY bus ";
|
||||
callback = pm_noirq_op(dev->bus->pm, state);
|
||||
}
|
||||
|
||||
if (!callback && dev->driver && dev->driver->pm) {
|
||||
info = "EARLY driver ";
|
||||
callback = pm_noirq_op(dev->driver->pm, state);
|
||||
}
|
||||
|
||||
error = dpm_run_callback(callback, dev, state, info);
|
||||
|
||||
TRACE_RESUME(error);
|
||||
return error;
|
||||
}
|
||||
@ -485,26 +435,6 @@ void dpm_resume_noirq(pm_message_t state)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dpm_resume_noirq);
|
||||
|
||||
/**
|
||||
* legacy_resume - Execute a legacy (bus or class) resume callback for device.
|
||||
* @dev: Device to resume.
|
||||
* @cb: Resume callback to execute.
|
||||
*/
|
||||
static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
|
||||
{
|
||||
int error;
|
||||
ktime_t calltime;
|
||||
|
||||
calltime = initcall_debug_start(dev);
|
||||
|
||||
error = cb(dev);
|
||||
suspend_report_result(cb, error);
|
||||
|
||||
initcall_debug_report(dev, calltime, error);
|
||||
|
||||
return error;
|
||||
}
|
||||
|
||||
/**
|
||||
* device_resume - Execute "resume" callbacks for given device.
|
||||
* @dev: Device to handle.
|
||||
@ -513,6 +443,8 @@ static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
|
||||
*/
|
||||
static int device_resume(struct device *dev, pm_message_t state, bool async)
|
||||
{
|
||||
pm_callback_t callback = NULL;
|
||||
char *info = NULL;
|
||||
int error = 0;
|
||||
bool put = false;
|
||||
|
||||
@ -535,40 +467,48 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
|
||||
put = true;
|
||||
|
||||
if (dev->pm_domain) {
|
||||
pm_dev_dbg(dev, state, "power domain ");
|
||||
error = pm_op(dev, &dev->pm_domain->ops, state);
|
||||
goto End;
|
||||
info = "power domain ";
|
||||
callback = pm_op(&dev->pm_domain->ops, state);
|
||||
goto Driver;
|
||||
}
|
||||
|
||||
if (dev->type && dev->type->pm) {
|
||||
pm_dev_dbg(dev, state, "type ");
|
||||
error = pm_op(dev, dev->type->pm, state);
|
||||
goto End;
|
||||
info = "type ";
|
||||
callback = pm_op(dev->type->pm, state);
|
||||
goto Driver;
|
||||
}
|
||||
|
||||
if (dev->class) {
|
||||
if (dev->class->pm) {
|
||||
pm_dev_dbg(dev, state, "class ");
|
||||
error = pm_op(dev, dev->class->pm, state);
|
||||
goto End;
|
||||
info = "class ";
|
||||
callback = pm_op(dev->class->pm, state);
|
||||
goto Driver;
|
||||
} else if (dev->class->resume) {
|
||||
pm_dev_dbg(dev, state, "legacy class ");
|
||||
error = legacy_resume(dev, dev->class->resume);
|
||||
info = "legacy class ";
|
||||
callback = dev->class->resume;
|
||||
goto End;
|
||||
}
|
||||
}
|
||||
|
||||
if (dev->bus) {
|
||||
if (dev->bus->pm) {
|
||||
pm_dev_dbg(dev, state, "");
|
||||
error = pm_op(dev, dev->bus->pm, state);
|
||||
info = "bus ";
|
||||
callback = pm_op(dev->bus->pm, state);
|
||||
} else if (dev->bus->resume) {
|
||||
pm_dev_dbg(dev, state, "legacy ");
|
||||
error = legacy_resume(dev, dev->bus->resume);
|
||||
info = "legacy bus ";
|
||||
callback = dev->bus->resume;
|
||||
goto End;
|
||||
}
|
||||
}
|
||||
|
||||
Driver:
|
||||
if (!callback && dev->driver && dev->driver->pm) {
|
||||
info = "driver ";
|
||||
callback = pm_op(dev->driver->pm, state);
|
||||
}
|
||||
|
||||
End:
|
||||
error = dpm_run_callback(callback, dev, state, info);
|
||||
dev->power.is_suspended = false;
|
||||
|
||||
Unlock:
|
||||
@ -660,24 +600,33 @@ void dpm_resume(pm_message_t state)
|
||||
*/
|
||||
static void device_complete(struct device *dev, pm_message_t state)
|
||||
{
|
||||
void (*callback)(struct device *) = NULL;
|
||||
char *info = NULL;
|
||||
|
||||
device_lock(dev);
|
||||
|
||||
if (dev->pm_domain) {
|
||||
pm_dev_dbg(dev, state, "completing power domain ");
|
||||
if (dev->pm_domain->ops.complete)
|
||||
dev->pm_domain->ops.complete(dev);
|
||||
info = "completing power domain ";
|
||||
callback = dev->pm_domain->ops.complete;
|
||||
} else if (dev->type && dev->type->pm) {
|
||||
pm_dev_dbg(dev, state, "completing type ");
|
||||
if (dev->type->pm->complete)
|
||||
dev->type->pm->complete(dev);
|
||||
info = "completing type ";
|
||||
callback = dev->type->pm->complete;
|
||||
} else if (dev->class && dev->class->pm) {
|
||||
pm_dev_dbg(dev, state, "completing class ");
|
||||
if (dev->class->pm->complete)
|
||||
dev->class->pm->complete(dev);
|
||||
info = "completing class ";
|
||||
callback = dev->class->pm->complete;
|
||||
} else if (dev->bus && dev->bus->pm) {
|
||||
pm_dev_dbg(dev, state, "completing ");
|
||||
if (dev->bus->pm->complete)
|
||||
dev->bus->pm->complete(dev);
|
||||
info = "completing bus ";
|
||||
callback = dev->bus->pm->complete;
|
||||
}
|
||||
|
||||
if (!callback && dev->driver && dev->driver->pm) {
|
||||
info = "completing driver ";
|
||||
callback = dev->driver->pm->complete;
|
||||
}
|
||||
|
||||
if (callback) {
|
||||
pm_dev_dbg(dev, state, info);
|
||||
callback(dev);
|
||||
}
|
||||
|
||||
device_unlock(dev);
|
||||
@ -763,31 +712,29 @@ static pm_message_t resume_event(pm_message_t sleep_state)
|
||||
*/
|
||||
static int device_suspend_noirq(struct device *dev, pm_message_t state)
|
||||
{
|
||||
int error;
|
||||
pm_callback_t callback = NULL;
|
||||
char *info = NULL;
|
||||
|
||||
if (dev->pm_domain) {
|
||||
pm_dev_dbg(dev, state, "LATE power domain ");
|
||||
error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
|
||||
if (error)
|
||||
return error;
|
||||
info = "LATE power domain ";
|
||||
callback = pm_noirq_op(&dev->pm_domain->ops, state);
|
||||
} else if (dev->type && dev->type->pm) {
|
||||
pm_dev_dbg(dev, state, "LATE type ");
|
||||
error = pm_noirq_op(dev, dev->type->pm, state);
|
||||
if (error)
|
||||
return error;
|
||||
info = "LATE type ";
|
||||
callback = pm_noirq_op(dev->type->pm, state);
|
||||
} else if (dev->class && dev->class->pm) {
|
||||
pm_dev_dbg(dev, state, "LATE class ");
|
||||
error = pm_noirq_op(dev, dev->class->pm, state);
|
||||
if (error)
|
||||
return error;
|
||||
info = "LATE class ";
|
||||
callback = pm_noirq_op(dev->class->pm, state);
|
||||
} else if (dev->bus && dev->bus->pm) {
|
||||
pm_dev_dbg(dev, state, "LATE ");
|
||||
error = pm_noirq_op(dev, dev->bus->pm, state);
|
||||
if (error)
|
||||
return error;
|
||||
info = "LATE bus ";
|
||||
callback = pm_noirq_op(dev->bus->pm, state);
|
||||
}
|
||||
|
||||
return 0;
|
||||
if (!callback && dev->driver && dev->driver->pm) {
|
||||
info = "LATE driver ";
|
||||
callback = pm_noirq_op(dev->driver->pm, state);
|
||||
}
|
||||
|
||||
return dpm_run_callback(callback, dev, state, info);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -864,6 +811,8 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
|
||||
*/
|
||||
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
|
||||
{
|
||||
pm_callback_t callback = NULL;
|
||||
char *info = NULL;
|
||||
int error = 0;
|
||||
|
||||
dpm_wait_for_children(dev, async);
|
||||
@ -884,22 +833,22 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
|
||||
device_lock(dev);
|
||||
|
||||
if (dev->pm_domain) {
|
||||
pm_dev_dbg(dev, state, "power domain ");
|
||||
error = pm_op(dev, &dev->pm_domain->ops, state);
|
||||
goto End;
|
||||
info = "power domain ";
|
||||
callback = pm_op(&dev->pm_domain->ops, state);
|
||||
goto Run;
|
||||
}
|
||||
|
||||
if (dev->type && dev->type->pm) {
|
||||
pm_dev_dbg(dev, state, "type ");
|
||||
error = pm_op(dev, dev->type->pm, state);
|
||||
goto End;
|
||||
info = "type ";
|
||||
callback = pm_op(dev->type->pm, state);
|
||||
goto Run;
|
||||
}
|
||||
|
||||
if (dev->class) {
|
||||
if (dev->class->pm) {
|
||||
pm_dev_dbg(dev, state, "class ");
|
||||
error = pm_op(dev, dev->class->pm, state);
|
||||
goto End;
|
||||
info = "class ";
|
||||
callback = pm_op(dev->class->pm, state);
|
||||
goto Run;
|
||||
} else if (dev->class->suspend) {
|
||||
pm_dev_dbg(dev, state, "legacy class ");
|
||||
error = legacy_suspend(dev, state, dev->class->suspend);
|
||||
@ -909,14 +858,23 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
|
||||
|
||||
if (dev->bus) {
|
||||
if (dev->bus->pm) {
|
||||
pm_dev_dbg(dev, state, "");
|
||||
error = pm_op(dev, dev->bus->pm, state);
|
||||
info = "bus ";
|
||||
callback = pm_op(dev->bus->pm, state);
|
||||
} else if (dev->bus->suspend) {
|
||||
pm_dev_dbg(dev, state, "legacy ");
|
||||
pm_dev_dbg(dev, state, "legacy bus ");
|
||||
error = legacy_suspend(dev, state, dev->bus->suspend);
|
||||
goto End;
|
||||
}
|
||||
}
|
||||
|
||||
Run:
|
||||
if (!callback && dev->driver && dev->driver->pm) {
|
||||
info = "driver ";
|
||||
callback = pm_op(dev->driver->pm, state);
|
||||
}
|
||||
|
||||
error = dpm_run_callback(callback, dev, state, info);
|
||||
|
||||
End:
|
||||
if (!error) {
|
||||
dev->power.is_suspended = true;
|
||||
@ -1022,6 +980,8 @@ int dpm_suspend(pm_message_t state)
|
||||
*/
|
||||
static int device_prepare(struct device *dev, pm_message_t state)
|
||||
{
|
||||
int (*callback)(struct device *) = NULL;
|
||||
char *info = NULL;
|
||||
int error = 0;
|
||||
|
||||
device_lock(dev);
|
||||
@ -1029,34 +989,29 @@ static int device_prepare(struct device *dev, pm_message_t state)
|
||||
dev->power.wakeup_path = device_may_wakeup(dev);
|
||||
|
||||
if (dev->pm_domain) {
|
||||
pm_dev_dbg(dev, state, "preparing power domain ");
|
||||
if (dev->pm_domain->ops.prepare)
|
||||
error = dev->pm_domain->ops.prepare(dev);
|
||||
suspend_report_result(dev->pm_domain->ops.prepare, error);
|
||||
if (error)
|
||||
goto End;
|
||||
info = "preparing power domain ";
|
||||
callback = dev->pm_domain->ops.prepare;
|
||||
} else if (dev->type && dev->type->pm) {
|
||||
pm_dev_dbg(dev, state, "preparing type ");
|
||||
if (dev->type->pm->prepare)
|
||||
error = dev->type->pm->prepare(dev);
|
||||
suspend_report_result(dev->type->pm->prepare, error);
|
||||
if (error)
|
||||
goto End;
|
||||
info = "preparing type ";
|
||||
callback = dev->type->pm->prepare;
|
||||
} else if (dev->class && dev->class->pm) {
|
||||
pm_dev_dbg(dev, state, "preparing class ");
|
||||
if (dev->class->pm->prepare)
|
||||
error = dev->class->pm->prepare(dev);
|
||||
suspend_report_result(dev->class->pm->prepare, error);
|
||||
if (error)
|
||||
goto End;
|
||||
info = "preparing class ";
|
||||
callback = dev->class->pm->prepare;
|
||||
} else if (dev->bus && dev->bus->pm) {
|
||||
pm_dev_dbg(dev, state, "preparing ");
|
||||
if (dev->bus->pm->prepare)
|
||||
error = dev->bus->pm->prepare(dev);
|
||||
suspend_report_result(dev->bus->pm->prepare, error);
|
||||
info = "preparing bus ";
|
||||
callback = dev->bus->pm->prepare;
|
||||
}
|
||||
|
||||
if (!callback && dev->driver && dev->driver->pm) {
|
||||
info = "preparing driver ";
|
||||
callback = dev->driver->pm->prepare;
|
||||
}
|
||||
|
||||
if (callback) {
|
||||
error = callback(dev);
|
||||
suspend_report_result(callback, error);
|
||||
}
|
||||
|
||||
End:
|
||||
device_unlock(dev);
|
||||
|
||||
return error;
|
||||
|
@ -250,6 +250,9 @@ static int rpm_idle(struct device *dev, int rpmflags)
|
||||
else
|
||||
callback = NULL;
|
||||
|
||||
if (!callback && dev->driver && dev->driver->pm)
|
||||
callback = dev->driver->pm->runtime_idle;
|
||||
|
||||
if (callback)
|
||||
__rpm_callback(callback, dev);
|
||||
|
||||
@ -413,6 +416,9 @@ static int rpm_suspend(struct device *dev, int rpmflags)
|
||||
else
|
||||
callback = NULL;
|
||||
|
||||
if (!callback && dev->driver && dev->driver->pm)
|
||||
callback = dev->driver->pm->runtime_suspend;
|
||||
|
||||
retval = rpm_callback(callback, dev);
|
||||
if (retval) {
|
||||
__update_runtime_status(dev, RPM_ACTIVE);
|
||||
@ -633,6 +639,9 @@ static int rpm_resume(struct device *dev, int rpmflags)
|
||||
else
|
||||
callback = NULL;
|
||||
|
||||
if (!callback && dev->driver && dev->driver->pm)
|
||||
callback = dev->driver->pm->runtime_resume;
|
||||
|
||||
retval = rpm_callback(callback, dev);
|
||||
if (retval) {
|
||||
__update_runtime_status(dev, RPM_SUSPENDED);
|
||||
|
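Editor's note: the three rpm_idle/rpm_suspend/rpm_resume hunks above add a fallback to dev->driver->pm when the device's subsystem supplies no runtime callback. The sketch below shows a driver that simply fills in its own dev_pm_ops and lets the core pick the callbacks up; the driver name and platform_driver packaging are illustrative assumptions, registered in the usual way with platform_driver_register().

#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int example_runtime_suspend(struct device *dev)
{
        dev_dbg(dev, "runtime suspend\n");
        return 0;
}

static int example_runtime_resume(struct device *dev)
{
        dev_dbg(dev, "runtime resume\n");
        return 0;
}

static const struct dev_pm_ops example_pm_ops = {
        SET_RUNTIME_PM_OPS(example_runtime_suspend, example_runtime_resume, NULL)
};

static struct platform_driver example_driver = {
        .driver = {
                .name = "example-dev",
                .pm   = &example_pm_ops,
        },
};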
@ -475,8 +475,6 @@ static int btmrvl_service_main_thread(void *data)
|
||||
|
||||
init_waitqueue_entry(&wait, current);
|
||||
|
||||
current->flags |= PF_NOFREEZE;
|
||||
|
||||
for (;;) {
|
||||
add_wait_queue(&thread->wait_q, &wait);
|
||||
|
||||
|
@ -214,9 +214,18 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
|
||||
return error_count;
|
||||
}
|
||||
|
||||
static void dmatest_callback(void *completion)
|
||||
/* poor man's completion - we want to use wait_event_freezable() on it */
|
||||
struct dmatest_done {
|
||||
bool done;
|
||||
wait_queue_head_t *wait;
|
||||
};
|
||||
|
||||
static void dmatest_callback(void *arg)
|
||||
{
|
||||
complete(completion);
|
||||
struct dmatest_done *done = arg;
|
||||
|
||||
done->done = true;
|
||||
wake_up_all(done->wait);
|
||||
}
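Editor's note: dmatest replaces its struct completion with an open-coded done flag plus wait queue so that the waiter can use wait_event_freezable_timeout() instead of looping around try_to_freeze(). A minimal restatement of that pattern, with illustrative "demo" names and the wait queue head supplied by the caller (e.g. via DECLARE_WAIT_QUEUE_HEAD_ONSTACK), is sketched here.

#include <linux/freezer.h>
#include <linux/jiffies.h>
#include <linux/wait.h>

struct demo_done {
        bool done;
        wait_queue_head_t *wait;
};

/* Completion-style callback, usable from the DMA callback context. */
static void demo_complete(struct demo_done *d)
{
        d->done = true;
        wake_up_all(d->wait);
}

/* Waiter side: sleeps freezably, so it does not block the freezer. */
static long demo_wait(struct demo_done *d, unsigned long timeout_ms)
{
        return wait_event_freezable_timeout(*d->wait, d->done,
                                            msecs_to_jiffies(timeout_ms));
}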
|
||||
|
||||
/*
|
||||
@ -235,7 +244,9 @@ static void dmatest_callback(void *completion)
|
||||
*/
|
||||
static int dmatest_func(void *data)
|
||||
{
|
||||
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
|
||||
struct dmatest_thread *thread = data;
|
||||
struct dmatest_done done = { .wait = &done_wait };
|
||||
struct dma_chan *chan;
|
||||
const char *thread_name;
|
||||
unsigned int src_off, dst_off, len;
|
||||
@ -252,7 +263,7 @@ static int dmatest_func(void *data)
|
||||
int i;
|
||||
|
||||
thread_name = current->comm;
|
||||
set_freezable_with_signal();
|
||||
set_freezable();
|
||||
|
||||
ret = -ENOMEM;
|
||||
|
||||
@ -306,9 +317,6 @@ static int dmatest_func(void *data)
|
||||
struct dma_async_tx_descriptor *tx = NULL;
|
||||
dma_addr_t dma_srcs[src_cnt];
|
||||
dma_addr_t dma_dsts[dst_cnt];
|
||||
struct completion cmp;
|
||||
unsigned long start, tmo, end = 0 /* compiler... */;
|
||||
bool reload = true;
|
||||
u8 align = 0;
|
||||
|
||||
total_tests++;
|
||||
@ -391,9 +399,9 @@ static int dmatest_func(void *data)
|
||||
continue;
|
||||
}
|
||||
|
||||
init_completion(&cmp);
|
||||
done.done = false;
|
||||
tx->callback = dmatest_callback;
|
||||
tx->callback_param = &cmp;
|
||||
tx->callback_param = &done;
|
||||
cookie = tx->tx_submit(tx);
|
||||
|
||||
if (dma_submit_error(cookie)) {
|
||||
@ -407,20 +415,20 @@ static int dmatest_func(void *data)
|
||||
}
|
||||
dma_async_issue_pending(chan);
|
||||
|
||||
do {
|
||||
start = jiffies;
|
||||
if (reload)
|
||||
end = start + msecs_to_jiffies(timeout);
|
||||
else if (end <= start)
|
||||
end = start + 1;
|
||||
tmo = wait_for_completion_interruptible_timeout(&cmp,
|
||||
end - start);
|
||||
reload = try_to_freeze();
|
||||
} while (tmo == -ERESTARTSYS);
|
||||
wait_event_freezable_timeout(done_wait, done.done,
|
||||
msecs_to_jiffies(timeout));
|
||||
|
||||
status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
|
||||
|
||||
if (tmo == 0) {
|
||||
if (!done.done) {
|
||||
/*
|
||||
* We're leaving the timed out dma operation with
|
||||
* dangling pointer to done_wait. To make this
|
||||
* correct, we'll need to allocate wait_done for
|
||||
* each test iteration and perform "who's gonna
|
||||
* free it this time?" dancing. For now, just
|
||||
* leave it dangling.
|
||||
*/
|
||||
pr_warning("%s: #%u: test timed out\n",
|
||||
thread_name, total_tests - 1);
|
||||
failed_tests++;
|
||||
|
@ -138,8 +138,6 @@ static int twl6030_irq_thread(void *data)
|
||||
static const unsigned max_i2c_errors = 100;
|
||||
int ret;
|
||||
|
||||
current->flags |= PF_NOFREEZE;
|
||||
|
||||
while (!kthread_should_stop()) {
|
||||
int i;
|
||||
union {
|
||||
|
@ -750,7 +750,7 @@ static int stir_transmit_thread(void *arg)
|
||||
|
||||
write_reg(stir, REG_CTRL1, CTRL1_TXPWD|CTRL1_RXPWD);
|
||||
|
||||
refrigerator();
|
||||
try_to_freeze();
|
||||
|
||||
if (change_speed(stir, stir->speed))
|
||||
break;
|
||||
|
@ -2456,8 +2456,9 @@ static int hotkey_kthread(void *data)
|
||||
u32 poll_mask, event_mask;
|
||||
unsigned int si, so;
|
||||
unsigned long t;
|
||||
unsigned int change_detector, must_reset;
|
||||
unsigned int change_detector;
|
||||
unsigned int poll_freq;
|
||||
bool was_frozen;
|
||||
|
||||
mutex_lock(&hotkey_thread_mutex);
|
||||
|
||||
@ -2488,14 +2489,14 @@ static int hotkey_kthread(void *data)
|
||||
t = 100; /* should never happen... */
|
||||
}
|
||||
t = msleep_interruptible(t);
|
||||
if (unlikely(kthread_should_stop()))
|
||||
if (unlikely(kthread_freezable_should_stop(&was_frozen)))
|
||||
break;
|
||||
must_reset = try_to_freeze();
|
||||
if (t > 0 && !must_reset)
|
||||
|
||||
if (t > 0 && !was_frozen)
|
||||
continue;
|
||||
|
||||
mutex_lock(&hotkey_thread_data_mutex);
|
||||
if (must_reset || hotkey_config_change != change_detector) {
|
||||
if (was_frozen || hotkey_config_change != change_detector) {
|
||||
/* forget old state on thaw or config change */
|
||||
si = so;
|
||||
t = 0;
|
||||
@ -2528,10 +2529,6 @@ exit:
|
||||
static void hotkey_poll_stop_sync(void)
|
||||
{
|
||||
if (tpacpi_hotkey_task) {
|
||||
if (frozen(tpacpi_hotkey_task) ||
|
||||
freezing(tpacpi_hotkey_task))
|
||||
thaw_process(tpacpi_hotkey_task);
|
||||
|
||||
kthread_stop(tpacpi_hotkey_task);
|
||||
tpacpi_hotkey_task = NULL;
|
||||
mutex_lock(&hotkey_thread_mutex);
|
||||
|
@ -466,8 +466,6 @@ static int rtsx_control_thread(void *__dev)
|
||||
struct rtsx_chip *chip = dev->chip;
|
||||
struct Scsi_Host *host = rtsx_to_host(dev);
|
||||
|
||||
current->flags |= PF_NOFREEZE;
|
||||
|
||||
for (;;) {
|
||||
if (wait_for_completion_interruptible(&dev->cmnd_ready))
|
||||
break;
|
||||
|
@ -831,7 +831,8 @@ static int usb_stor_scan_thread(void * __us)
|
||||
|
||||
dev_dbg(dev, "device found\n");
|
||||
|
||||
set_freezable_with_signal();
|
||||
set_freezable();
|
||||
|
||||
/*
|
||||
* Wait for the timeout to expire or for a disconnect
|
||||
*
|
||||
@ -839,16 +840,16 @@ static int usb_stor_scan_thread(void * __us)
|
||||
* fail to freeze, but we can't be non-freezable either. Nor can
|
||||
* khubd freeze while waiting for scanning to complete as it may
|
||||
* hold the device lock, causing a hang when suspending devices.
|
||||
* So we request a fake signal when freezing and use
|
||||
* interruptible sleep to kick us out of our wait early when
|
||||
* freezing happens.
|
||||
* So instead of using wait_event_freezable(), explicitly test
|
||||
* for (DONT_SCAN || freezing) in interruptible wait and proceed
|
||||
* if any of DONT_SCAN, freezing or timeout has happened.
|
||||
*/
|
||||
if (delay_use > 0) {
|
||||
dev_dbg(dev, "waiting for device to settle "
|
||||
"before scanning\n");
|
||||
wait_event_interruptible_timeout(us->delay_wait,
|
||||
test_bit(US_FLIDX_DONT_SCAN, &us->dflags),
|
||||
delay_use * HZ);
|
||||
test_bit(US_FLIDX_DONT_SCAN, &us->dflags) ||
|
||||
freezing(current), delay_use * HZ);
|
||||
}
|
||||
|
||||
/* If the device is still connected, perform the scanning */
|
||||
|
@ -334,7 +334,7 @@ again:
|
||||
if (freezing(current)) {
|
||||
worker->working = 0;
|
||||
spin_unlock_irq(&worker->lock);
|
||||
refrigerator();
|
||||
try_to_freeze();
|
||||
} else {
|
||||
spin_unlock_irq(&worker->lock);
|
||||
if (!kthread_should_stop()) {
|
||||
|
@ -1579,9 +1579,7 @@ static int cleaner_kthread(void *arg)
|
||||
btrfs_run_defrag_inodes(root->fs_info);
|
||||
}
|
||||
|
||||
if (freezing(current)) {
|
||||
refrigerator();
|
||||
} else {
|
||||
if (!try_to_freeze()) {
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
if (!kthread_should_stop())
|
||||
schedule();
|
||||
@ -1635,9 +1633,7 @@ sleep:
|
||||
wake_up_process(root->fs_info->cleaner_kthread);
|
||||
mutex_unlock(&root->fs_info->transaction_kthread_mutex);
|
||||
|
||||
if (freezing(current)) {
|
||||
refrigerator();
|
||||
} else {
|
||||
if (!try_to_freeze()) {
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
if (!kthread_should_stop() &&
|
||||
!btrfs_transaction_blocked(root->fs_info))
|
||||
|
@ -2883,8 +2883,7 @@ cont_thread:
|
||||
}
|
||||
mutex_unlock(&eli->li_list_mtx);
|
||||
|
||||
if (freezing(current))
|
||||
refrigerator();
|
||||
try_to_freeze();
|
||||
|
||||
cur = jiffies;
|
||||
if ((time_after_eq(cur, next_wakeup)) ||
|
||||
|
@ -937,7 +937,7 @@ int bdi_writeback_thread(void *data)
|
||||
|
||||
trace_writeback_thread_start(bdi);
|
||||
|
||||
while (!kthread_should_stop()) {
|
||||
while (!kthread_freezable_should_stop(NULL)) {
|
||||
/*
|
||||
* Remove own delayed wake-up timer, since we are already awake
|
||||
* and we'll take care of the periodic write-back.
|
||||
@ -967,8 +967,6 @@ int bdi_writeback_thread(void *data)
|
||||
*/
|
||||
schedule();
|
||||
}
|
||||
|
||||
try_to_freeze();
|
||||
}
|
||||
|
||||
/* Flush any work that raced with us exiting */
|
||||
|
@ -951,8 +951,8 @@ int gfs2_logd(void *data)
|
||||
wake_up(&sdp->sd_log_waitq);
|
||||
|
||||
t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;
|
||||
if (freezing(current))
|
||||
refrigerator();
|
||||
|
||||
try_to_freeze();
|
||||
|
||||
do {
|
||||
prepare_to_wait(&sdp->sd_logd_waitq, &wait,
|
||||
|
@ -1427,8 +1427,8 @@ int gfs2_quotad(void *data)
|
||||
/* Check for & recover partially truncated inodes */
|
||||
quotad_check_trunc_list(sdp);
|
||||
|
||||
if (freezing(current))
|
||||
refrigerator();
|
||||
try_to_freeze();
|
||||
|
||||
t = min(quotad_timeo, statfs_timeo);
|
||||
|
||||
prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
|
||||
|
@ -166,7 +166,7 @@ loop:
|
||||
*/
|
||||
jbd_debug(1, "Now suspending kjournald\n");
|
||||
spin_unlock(&journal->j_state_lock);
|
||||
refrigerator();
|
||||
try_to_freeze();
|
||||
spin_lock(&journal->j_state_lock);
|
||||
} else {
|
||||
/*
|
||||
|
@ -173,7 +173,7 @@ loop:
|
||||
*/
|
||||
jbd_debug(1, "Now suspending kjournald2\n");
|
||||
write_unlock(&journal->j_state_lock);
|
||||
refrigerator();
|
||||
try_to_freeze();
|
||||
write_lock(&journal->j_state_lock);
|
||||
} else {
|
||||
/*
|
||||
|
@ -2349,7 +2349,7 @@ int jfsIOWait(void *arg)
|
||||
|
||||
if (freezing(current)) {
|
||||
spin_unlock_irq(&log_redrive_lock);
|
||||
refrigerator();
|
||||
try_to_freeze();
|
||||
} else {
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
spin_unlock_irq(&log_redrive_lock);
|
||||
|
@ -2800,7 +2800,7 @@ int jfs_lazycommit(void *arg)
|
||||
|
||||
if (freezing(current)) {
|
||||
LAZY_UNLOCK(flags);
|
||||
refrigerator();
|
||||
try_to_freeze();
|
||||
} else {
|
||||
DECLARE_WAITQUEUE(wq, current);
|
||||
|
||||
@ -2994,7 +2994,7 @@ int jfs_sync(void *arg)
|
||||
|
||||
if (freezing(current)) {
|
||||
TXN_UNLOCK();
|
||||
refrigerator();
|
||||
try_to_freeze();
|
||||
} else {
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
TXN_UNLOCK();
|
||||
|
@ -38,6 +38,7 @@
|
||||
#include <linux/nfs_xdr.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/compat.h>
|
||||
#include <linux/freezer.h>
|
||||
|
||||
#include <asm/system.h>
|
||||
#include <asm/uaccess.h>
|
||||
@ -77,7 +78,7 @@ int nfs_wait_bit_killable(void *word)
|
||||
{
|
||||
if (fatal_signal_pending(current))
|
||||
return -ERESTARTSYS;
|
||||
schedule();
|
||||
freezable_schedule();
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -17,6 +17,7 @@
|
||||
#include <linux/nfs_page.h>
|
||||
#include <linux/lockd/bind.h>
|
||||
#include <linux/nfs_mount.h>
|
||||
#include <linux/freezer.h>
|
||||
|
||||
#include "iostat.h"
|
||||
#include "internal.h"
|
||||
@ -32,7 +33,7 @@ nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
|
||||
res = rpc_call_sync(clnt, msg, flags);
|
||||
if (res != -EJUKEBOX && res != -EKEYEXPIRED)
|
||||
break;
|
||||
schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
|
||||
freezable_schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
|
||||
res = -ERESTARTSYS;
|
||||
} while (!fatal_signal_pending(current));
|
||||
return res;
|
||||
|
@ -55,6 +55,7 @@
|
||||
#include <linux/sunrpc/bc_xprt.h>
|
||||
#include <linux/xattr.h>
|
||||
#include <linux/utsname.h>
|
||||
#include <linux/freezer.h>
|
||||
|
||||
#include "nfs4_fs.h"
|
||||
#include "delegation.h"
|
||||
@ -243,7 +244,7 @@ static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
|
||||
*timeout = NFS4_POLL_RETRY_MIN;
|
||||
if (*timeout > NFS4_POLL_RETRY_MAX)
|
||||
*timeout = NFS4_POLL_RETRY_MAX;
|
||||
schedule_timeout_killable(*timeout);
|
||||
freezable_schedule_timeout_killable(*timeout);
|
||||
if (fatal_signal_pending(current))
|
||||
res = -ERESTARTSYS;
|
||||
*timeout <<= 1;
|
||||
@ -3958,7 +3959,7 @@ int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4
|
||||
static unsigned long
|
||||
nfs4_set_lock_task_retry(unsigned long timeout)
|
||||
{
|
||||
schedule_timeout_killable(timeout);
|
||||
freezable_schedule_timeout_killable(timeout);
|
||||
timeout <<= 1;
|
||||
if (timeout > NFS4_LOCK_MAXTIMEOUT)
|
||||
return NFS4_LOCK_MAXTIMEOUT;
|
||||
|
@ -41,6 +41,7 @@
|
||||
#include <linux/nfs_fs.h>
|
||||
#include <linux/nfs_page.h>
|
||||
#include <linux/lockd/bind.h>
|
||||
#include <linux/freezer.h>
|
||||
#include "internal.h"
|
||||
|
||||
#define NFSDBG_FACILITY NFSDBG_PROC
|
||||
@ -59,7 +60,7 @@ nfs_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
|
||||
res = rpc_call_sync(clnt, msg, flags);
|
||||
if (res != -EKEYEXPIRED)
|
||||
break;
|
||||
schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
|
||||
freezable_schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
|
||||
res = -ERESTARTSYS;
|
||||
} while (!fatal_signal_pending(current));
|
||||
return res;
|
||||
|
@ -2470,7 +2470,7 @@ static int nilfs_segctor_thread(void *arg)
|
||||
|
||||
if (freezing(current)) {
|
||||
spin_unlock(&sci->sc_state_lock);
|
||||
refrigerator();
|
||||
try_to_freeze();
|
||||
spin_lock(&sci->sc_state_lock);
|
||||
} else {
|
||||
DEFINE_WAIT(wait);
|
||||
|
@ -1703,7 +1703,7 @@ xfsbufd(
|
||||
|
||||
if (unlikely(freezing(current))) {
|
||||
set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
|
||||
refrigerator();
|
||||
try_to_freeze();
|
||||
} else {
|
||||
clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
|
||||
}
|
||||
|
@ -5,71 +5,58 @@
|
||||
|
||||
#include <linux/sched.h>
|
||||
#include <linux/wait.h>
|
||||
#include <linux/atomic.h>
|
||||
|
||||
#ifdef CONFIG_FREEZER
|
||||
extern atomic_t system_freezing_cnt; /* nr of freezing conds in effect */
|
||||
extern bool pm_freezing; /* PM freezing in effect */
|
||||
extern bool pm_nosig_freezing; /* PM nosig freezing in effect */
|
||||
|
||||
/*
|
||||
* Check if a process has been frozen
|
||||
*/
|
||||
static inline int frozen(struct task_struct *p)
|
||||
static inline bool frozen(struct task_struct *p)
|
||||
{
|
||||
return p->flags & PF_FROZEN;
|
||||
}
|
||||
|
||||
extern bool freezing_slow_path(struct task_struct *p);
|
||||
|
||||
/*
|
||||
* Check if there is a request to freeze a process
|
||||
*/
|
||||
static inline int freezing(struct task_struct *p)
|
||||
static inline bool freezing(struct task_struct *p)
|
||||
{
|
||||
return test_tsk_thread_flag(p, TIF_FREEZE);
|
||||
}
|
||||
|
||||
/*
|
||||
* Request that a process be frozen
|
||||
*/
|
||||
static inline void set_freeze_flag(struct task_struct *p)
|
||||
{
|
||||
set_tsk_thread_flag(p, TIF_FREEZE);
|
||||
}
|
||||
|
||||
/*
|
||||
* Sometimes we may need to cancel the previous 'freeze' request
|
||||
*/
|
||||
static inline void clear_freeze_flag(struct task_struct *p)
|
||||
{
|
||||
clear_tsk_thread_flag(p, TIF_FREEZE);
|
||||
}
|
||||
|
||||
static inline bool should_send_signal(struct task_struct *p)
|
||||
{
|
||||
return !(p->flags & PF_FREEZER_NOSIG);
|
||||
if (likely(!atomic_read(&system_freezing_cnt)))
|
||||
return false;
|
||||
return freezing_slow_path(p);
|
||||
}
|
||||
|
||||
/* Takes and releases task alloc lock using task_lock() */
|
||||
extern int thaw_process(struct task_struct *p);
|
||||
extern void __thaw_task(struct task_struct *t);
|
||||
|
||||
extern void refrigerator(void);
|
||||
extern bool __refrigerator(bool check_kthr_stop);
|
||||
extern int freeze_processes(void);
|
||||
extern int freeze_kernel_threads(void);
|
||||
extern void thaw_processes(void);
|
||||
|
||||
static inline int try_to_freeze(void)
|
||||
static inline bool try_to_freeze(void)
|
||||
{
|
||||
if (freezing(current)) {
|
||||
refrigerator();
|
||||
return 1;
|
||||
} else
|
||||
return 0;
|
||||
might_sleep();
|
||||
if (likely(!freezing(current)))
|
||||
return false;
|
||||
return __refrigerator(false);
|
||||
}
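Editor's note: with refrigerator() folded into __refrigerator() and try_to_freeze() becoming the single entry point, the kernel threads converted later in this diff all end up with the same loop shape. A hedged sketch of that shape follows; the "demo" thread and its work are placeholders, not code from this commit.

#include <linux/freezer.h>
#include <linux/kthread.h>

static int demo_thread(void *data)
{
        set_freezable();                /* opt in to being frozen */

        while (!kthread_should_stop()) {
                try_to_freeze();        /* park in __refrigerator() if asked */

                /* ... do one unit of work, then sleep interruptibly ... */
                schedule_timeout_interruptible(HZ);
        }
        return 0;
}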
|
||||
|
||||
extern bool freeze_task(struct task_struct *p, bool sig_only);
|
||||
extern void cancel_freezing(struct task_struct *p);
|
||||
extern bool freeze_task(struct task_struct *p);
|
||||
extern bool set_freezable(void);
|
||||
|
||||
#ifdef CONFIG_CGROUP_FREEZER
|
||||
extern int cgroup_freezing_or_frozen(struct task_struct *task);
|
||||
extern bool cgroup_freezing(struct task_struct *task);
|
||||
#else /* !CONFIG_CGROUP_FREEZER */
|
||||
static inline int cgroup_freezing_or_frozen(struct task_struct *task)
|
||||
static inline bool cgroup_freezing(struct task_struct *task)
|
||||
{
|
||||
return 0;
|
||||
return false;
|
||||
}
|
||||
#endif /* !CONFIG_CGROUP_FREEZER */
|
||||
|
||||
@ -80,33 +67,27 @@ static inline int cgroup_freezing_or_frozen(struct task_struct *task)
|
||||
* appropriately in case the child has exited before the freezing of tasks is
|
||||
* complete. However, we don't want kernel threads to be frozen in unexpected
|
||||
* places, so we allow them to block freeze_processes() instead or to set
|
||||
* PF_NOFREEZE if needed and PF_FREEZER_SKIP is only set for userland vfork
|
||||
* parents. Fortunately, in the ____call_usermodehelper() case the parent won't
|
||||
* really block freeze_processes(), since ____call_usermodehelper() (the child)
|
||||
* does a little before exec/exit and it can't be frozen before waking up the
|
||||
* parent.
|
||||
* PF_NOFREEZE if needed. Fortunately, in the ____call_usermodehelper() case the
|
||||
* parent won't really block freeze_processes(), since ____call_usermodehelper()
|
||||
* (the child) does a little before exec/exit and it can't be frozen before
|
||||
* waking up the parent.
|
||||
*/
|
||||
|
||||
/*
|
||||
* If the current task is a user space one, tell the freezer not to count it as
|
||||
* freezable.
|
||||
*/
|
||||
|
||||
/* Tell the freezer not to count the current task as freezable. */
|
||||
static inline void freezer_do_not_count(void)
|
||||
{
|
||||
if (current->mm)
|
||||
current->flags |= PF_FREEZER_SKIP;
|
||||
current->flags |= PF_FREEZER_SKIP;
|
||||
}
|
||||
|
||||
/*
|
||||
* If the current task is a user space one, tell the freezer to count it as
|
||||
* freezable again and try to freeze it.
|
||||
* Tell the freezer to count the current task as freezable again and try to
|
||||
* freeze it.
|
||||
*/
|
||||
static inline void freezer_count(void)
|
||||
{
|
||||
if (current->mm) {
|
||||
current->flags &= ~PF_FREEZER_SKIP;
|
||||
try_to_freeze();
|
||||
}
|
||||
current->flags &= ~PF_FREEZER_SKIP;
|
||||
try_to_freeze();
|
||||
}
|
||||
|
||||
/*
|
||||
@ -118,21 +99,27 @@ static inline int freezer_should_skip(struct task_struct *p)
|
||||
}
|
||||
|
||||
/*
|
||||
* Tell the freezer that the current task should be frozen by it
|
||||
* These macros are intended to be used whenever you want allow a task that's
|
||||
* sleeping in TASK_UNINTERRUPTIBLE or TASK_KILLABLE state to be frozen. Note
|
||||
* that neither return any clear indication of whether a freeze event happened
|
||||
* while in this function.
|
||||
*/
|
||||
static inline void set_freezable(void)
|
||||
{
|
||||
current->flags &= ~PF_NOFREEZE;
|
||||
}
|
||||
|
||||
/*
|
||||
* Tell the freezer that the current task should be frozen by it and that it
|
||||
* should send a fake signal to the task to freeze it.
|
||||
*/
|
||||
static inline void set_freezable_with_signal(void)
|
||||
{
|
||||
current->flags &= ~(PF_NOFREEZE | PF_FREEZER_NOSIG);
|
||||
}
|
||||
/* Like schedule(), but should not block the freezer. */
|
||||
#define freezable_schedule() \
|
||||
({ \
|
||||
freezer_do_not_count(); \
|
||||
schedule(); \
|
||||
freezer_count(); \
|
||||
})
|
||||
|
||||
/* Like schedule_timeout_killable(), but should not block the freezer. */
|
||||
#define freezable_schedule_timeout_killable(timeout) \
|
||||
({ \
|
||||
freezer_do_not_count(); \
|
||||
schedule_timeout_killable(timeout); \
|
||||
freezer_count(); \
|
||||
})
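Editor's note: these wrappers bracket the sleep with freezer_do_not_count()/freezer_count() so that a TASK_UNINTERRUPTIBLE or TASK_KILLABLE sleep no longer holds up the freezer. The retry loop below is a hedged sketch of the NFS-style conversion shown later in this diff; op() and its error codes are illustrative assumptions.

#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/jiffies.h>

/* Retry an operation, sleeping killably between attempts without
 * blocking the freezer. */
static int demo_retry(int (*op)(void))
{
        int res;

        do {
                res = op();
                if (res != -EAGAIN)
                        break;
                freezable_schedule_timeout_killable(HZ);
                res = -ERESTARTSYS;
        } while (!fatal_signal_pending(current));

        return res;
}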
|
||||
|
||||
/*
|
||||
* Freezer-friendly wrappers around wait_event_interruptible(),
|
||||
@ -152,47 +139,51 @@ static inline void set_freezable_with_signal(void)
|
||||
#define wait_event_freezable(wq, condition) \
|
||||
({ \
|
||||
int __retval; \
|
||||
do { \
|
||||
for (;;) { \
|
||||
__retval = wait_event_interruptible(wq, \
|
||||
(condition) || freezing(current)); \
|
||||
if (__retval && !freezing(current)) \
|
||||
if (__retval || (condition)) \
|
||||
break; \
|
||||
else if (!(condition)) \
|
||||
__retval = -ERESTARTSYS; \
|
||||
} while (try_to_freeze()); \
|
||||
try_to_freeze(); \
|
||||
} \
|
||||
__retval; \
|
||||
})
|
||||
|
||||
|
||||
#define wait_event_freezable_timeout(wq, condition, timeout) \
|
||||
({ \
|
||||
long __retval = timeout; \
|
||||
do { \
|
||||
for (;;) { \
|
||||
__retval = wait_event_interruptible_timeout(wq, \
|
||||
(condition) || freezing(current), \
|
||||
__retval); \
|
||||
} while (try_to_freeze()); \
|
||||
if (__retval <= 0 || (condition)) \
|
||||
break; \
|
||||
try_to_freeze(); \
|
||||
} \
|
||||
__retval; \
|
||||
})
|
||||
#else /* !CONFIG_FREEZER */
|
||||
static inline int frozen(struct task_struct *p) { return 0; }
|
||||
static inline int freezing(struct task_struct *p) { return 0; }
|
||||
static inline void set_freeze_flag(struct task_struct *p) {}
|
||||
static inline void clear_freeze_flag(struct task_struct *p) {}
|
||||
static inline int thaw_process(struct task_struct *p) { return 1; }
|
||||
|
||||
static inline void refrigerator(void) {}
|
||||
#else /* !CONFIG_FREEZER */
|
||||
static inline bool frozen(struct task_struct *p) { return false; }
|
||||
static inline bool freezing(struct task_struct *p) { return false; }
|
||||
static inline void __thaw_task(struct task_struct *t) {}
|
||||
|
||||
static inline bool __refrigerator(bool check_kthr_stop) { return false; }
|
||||
static inline int freeze_processes(void) { return -ENOSYS; }
|
||||
static inline int freeze_kernel_threads(void) { return -ENOSYS; }
|
||||
static inline void thaw_processes(void) {}
|
||||
|
||||
static inline int try_to_freeze(void) { return 0; }
|
||||
static inline bool try_to_freeze(void) { return false; }
|
||||
|
||||
static inline void freezer_do_not_count(void) {}
|
||||
static inline void freezer_count(void) {}
|
||||
static inline int freezer_should_skip(struct task_struct *p) { return 0; }
|
||||
static inline void set_freezable(void) {}
|
||||
static inline void set_freezable_with_signal(void) {}
|
||||
|
||||
#define freezable_schedule() schedule()
|
||||
|
||||
#define freezable_schedule_timeout_killable(timeout) \
|
||||
schedule_timeout_killable(timeout)
|
||||
|
||||
#define wait_event_freezable(wq, condition) \
|
||||
wait_event_interruptible(wq, condition)
|
||||
|
@ -117,5 +117,7 @@ extern void usermodehelper_init(void);
|
||||
extern int usermodehelper_disable(void);
|
||||
extern void usermodehelper_enable(void);
|
||||
extern bool usermodehelper_is_disabled(void);
|
||||
extern void read_lock_usermodehelper(void);
|
||||
extern void read_unlock_usermodehelper(void);
|
||||
|
||||
#endif /* __LINUX_KMOD_H__ */
|
||||
|
@ -35,6 +35,7 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
|
||||
void kthread_bind(struct task_struct *k, unsigned int cpu);
|
||||
int kthread_stop(struct task_struct *k);
|
||||
int kthread_should_stop(void);
|
||||
bool kthread_freezable_should_stop(bool *was_frozen);
|
||||
void *kthread_data(struct task_struct *k);
|
||||
|
||||
int kthreadd(void *unused);
|
||||
|
@ -264,62 +264,34 @@ static inline char *early_platform_driver_setup_func(void) \
|
||||
}
|
||||
#endif /* MODULE */
|
||||
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
extern int platform_pm_prepare(struct device *dev);
|
||||
extern void platform_pm_complete(struct device *dev);
|
||||
#else
|
||||
#define platform_pm_prepare NULL
|
||||
#define platform_pm_complete NULL
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_SUSPEND
|
||||
extern int platform_pm_suspend(struct device *dev);
|
||||
extern int platform_pm_suspend_noirq(struct device *dev);
|
||||
extern int platform_pm_resume(struct device *dev);
|
||||
extern int platform_pm_resume_noirq(struct device *dev);
|
||||
#else
|
||||
#define platform_pm_suspend NULL
|
||||
#define platform_pm_resume NULL
|
||||
#define platform_pm_suspend_noirq NULL
|
||||
#define platform_pm_resume_noirq NULL
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_HIBERNATE_CALLBACKS
|
||||
extern int platform_pm_freeze(struct device *dev);
|
||||
extern int platform_pm_freeze_noirq(struct device *dev);
|
||||
extern int platform_pm_thaw(struct device *dev);
|
||||
extern int platform_pm_thaw_noirq(struct device *dev);
|
||||
extern int platform_pm_poweroff(struct device *dev);
|
||||
extern int platform_pm_poweroff_noirq(struct device *dev);
|
||||
extern int platform_pm_restore(struct device *dev);
|
||||
extern int platform_pm_restore_noirq(struct device *dev);
|
||||
#else
|
||||
#define platform_pm_freeze NULL
|
||||
#define platform_pm_thaw NULL
|
||||
#define platform_pm_poweroff NULL
|
||||
#define platform_pm_restore NULL
|
||||
#define platform_pm_freeze_noirq NULL
|
||||
#define platform_pm_thaw_noirq NULL
|
||||
#define platform_pm_poweroff_noirq NULL
|
||||
#define platform_pm_restore_noirq NULL
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
#define USE_PLATFORM_PM_SLEEP_OPS \
|
||||
.prepare = platform_pm_prepare, \
|
||||
.complete = platform_pm_complete, \
|
||||
.suspend = platform_pm_suspend, \
|
||||
.resume = platform_pm_resume, \
|
||||
.freeze = platform_pm_freeze, \
|
||||
.thaw = platform_pm_thaw, \
|
||||
.poweroff = platform_pm_poweroff, \
|
||||
.restore = platform_pm_restore, \
|
||||
.suspend_noirq = platform_pm_suspend_noirq, \
|
||||
.resume_noirq = platform_pm_resume_noirq, \
|
||||
.freeze_noirq = platform_pm_freeze_noirq, \
|
||||
.thaw_noirq = platform_pm_thaw_noirq, \
|
||||
.poweroff_noirq = platform_pm_poweroff_noirq, \
|
||||
.restore_noirq = platform_pm_restore_noirq,
|
||||
.restore = platform_pm_restore,
|
||||
#else
|
||||
#define USE_PLATFORM_PM_SLEEP_OPS
|
||||
#endif
|
||||
|
@ -300,19 +300,6 @@ const struct dev_pm_ops name = { \
|
||||
SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
|
||||
}
|
||||
|
||||
/*
|
||||
* Use this for subsystems (bus types, device types, device classes) that don't
|
||||
* need any special suspend/resume handling in addition to invoking the PM
|
||||
* callbacks provided by device drivers supporting both the system sleep PM and
|
||||
* runtime PM, make the pm member point to generic_subsys_pm_ops.
|
||||
*/
|
||||
#ifdef CONFIG_PM
|
||||
extern struct dev_pm_ops generic_subsys_pm_ops;
|
||||
#define GENERIC_SUBSYS_PM_OPS (&generic_subsys_pm_ops)
|
||||
#else
|
||||
#define GENERIC_SUBSYS_PM_OPS NULL
|
||||
#endif
|
||||
|
||||
/**
|
||||
* PM_EVENT_ messages
|
||||
*
|
||||
|
@ -220,7 +220,7 @@ extern char ___assert_task_state[1 - 2*!!(
|
||||
((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
|
||||
#define task_contributes_to_load(task) \
|
||||
((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
|
||||
(task->flags & PF_FREEZING) == 0)
|
||||
(task->flags & PF_FROZEN) == 0)
|
||||
|
||||
#define __set_task_state(tsk, state_value) \
|
||||
do { (tsk)->state = (state_value); } while (0)
|
||||
@ -1772,7 +1772,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
|
||||
#define PF_MEMALLOC 0x00000800 /* Allocating memory */
|
||||
#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
|
||||
#define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
|
||||
#define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
|
||||
#define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
|
||||
#define PF_FROZEN 0x00010000 /* frozen for system suspend */
|
||||
#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */
|
||||
@ -1788,7 +1787,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
|
||||
#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */
|
||||
#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
|
||||
#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
|
||||
#define PF_FREEZER_NOSIG 0x80000000 /* Freezer won't send signals to it */
|
||||
|
||||
/*
|
||||
* Only the _current_ task can read/write to tsk->flags, but other
|
||||
|
@ -6,6 +6,7 @@
|
||||
#include <linux/init.h>
|
||||
#include <linux/pm.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/freezer.h>
|
||||
#include <asm/errno.h>
|
||||
|
||||
#ifdef CONFIG_VT
|
||||
@ -331,6 +332,8 @@ static inline bool system_entering_hibernation(void) { return false; }
|
||||
#define PM_RESTORE_PREPARE 0x0005 /* Going to restore a saved image */
|
||||
#define PM_POST_RESTORE 0x0006 /* Restore failed */
|
||||
|
||||
extern struct mutex pm_mutex;
|
||||
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
void save_processor_state(void);
|
||||
void restore_processor_state(void);
|
||||
@ -351,6 +354,19 @@ extern bool events_check_enabled;
|
||||
extern bool pm_wakeup_pending(void);
|
||||
extern bool pm_get_wakeup_count(unsigned int *count);
|
||||
extern bool pm_save_wakeup_count(unsigned int count);
|
||||
|
||||
static inline void lock_system_sleep(void)
|
||||
{
|
||||
freezer_do_not_count();
|
||||
mutex_lock(&pm_mutex);
|
||||
}
|
||||
|
||||
static inline void unlock_system_sleep(void)
|
||||
{
|
||||
mutex_unlock(&pm_mutex);
|
||||
freezer_count();
|
||||
}
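Editor's note: lock_system_sleep() now calls freezer_do_not_count() before taking pm_mutex, so a task waiting for the mutex cannot deadlock the freezer. The sketch below shows the intended usage pattern for a subsystem (memory hot-add is the classic example); the function body is a placeholder assumption.

#include <linux/suspend.h>

static int demo_hot_add(void)
{
        int ret;

        lock_system_sleep();    /* waits for pm_mutex without blocking the freezer */
        ret = 0;                /* ... perform the operation that must not race with sleep ... */
        unlock_system_sleep();

        return ret;
}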
|
||||
|
||||
#else /* !CONFIG_PM_SLEEP */
|
||||
|
||||
static inline int register_pm_notifier(struct notifier_block *nb)
|
||||
@ -366,28 +382,11 @@ static inline int unregister_pm_notifier(struct notifier_block *nb)
|
||||
#define pm_notifier(fn, pri) do { (void)(fn); } while (0)
|
||||
|
||||
static inline bool pm_wakeup_pending(void) { return false; }
|
||||
#endif /* !CONFIG_PM_SLEEP */
|
||||
|
||||
extern struct mutex pm_mutex;
|
||||
|
||||
#ifndef CONFIG_HIBERNATE_CALLBACKS
|
||||
static inline void lock_system_sleep(void) {}
|
||||
static inline void unlock_system_sleep(void) {}
|
||||
|
||||
#else
|
||||
|
||||
/* Let some subsystems like memory hotadd exclude hibernation */
|
||||
|
||||
static inline void lock_system_sleep(void)
|
||||
{
|
||||
mutex_lock(&pm_mutex);
|
||||
}
|
||||
|
||||
static inline void unlock_system_sleep(void)
|
||||
{
|
||||
mutex_unlock(&pm_mutex);
|
||||
}
|
||||
#endif
|
||||
#endif /* !CONFIG_PM_SLEEP */
|
||||
|
||||
#ifdef CONFIG_ARCH_SAVE_PAGE_KEYS
|
||||
/*
|
||||
|
@ -48,19 +48,17 @@ static inline struct freezer *task_freezer(struct task_struct *task)
|
||||
struct freezer, css);
|
||||
}
|
||||
|
||||
static inline int __cgroup_freezing_or_frozen(struct task_struct *task)
|
||||
bool cgroup_freezing(struct task_struct *task)
|
||||
{
|
||||
enum freezer_state state = task_freezer(task)->state;
|
||||
return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN);
|
||||
}
|
||||
enum freezer_state state;
|
||||
bool ret;
|
||||
|
||||
int cgroup_freezing_or_frozen(struct task_struct *task)
|
||||
{
|
||||
int result;
|
||||
task_lock(task);
|
||||
result = __cgroup_freezing_or_frozen(task);
|
||||
task_unlock(task);
|
||||
return result;
|
||||
rcu_read_lock();
|
||||
state = task_freezer(task)->state;
|
||||
ret = state == CGROUP_FREEZING || state == CGROUP_FROZEN;
|
||||
rcu_read_unlock();
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -102,9 +100,6 @@ struct cgroup_subsys freezer_subsys;
|
||||
* freezer_can_attach():
|
||||
* cgroup_mutex (held by caller of can_attach)
|
||||
*
|
||||
* cgroup_freezing_or_frozen():
|
||||
* task->alloc_lock (to get task's cgroup)
|
||||
*
|
||||
* freezer_fork() (preserving fork() performance means can't take cgroup_mutex):
|
||||
* freezer->lock
|
||||
* sighand->siglock (if the cgroup is freezing)
|
||||
@ -130,7 +125,7 @@ struct cgroup_subsys freezer_subsys;
|
||||
* write_lock css_set_lock (cgroup iterator start)
|
||||
* task->alloc_lock
|
||||
* read_lock css_set_lock (cgroup iterator start)
|
||||
* task->alloc_lock (inside thaw_process(), prevents race with refrigerator())
|
||||
* task->alloc_lock (inside __thaw_task(), prevents race with refrigerator())
|
||||
* sighand->siglock
|
||||
*/
|
||||
static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
|
||||
@ -150,7 +145,11 @@ static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
|
||||
static void freezer_destroy(struct cgroup_subsys *ss,
|
||||
struct cgroup *cgroup)
|
||||
{
|
||||
kfree(cgroup_freezer(cgroup));
|
||||
struct freezer *freezer = cgroup_freezer(cgroup);
|
||||
|
||||
if (freezer->state != CGROUP_THAWED)
|
||||
atomic_dec(&system_freezing_cnt);
|
||||
kfree(freezer);
|
||||
}
|
||||
|
||||
/* task is frozen or will freeze immediately when next it gets woken */
|
||||
@ -184,13 +183,7 @@ static int freezer_can_attach(struct cgroup_subsys *ss,
static int freezer_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
rcu_read_lock();
if (__cgroup_freezing_or_frozen(tsk)) {
rcu_read_unlock();
return -EBUSY;
}
rcu_read_unlock();
return 0;
return cgroup_freezing(tsk) ? -EBUSY : 0;
}

static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
@@ -220,7 +213,7 @@ static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)

/* Locking avoids race with FREEZING -> THAWED transitions. */
if (freezer->state == CGROUP_FREEZING)
freeze_task(task, true);
freeze_task(task);
spin_unlock_irq(&freezer->lock);
}

@@ -238,7 +231,7 @@ static void update_if_frozen(struct cgroup *cgroup,
cgroup_iter_start(cgroup, &it);
while ((task = cgroup_iter_next(cgroup, &it))) {
ntotal++;
if (is_task_frozen_enough(task))
if (freezing(task) && is_task_frozen_enough(task))
nfrozen++;
}

@@ -286,10 +279,9 @@ static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
struct task_struct *task;
unsigned int num_cant_freeze_now = 0;

freezer->state = CGROUP_FREEZING;
cgroup_iter_start(cgroup, &it);
while ((task = cgroup_iter_next(cgroup, &it))) {
if (!freeze_task(task, true))
if (!freeze_task(task))
continue;
if (is_task_frozen_enough(task))
continue;
@@ -307,12 +299,9 @@ static void unfreeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
struct task_struct *task;

cgroup_iter_start(cgroup, &it);
while ((task = cgroup_iter_next(cgroup, &it))) {
thaw_process(task);
}
while ((task = cgroup_iter_next(cgroup, &it)))
__thaw_task(task);
cgroup_iter_end(cgroup, &it);

freezer->state = CGROUP_THAWED;
}

static int freezer_change_state(struct cgroup *cgroup,
@@ -326,20 +315,24 @@ static int freezer_change_state(struct cgroup *cgroup,
spin_lock_irq(&freezer->lock);

update_if_frozen(cgroup, freezer);
if (goal_state == freezer->state)
goto out;

switch (goal_state) {
case CGROUP_THAWED:
if (freezer->state != CGROUP_THAWED)
atomic_dec(&system_freezing_cnt);
freezer->state = CGROUP_THAWED;
unfreeze_cgroup(cgroup, freezer);
break;
case CGROUP_FROZEN:
if (freezer->state == CGROUP_THAWED)
atomic_inc(&system_freezing_cnt);
freezer->state = CGROUP_FREEZING;
retval = try_to_freeze_cgroup(cgroup, freezer);
break;
default:
BUG();
}
out:

spin_unlock_irq(&freezer->lock);

return retval;

@@ -679,8 +679,6 @@ static void exit_mm(struct task_struct * tsk)
tsk->mm = NULL;
up_read(&mm->mmap_sem);
enter_lazy_tlb(mm, current);
/* We don't want this task to be frozen prematurely */
clear_freeze_flag(tsk);
task_unlock(tsk);
mm_update_next_owner(mm);
mmput(mm);
@@ -1040,6 +1038,7 @@ NORET_TYPE void do_exit(long code)
exit_rcu();
/* causes final put_task_struct in finish_task_switch(). */
tsk->state = TASK_DEAD;
tsk->flags |= PF_NOFREEZE;	/* tell freezer to ignore us */
schedule();
BUG();
/* Avoid "noreturn function does return". */

@@ -992,7 +992,6 @@ static void copy_flags(unsigned long clone_flags, struct task_struct *p)
new_flags |= PF_FORKNOEXEC;
new_flags |= PF_STARTING;
p->flags = new_flags;
clear_freeze_flag(p);
}

SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
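The hunks above (apparently from the cgroup freezer, exit and fork paths) switch freezers over to the global system_freezing_cnt counter and the simplified freeze_task()/__thaw_task() calls. The following sketch is illustrative only, not part of the patch: it shows the pattern a freezer is now expected to follow, using only the symbols visible in the diff; the toy_* names, the task array and the missing locking are assumptions for brevity.

#include <linux/freezer.h>
#include <linux/sched.h>

/* Sketch: announce a freezing condition, then kick each target task. */
static void toy_freeze_group(struct task_struct **tasks, int n)
{
	int i;

	atomic_inc(&system_freezing_cnt);	/* freezing() may now return true */
	for (i = 0; i < n; i++)
		freeze_task(tasks[i]);		/* send each task toward the refrigerator */
}

/* Sketch: clear the condition first, then wake anything already frozen. */
static void toy_thaw_group(struct task_struct **tasks, int n)
{
	int i;

	atomic_dec(&system_freezing_cnt);
	for (i = 0; i < n; i++)
		__thaw_task(tasks[i]);
}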
209  kernel/freezer.c

@@ -9,101 +9,114 @@
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/freezer.h>
#include <linux/kthread.h>

/*
 * freezing is complete, mark current process as frozen
/* total number of freezing conditions in effect */
atomic_t system_freezing_cnt = ATOMIC_INIT(0);
EXPORT_SYMBOL(system_freezing_cnt);

/* indicate whether PM freezing is in effect, protected by pm_mutex */
bool pm_freezing;
bool pm_nosig_freezing;

/* protects freezing and frozen transitions */
static DEFINE_SPINLOCK(freezer_lock);

/**
 * freezing_slow_path - slow path for testing whether a task needs to be frozen
 * @p: task to be tested
 *
 * This function is called by freezing() if system_freezing_cnt isn't zero
 * and tests whether @p needs to enter and stay in frozen state.  Can be
 * called under any context.  The freezers are responsible for ensuring the
 * target tasks see the updated state.
 */
static inline void frozen_process(void)
bool freezing_slow_path(struct task_struct *p)
{
if (!unlikely(current->flags & PF_NOFREEZE)) {
current->flags |= PF_FROZEN;
smp_wmb();
}
clear_freeze_flag(current);
if (p->flags & PF_NOFREEZE)
return false;

if (pm_nosig_freezing || cgroup_freezing(p))
return true;

if (pm_freezing && !(p->flags & PF_KTHREAD))
return true;

return false;
}
EXPORT_SYMBOL(freezing_slow_path);

/* Refrigerator is place where frozen processes are stored :-). */
void refrigerator(void)
bool __refrigerator(bool check_kthr_stop)
{
/* Hmm, should we be allowed to suspend when there are realtime
processes around? */
long save;
bool was_frozen = false;
long save = current->state;

task_lock(current);
if (freezing(current)) {
frozen_process();
task_unlock(current);
} else {
task_unlock(current);
return;
}
save = current->state;
pr_debug("%s entered refrigerator\n", current->comm);

spin_lock_irq(&current->sighand->siglock);
recalc_sigpending(); /* We sent fake signal, clean it up */
spin_unlock_irq(&current->sighand->siglock);

/* prevent accounting of that task to load */
current->flags |= PF_FREEZING;

for (;;) {
set_current_state(TASK_UNINTERRUPTIBLE);
if (!frozen(current))

spin_lock_irq(&freezer_lock);
current->flags |= PF_FROZEN;
if (!freezing(current) ||
(check_kthr_stop && kthread_should_stop()))
current->flags &= ~PF_FROZEN;
spin_unlock_irq(&freezer_lock);

if (!(current->flags & PF_FROZEN))
break;
was_frozen = true;
schedule();
}

/* Remove the accounting blocker */
current->flags &= ~PF_FREEZING;

pr_debug("%s left refrigerator\n", current->comm);
__set_current_state(save);

/*
 * Restore saved task state before returning.  The mb'd version
 * needs to be used; otherwise, it might silently break
 * synchronization which depends on ordered task state change.
 */
set_current_state(save);

return was_frozen;
}
EXPORT_SYMBOL(refrigerator);
EXPORT_SYMBOL(__refrigerator);

static void fake_signal_wake_up(struct task_struct *p)
{
unsigned long flags;

spin_lock_irqsave(&p->sighand->siglock, flags);
signal_wake_up(p, 0);
spin_unlock_irqrestore(&p->sighand->siglock, flags);
if (lock_task_sighand(p, &flags)) {
signal_wake_up(p, 0);
unlock_task_sighand(p, &flags);
}
}

/**
 * freeze_task - send a freeze request to given task
 * @p: task to send the request to
 * @sig_only: if set, the request will only be sent if the task has the
 *	PF_FREEZER_NOSIG flag unset
 * Return value: 'false', if @sig_only is set and the task has
 *	PF_FREEZER_NOSIG set or the task is frozen, 'true', otherwise
 * freeze_task - send a freeze request to given task
 * @p: task to send the request to
 *
 * The freeze request is sent by setting the tasks's TIF_FREEZE flag and
 * either sending a fake signal to it or waking it up, depending on whether
 * or not it has PF_FREEZER_NOSIG set.  If @sig_only is set and the task
 * has PF_FREEZER_NOSIG set (ie. it is a typical kernel thread), its
 * TIF_FREEZE flag will not be set.
 * If @p is freezing, the freeze request is sent by setting %TIF_FREEZE
 * flag and either sending a fake signal to it or waking it up, depending
 * on whether it has %PF_FREEZER_NOSIG set.
 *
 * RETURNS:
 * %false, if @p is not freezing or already frozen; %true, otherwise
 */
bool freeze_task(struct task_struct *p, bool sig_only)
bool freeze_task(struct task_struct *p)
{
/*
 * We first check if the task is freezing and next if it has already
 * been frozen to avoid the race with frozen_process() which first marks
 * the task as frozen and next clears its TIF_FREEZE.
 */
if (!freezing(p)) {
smp_rmb();
if (frozen(p))
return false;
unsigned long flags;

if (!sig_only || should_send_signal(p))
set_freeze_flag(p);
else
return false;
spin_lock_irqsave(&freezer_lock, flags);
if (!freezing(p) || frozen(p)) {
spin_unlock_irqrestore(&freezer_lock, flags);
return false;
}

if (should_send_signal(p)) {
if (!(p->flags & PF_KTHREAD)) {
fake_signal_wake_up(p);
/*
 * fake_signal_wake_up() goes through p's scheduler
@@ -111,56 +124,48 @@ bool freeze_task(struct task_struct *p, bool sig_only)
 * TASK_RUNNING transition can't race with task state
 * testing in try_to_freeze_tasks().
 */
} else if (sig_only) {
return false;
} else {
wake_up_state(p, TASK_INTERRUPTIBLE);
}

spin_unlock_irqrestore(&freezer_lock, flags);
return true;
}

void cancel_freezing(struct task_struct *p)
void __thaw_task(struct task_struct *p)
{
unsigned long flags;

if (freezing(p)) {
pr_debug(" clean up: %s\n", p->comm);
clear_freeze_flag(p);
spin_lock_irqsave(&p->sighand->siglock, flags);
recalc_sigpending_and_wake(p);
spin_unlock_irqrestore(&p->sighand->siglock, flags);
}
}

static int __thaw_process(struct task_struct *p)
{
if (frozen(p)) {
p->flags &= ~PF_FROZEN;
return 1;
}
clear_freeze_flag(p);
return 0;
}

/*
 * Wake up a frozen process
 *
 * task_lock() is needed to prevent the race with refrigerator() which may
 * occur if the freezing of tasks fails.  Namely, without the lock, if the
 * freezing of tasks failed, thaw_tasks() might have run before a task in
 * refrigerator() could call frozen_process(), in which case the task would be
 * frozen and no one would thaw it.
 */
int thaw_process(struct task_struct *p)
{
task_lock(p);
if (__thaw_process(p) == 1) {
task_unlock(p);
/*
 * Clear freezing and kick @p if FROZEN.  Clearing is guaranteed to
 * be visible to @p as waking up implies wmb.  Waking up inside
 * freezer_lock also prevents wakeups from leaking outside
 * refrigerator.
 */
spin_lock_irqsave(&freezer_lock, flags);
if (frozen(p))
wake_up_process(p);
return 1;
}
task_unlock(p);
return 0;
spin_unlock_irqrestore(&freezer_lock, flags);
}
EXPORT_SYMBOL(thaw_process);

/**
 * set_freezable - make %current freezable
 *
 * Mark %current freezable and enter refrigerator if necessary.
 */
bool set_freezable(void)
{
might_sleep();

/*
 * Modify flags while holding freezer_lock.  This ensures the
 * freezer notices that we aren't frozen yet or the freezing
 * condition is visible to try_to_freeze() below.
 */
spin_lock_irq(&freezer_lock);
current->flags &= ~PF_NOFREEZE;
spin_unlock_irq(&freezer_lock);

return try_to_freeze();
}
EXPORT_SYMBOL(set_freezable);
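The rewritten kernel/freezer.c above pairs with inline helpers in include/linux/freezer.h that are not shown in this diff. Roughly, and as a sketch rather than a verbatim quote of that header, the fast path tests system_freezing_cnt before falling back to freezing_slow_path(), and try_to_freeze() forwards to the new __refrigerator():

/* Sketch of the header-side counterparts assumed by the code above. */
static inline bool toy_freezing(struct task_struct *p)
{
	if (likely(!atomic_read(&system_freezing_cnt)))
		return false;			/* no freezer active: one atomic read, no locks */
	return freezing_slow_path(p);		/* PM and cgroup conditions checked here */
}

static inline bool toy_try_to_freeze(void)
{
	might_sleep();
	if (likely(!toy_freezing(current)))
		return false;
	return __refrigerator(false);		/* only kthread_freezable_should_stop() passes true */
}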
@@ -1523,7 +1523,7 @@ int kernel_kexec(void)

#ifdef CONFIG_KEXEC_JUMP
if (kexec_image->preserve_context) {
mutex_lock(&pm_mutex);
lock_system_sleep();
pm_prepare_console();
error = freeze_processes();
if (error) {
@@ -1576,7 +1576,7 @@ int kernel_kexec(void)
thaw_processes();
Restore_console:
pm_restore_console();
mutex_unlock(&pm_mutex);
unlock_system_sleep();
}
#endif
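Several hunks in this merge replace direct mutex_lock(&pm_mutex) pairs with lock_system_sleep()/unlock_system_sleep(). As of this series those helpers are, roughly, thin wrappers that also mark the caller as not blocking the freezer; the sketch below is an assumption about their shape, not a quote of include/linux/suspend.h:

/* Sketch: what lock_system_sleep()/unlock_system_sleep() are assumed to do. */
static inline void toy_lock_system_sleep(void)
{
	freezer_do_not_count();		/* don't let this sleeper stall freezing */
	mutex_lock(&pm_mutex);
}

static inline void toy_unlock_system_sleep(void)
{
	mutex_unlock(&pm_mutex);
	freezer_count();		/* re-enter freezer accounting, freeze if needed */
}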
@@ -34,6 +34,9 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/resource.h>
#include <linux/notifier.h>
#include <linux/suspend.h>
#include <linux/rwsem.h>
#include <asm/uaccess.h>

#include <trace/events/module.h>
@@ -48,6 +51,7 @@ static struct workqueue_struct *khelper_wq;
static kernel_cap_t usermodehelper_bset = CAP_FULL_SET;
static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET;
static DEFINE_SPINLOCK(umh_sysctl_lock);
static DECLARE_RWSEM(umhelper_sem);

#ifdef CONFIG_MODULES

@@ -273,6 +277,7 @@ static void __call_usermodehelper(struct work_struct *work)
* If set, call_usermodehelper_exec() will exit immediately returning -EBUSY
* (used for preventing user land processes from being created after the user
* land has been frozen during a system-wide hibernation or suspend operation).
* Should always be manipulated under umhelper_sem acquired for write.
*/
static int usermodehelper_disabled = 1;

@@ -291,6 +296,18 @@ static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);
*/
#define RUNNING_HELPERS_TIMEOUT (5 * HZ)

void read_lock_usermodehelper(void)
{
down_read(&umhelper_sem);
}
EXPORT_SYMBOL_GPL(read_lock_usermodehelper);

void read_unlock_usermodehelper(void)
{
up_read(&umhelper_sem);
}
EXPORT_SYMBOL_GPL(read_unlock_usermodehelper);

/**
* usermodehelper_disable - prevent new helpers from being started
*/
@@ -298,8 +315,10 @@ int usermodehelper_disable(void)
{
long retval;

down_write(&umhelper_sem);
usermodehelper_disabled = 1;
smp_mb();
up_write(&umhelper_sem);

/*
* From now on call_usermodehelper_exec() won't start any new
* helpers, so it is sufficient if running_helpers turns out to
@@ -312,7 +331,9 @@ int usermodehelper_disable(void)
if (retval)
return 0;

down_write(&umhelper_sem);
usermodehelper_disabled = 0;
up_write(&umhelper_sem);
return -EAGAIN;
}

@@ -321,7 +342,9 @@ int usermodehelper_disable(void)
*/
void usermodehelper_enable(void)
{
down_write(&umhelper_sem);
usermodehelper_disabled = 0;
up_write(&umhelper_sem);
}

/**
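The new umhelper_sem read/write locking exists so that a caller can check usermodehelper_is_disabled() and act on the answer without racing against the freezer flipping the flag. A minimal caller sketch, assuming only the exported read_lock_usermodehelper()/read_unlock_usermodehelper() pair and the existing call_usermodehelper() API (the toy_ name and argument handling are illustrative):

/* Sketch: run a helper only if user space has not been frozen. */
static int toy_run_helper(char *path, char **argv, char **envp)
{
	int ret;

	read_lock_usermodehelper();
	if (usermodehelper_is_disabled()) {
		ret = -EBUSY;		/* suspend/hibernation in progress */
		goto out;
	}
	ret = call_usermodehelper(path, argv, envp, UMH_WAIT_EXEC);
out:
	read_unlock_usermodehelper();
	return ret;
}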
@@ -58,6 +58,31 @@ int kthread_should_stop(void)
}
EXPORT_SYMBOL(kthread_should_stop);

/**
* kthread_freezable_should_stop - should this freezable kthread return now?
* @was_frozen: optional out parameter, indicates whether %current was frozen
*
* kthread_should_stop() for freezable kthreads, which will enter
* refrigerator if necessary.  This function is safe from kthread_stop() /
* freezer deadlock and freezable kthreads should use this function instead
* of calling try_to_freeze() directly.
*/
bool kthread_freezable_should_stop(bool *was_frozen)
{
bool frozen = false;

might_sleep();

if (unlikely(freezing(current)))
frozen = __refrigerator(true);

if (was_frozen)
*was_frozen = frozen;

return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);

/**
* kthread_data - return data value specified on kthread creation
* @task: kthread task in question
@@ -257,7 +282,7 @@ int kthreadd(void *unused)
set_cpus_allowed_ptr(tsk, cpu_all_mask);
set_mems_allowed(node_states[N_HIGH_MEMORY]);

current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;
current->flags |= PF_NOFREEZE;

for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
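kthread_freezable_should_stop() is meant to be the one call a freezable kernel thread makes at the top of its loop, so that it both honours kthread_stop() and enters the refrigerator without deadlocking. A minimal usage sketch (the toy_worker name and the fake "unit of work" are illustrative, not from the patch):

/* Sketch: main loop of a freezable kthread using the new helper. */
static int toy_worker(void *data)
{
	set_freezable();				/* clear PF_NOFREEZE for this kthread */

	while (!kthread_freezable_should_stop(NULL)) {
		/* ... do one unit of work ... */
		schedule_timeout_interruptible(HZ);	/* placeholder for real waiting */
	}
	return 0;
}

The thread would be started with kthread_run(toy_worker, NULL, "toy_worker") and terminated with kthread_stop(); the helper guarantees the stop request is noticed even if the thread is sitting frozen.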
@@ -43,8 +43,6 @@ int in_suspend __nosavedata;
enum {
HIBERNATION_INVALID,
HIBERNATION_PLATFORM,
HIBERNATION_TEST,
HIBERNATION_TESTPROC,
HIBERNATION_SHUTDOWN,
HIBERNATION_REBOOT,
/* keep last */
@@ -55,7 +53,7 @@ enum {

static int hibernation_mode = HIBERNATION_SHUTDOWN;

static bool freezer_test_done;
bool freezer_test_done;

static const struct platform_hibernation_ops *hibernation_ops;

@@ -71,14 +69,14 @@ void hibernation_set_ops(const struct platform_hibernation_ops *ops)
WARN_ON(1);
return;
}
mutex_lock(&pm_mutex);
lock_system_sleep();
hibernation_ops = ops;
if (ops)
hibernation_mode = HIBERNATION_PLATFORM;
else if (hibernation_mode == HIBERNATION_PLATFORM)
hibernation_mode = HIBERNATION_SHUTDOWN;

mutex_unlock(&pm_mutex);
unlock_system_sleep();
}

static bool entering_platform_hibernation;
@@ -96,15 +94,6 @@ static void hibernation_debug_sleep(void)
mdelay(5000);
}

static int hibernation_testmode(int mode)
{
if (hibernation_mode == mode) {
hibernation_debug_sleep();
return 1;
}
return 0;
}

static int hibernation_test(int level)
{
if (pm_test_level == level) {
@@ -114,7 +103,6 @@ static int hibernation_test(int level)
return 0;
}
#else /* !CONFIG_PM_DEBUG */
static int hibernation_testmode(int mode) { return 0; }
static int hibernation_test(int level) { return 0; }
#endif /* !CONFIG_PM_DEBUG */

@@ -278,8 +266,7 @@ static int create_image(int platform_mode)
goto Platform_finish;

error = disable_nonboot_cpus();
if (error || hibernation_test(TEST_CPUS)
|| hibernation_testmode(HIBERNATION_TEST))
if (error || hibernation_test(TEST_CPUS))
goto Enable_cpus;

local_irq_disable();
@@ -333,7 +320,7 @@ static int create_image(int platform_mode)
*/
int hibernation_snapshot(int platform_mode)
{
pm_message_t msg = PMSG_RECOVER;
pm_message_t msg;
int error;

error = platform_begin(platform_mode);
@@ -349,8 +336,7 @@ int hibernation_snapshot(int platform_mode)
if (error)
goto Cleanup;

if (hibernation_test(TEST_FREEZER) ||
hibernation_testmode(HIBERNATION_TESTPROC)) {
if (hibernation_test(TEST_FREEZER)) {

/*
* Indicate to the caller that we are returning due to a
@@ -362,26 +348,26 @@ int hibernation_snapshot(int platform_mode)

error = dpm_prepare(PMSG_FREEZE);
if (error) {
dpm_complete(msg);
dpm_complete(PMSG_RECOVER);
goto Cleanup;
}

suspend_console();
pm_restrict_gfp_mask();

error = dpm_suspend(PMSG_FREEZE);
if (error)
goto Recover_platform;

if (hibernation_test(TEST_DEVICES))
goto Recover_platform;
if (error || hibernation_test(TEST_DEVICES))
platform_recover(platform_mode);
else
error = create_image(platform_mode);

error = create_image(platform_mode);
/*
* Control returns here (1) after the image has been created or the
* In the case that we call create_image() above, the control
* returns here (1) after the image has been created or the
* image creation has failed and (2) after a successful restore.
*/

Resume_devices:
/* We may need to release the preallocated image pages here. */
if (error || !in_suspend)
swsusp_free();
@@ -399,10 +385,6 @@ int hibernation_snapshot(int platform_mode)
platform_end(platform_mode);
return error;

Recover_platform:
platform_recover(platform_mode);
goto Resume_devices;

Cleanup:
swsusp_free();
goto Close;
@@ -590,9 +572,6 @@ int hibernation_platform_enter(void)
static void power_down(void)
{
switch (hibernation_mode) {
case HIBERNATION_TEST:
case HIBERNATION_TESTPROC:
break;
case HIBERNATION_REBOOT:
kernel_restart(NULL);
break;
@@ -611,17 +590,6 @@ static void power_down(void)
while(1);
}

static int prepare_processes(void)
{
int error = 0;

if (freeze_processes()) {
error = -EBUSY;
thaw_processes();
}
return error;
}

/**
* hibernate - Carry out system hibernation, including saving the image.
*/
@@ -629,7 +597,7 @@ int hibernate(void)
{
int error;

mutex_lock(&pm_mutex);
lock_system_sleep();
/* The snapshot device should not be opened while we're running */
if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
error = -EBUSY;
@@ -654,7 +622,7 @@ int hibernate(void)
sys_sync();
printk("done.\n");

error = prepare_processes();
error = freeze_processes();
if (error)
goto Finish;

@@ -697,7 +665,7 @@ int hibernate(void)
pm_restore_console();
atomic_inc(&snapshot_device_available);
Unlock:
mutex_unlock(&pm_mutex);
unlock_system_sleep();
return error;
}

@@ -811,11 +779,13 @@ static int software_resume(void)
goto close_finish;

error = create_basic_memory_bitmaps();
if (error)
if (error) {
usermodehelper_enable();
goto close_finish;
}

pr_debug("PM: Preparing processes for restore.\n");
error = prepare_processes();
error = freeze_processes();
if (error) {
swsusp_close(FMODE_READ);
goto Done;
@@ -855,8 +825,6 @@ static const char * const hibernation_modes[] = {
[HIBERNATION_PLATFORM] = "platform",
[HIBERNATION_SHUTDOWN] = "shutdown",
[HIBERNATION_REBOOT] = "reboot",
[HIBERNATION_TEST] = "test",
[HIBERNATION_TESTPROC] = "testproc",
};

/*
@@ -865,17 +833,15 @@ static const char * const hibernation_modes[] = {
* Hibernation can be handled in several ways.  There are a few different ways
* to put the system into the sleep state: using the platform driver (e.g. ACPI
* or other hibernation_ops), powering it off or rebooting it (for testing
* mostly), or using one of the two available test modes.
* mostly).
*
* The sysfs file /sys/power/disk provides an interface for selecting the
* hibernation mode to use.  Reading from this file causes the available modes
* to be printed.  There are 5 modes that can be supported:
* to be printed.  There are 3 modes that can be supported:
*
* 'platform'
* 'shutdown'
* 'reboot'
* 'test'
* 'testproc'
*
* If a platform hibernation driver is in use, 'platform' will be supported
* and will be used by default.  Otherwise, 'shutdown' will be used by default.
@@ -899,8 +865,6 @@ static ssize_t disk_show(struct kobject *kobj, struct kobj_attribute *attr,
switch (i) {
case HIBERNATION_SHUTDOWN:
case HIBERNATION_REBOOT:
case HIBERNATION_TEST:
case HIBERNATION_TESTPROC:
break;
case HIBERNATION_PLATFORM:
if (hibernation_ops)
@@ -929,7 +893,7 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
p = memchr(buf, '\n', n);
len = p ? p - buf : n;

mutex_lock(&pm_mutex);
lock_system_sleep();
for (i = HIBERNATION_FIRST; i <= HIBERNATION_MAX; i++) {
if (len == strlen(hibernation_modes[i])
&& !strncmp(buf, hibernation_modes[i], len)) {
@@ -941,8 +905,6 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
switch (mode) {
case HIBERNATION_SHUTDOWN:
case HIBERNATION_REBOOT:
case HIBERNATION_TEST:
case HIBERNATION_TESTPROC:
hibernation_mode = mode;
break;
case HIBERNATION_PLATFORM:
@@ -957,7 +919,7 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
if (!error)
pr_debug("PM: Hibernation mode set to '%s'\n",
hibernation_modes[mode]);
mutex_unlock(&pm_mutex);
unlock_system_sleep();
return error ? error : n;
}

@@ -984,9 +946,9 @@ static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr,
if (maj != MAJOR(res) || min != MINOR(res))
goto out;

mutex_lock(&pm_mutex);
lock_system_sleep();
swsusp_resume_device = res;
mutex_unlock(&pm_mutex);
unlock_system_sleep();
printk(KERN_INFO "PM: Starting manual resume from disk\n");
noresume = 0;
software_resume();
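With the deprecated 'test'/'testproc' hibernation modes gone, the only remaining freezer-level test is pm_test's TEST_FREEZER, signalled back to the caller through the now-global freezer_test_done flag. The outline below is an assumed simplification of hibernate()'s resulting control flow, not the patch itself; toy_hibernate_outline and the elided steps (console, devices, power_down) are placeholders:

/* Sketch: how hibernate() treats a freezer-only test run after this series. */
static int toy_hibernate_outline(void)
{
	int error;

	lock_system_sleep();
	error = freeze_processes();		/* replaces the old prepare_processes() */
	if (error)
		goto Unlock;

	error = hibernation_snapshot(0);	/* sets freezer_test_done on TEST_FREEZER */
	if (!error && !freezer_test_done && in_suspend)
		error = swsusp_write(0);	/* real code also powers down afterwards */

	if (freezer_test_done)
		freezer_test_done = false;	/* test run: nothing was saved */

	thaw_processes();
Unlock:
	unlock_system_sleep();
	return error;
}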
@@ -3,7 +3,7 @@
*
* Copyright (c) 2003 Patrick Mochel
* Copyright (c) 2003 Open Source Development Lab
*
*
* This file is released under the GPLv2
*
*/
@@ -116,7 +116,7 @@ static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
p = memchr(buf, '\n', n);
len = p ? p - buf : n;

mutex_lock(&pm_mutex);
lock_system_sleep();

level = TEST_FIRST;
for (s = &pm_tests[level]; level <= TEST_MAX; s++, level++)
@@ -126,7 +126,7 @@ static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
break;
}

mutex_unlock(&pm_mutex);
unlock_system_sleep();

return error ? error : n;
}
@@ -240,7 +240,7 @@ struct kobject *power_kobj;
* 'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and
* 'disk' (Suspend-to-Disk).
*
* store() accepts one of those strings, translates it into the
* store() accepts one of those strings, translates it into the
* proper enumerated value, and initiates a suspend transition.
*/
static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
@@ -282,7 +282,7 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
/* First, check if we are requested to hibernate */
if (len == 4 && !strncmp(buf, "disk", len)) {
error = hibernate();
goto Exit;
goto Exit;
}

#ifdef CONFIG_SUSPEND
@@ -50,6 +50,8 @@ static inline char *check_image_kernel(struct swsusp_info *info)
#define SPARE_PAGES	((1024 * 1024) >> PAGE_SHIFT)

/* kernel/power/hibernate.c */
extern bool freezer_test_done;

extern int hibernation_snapshot(int platform_mode);
extern int hibernation_restore(int platform_mode);
extern int hibernation_platform_enter(void);
@@ -22,16 +22,7 @@
*/
#define TIMEOUT	(20 * HZ)

static inline int freezable(struct task_struct * p)
{
if ((p == current) ||
(p->flags & PF_NOFREEZE) ||
(p->exit_state != 0))
return 0;
return 1;
}

static int try_to_freeze_tasks(bool sig_only)
static int try_to_freeze_tasks(bool user_only)
{
struct task_struct *g, *p;
unsigned long end_time;
@@ -46,17 +37,14 @@ static int try_to_freeze_tasks(bool sig_only)

end_time = jiffies + TIMEOUT;

if (!sig_only)
if (!user_only)
freeze_workqueues_begin();

while (true) {
todo = 0;
read_lock(&tasklist_lock);
do_each_thread(g, p) {
if (frozen(p) || !freezable(p))
continue;

if (!freeze_task(p, sig_only))
if (p == current || !freeze_task(p))
continue;

/*
@@ -77,7 +65,7 @@ static int try_to_freeze_tasks(bool sig_only)
} while_each_thread(g, p);
read_unlock(&tasklist_lock);

if (!sig_only) {
if (!user_only) {
wq_busy = freeze_workqueues_busy();
todo += wq_busy;
}
@@ -103,11 +91,6 @@ static int try_to_freeze_tasks(bool sig_only)
elapsed_csecs = elapsed_csecs64;

if (todo) {
/* This does not unfreeze processes that are already frozen
* (we have slightly ugly calling convention in that respect,
* and caller must call thaw_processes() if something fails),
* but it cleans up leftover PF_FREEZE requests.
*/
printk("\n");
printk(KERN_ERR "Freezing of tasks %s after %d.%02d seconds "
"(%d tasks refusing to freeze, wq_busy=%d):\n",
@@ -115,15 +98,11 @@ static int try_to_freeze_tasks(bool sig_only)
elapsed_csecs / 100, elapsed_csecs % 100,
todo - wq_busy, wq_busy);

thaw_workqueues();

read_lock(&tasklist_lock);
do_each_thread(g, p) {
task_lock(p);
if (!wakeup && freezing(p) && !freezer_should_skip(p))
if (!wakeup && !freezer_should_skip(p) &&
p != current && freezing(p) && !frozen(p))
sched_show_task(p);
cancel_freezing(p);
task_unlock(p);
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
} else {
@@ -136,12 +115,18 @@ static int try_to_freeze_tasks(bool sig_only)

/**
* freeze_processes - Signal user space processes to enter the refrigerator.
*
* On success, returns 0.  On failure, -errno and system is fully thawed.
*/
int freeze_processes(void)
{
int error;

if (!pm_freezing)
atomic_inc(&system_freezing_cnt);

printk("Freezing user space processes ... ");
pm_freezing = true;
error = try_to_freeze_tasks(true);
if (!error) {
printk("done.");
@@ -150,17 +135,22 @@ int freeze_processes(void)
printk("\n");
BUG_ON(in_atomic());

if (error)
thaw_processes();
return error;
}

/**
* freeze_kernel_threads - Make freezable kernel threads go to the refrigerator.
*
* On success, returns 0.  On failure, -errno and system is fully thawed.
*/
int freeze_kernel_threads(void)
{
int error;

printk("Freezing remaining freezable tasks ... ");
pm_nosig_freezing = true;
error = try_to_freeze_tasks(false);
if (!error)
printk("done.");
@@ -168,37 +158,32 @@ int freeze_kernel_threads(void)
printk("\n");
BUG_ON(in_atomic());

if (error)
thaw_processes();
return error;
}

static void thaw_tasks(bool nosig_only)
{
struct task_struct *g, *p;

read_lock(&tasklist_lock);
do_each_thread(g, p) {
if (!freezable(p))
continue;

if (nosig_only && should_send_signal(p))
continue;

if (cgroup_freezing_or_frozen(p))
continue;

thaw_process(p);
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
}

void thaw_processes(void)
{
struct task_struct *g, *p;

if (pm_freezing)
atomic_dec(&system_freezing_cnt);
pm_freezing = false;
pm_nosig_freezing = false;

oom_killer_enable();

printk("Restarting tasks ... ");

thaw_workqueues();
thaw_tasks(true);
thaw_tasks(false);

read_lock(&tasklist_lock);
do_each_thread(g, p) {
__thaw_task(p);
} while_each_thread(g, p);
read_unlock(&tasklist_lock);

schedule();
printk("done.\n");
}
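A practical consequence of the freeze_processes()/freeze_kernel_threads() hunks above is that both functions now thaw everything themselves when they fail, so callers no longer carry their own thaw-on-error logic. A hedged caller sketch (toy_enter_sleep and the elided device handling are illustrative, not from the patch):

/* Sketch: how a sleep entry path is expected to call the freezer now. */
static int toy_enter_sleep(void)
{
	int error;

	error = freeze_processes();		/* user space first */
	if (error)
		return error;			/* system is already fully thawed */

	error = freeze_kernel_threads();	/* then freezable kthreads */
	if (error)
		return error;			/* likewise fully thawed on failure */

	/* ... suspend devices and enter the sleep state ... */

	thaw_processes();			/* single thaw on the way back out */
	return 0;
}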
@@ -42,9 +42,9 @@ static const struct platform_suspend_ops *suspend_ops;
*/
void suspend_set_ops(const struct platform_suspend_ops *ops)
{
mutex_lock(&pm_mutex);
lock_system_sleep();
suspend_ops = ops;
mutex_unlock(&pm_mutex);
unlock_system_sleep();
}
EXPORT_SYMBOL_GPL(suspend_set_ops);

@@ -106,13 +106,11 @@ static int suspend_prepare(void)
goto Finish;

error = suspend_freeze_processes();
if (error) {
suspend_stats.failed_freeze++;
dpm_save_failed_step(SUSPEND_FREEZE);
} else
if (!error)
return 0;

suspend_thaw_processes();
suspend_stats.failed_freeze++;
dpm_save_failed_step(SUSPEND_FREEZE);
usermodehelper_enable();
Finish:
pm_notifier_call_chain(PM_POST_SUSPEND);
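The suspend_prepare() hunk regroups the logic so that success returns early and all failure bookkeeping sits together on the error path. The rough shape after the change, as an assumed simplification of the surrounding function rather than a quote of it:

/* Sketch: suspend_prepare() error handling after the hunk above. */
static int toy_suspend_prepare(void)
{
	int error;

	error = suspend_freeze_processes();
	if (!error)
		return 0;			/* everything frozen, proceed to suspend */

	suspend_thaw_processes();		/* undo any partial freeze */
	suspend_stats.failed_freeze++;
	dpm_save_failed_step(SUSPEND_FREEZE);
	usermodehelper_enable();
	return error;
}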
@@ -30,28 +30,6 @@

#include "power.h"

/*
* NOTE: The SNAPSHOT_SET_SWAP_FILE and SNAPSHOT_PMOPS ioctls are obsolete and
* will be removed in the future.  They are only preserved here for
* compatibility with existing userland utilities.
*/
#define SNAPSHOT_SET_SWAP_FILE	_IOW(SNAPSHOT_IOC_MAGIC, 10, unsigned int)
#define SNAPSHOT_PMOPS	_IOW(SNAPSHOT_IOC_MAGIC, 12, unsigned int)

#define PMOPS_PREPARE	1
#define PMOPS_ENTER	2
#define PMOPS_FINISH	3

/*
* NOTE: The following ioctl definitions are wrong and have been replaced with
* correct ones.  They are only preserved here for compatibility with existing
* userland utilities and will be removed in the future.
*/
#define SNAPSHOT_ATOMIC_SNAPSHOT	_IOW(SNAPSHOT_IOC_MAGIC, 3, void *)
#define SNAPSHOT_SET_IMAGE_SIZE	_IOW(SNAPSHOT_IOC_MAGIC, 6, unsigned long)
#define SNAPSHOT_AVAIL_SWAP	_IOR(SNAPSHOT_IOC_MAGIC, 7, void *)
#define SNAPSHOT_GET_SWAP_PAGE	_IOR(SNAPSHOT_IOC_MAGIC, 8, void *)

#define SNAPSHOT_MINOR	231

@@ -71,7 +49,7 @@ static int snapshot_open(struct inode *inode, struct file *filp)
struct snapshot_data *data;
int error;

mutex_lock(&pm_mutex);
lock_system_sleep();

if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
error = -EBUSY;
@@ -123,7 +101,7 @@ static int snapshot_open(struct inode *inode, struct file *filp)
data->platform_support = 0;

Unlock:
mutex_unlock(&pm_mutex);
unlock_system_sleep();

return error;
}
@@ -132,7 +110,7 @@ static int snapshot_release(struct inode *inode, struct file *filp)
{
struct snapshot_data *data;

mutex_lock(&pm_mutex);
lock_system_sleep();

swsusp_free();
free_basic_memory_bitmaps();
@@ -146,7 +124,7 @@ static int snapshot_release(struct inode *inode, struct file *filp)
PM_POST_HIBERNATION : PM_POST_RESTORE);
atomic_inc(&snapshot_device_available);

mutex_unlock(&pm_mutex);
unlock_system_sleep();

return 0;
}
@@ -158,7 +136,7 @@ static ssize_t snapshot_read(struct file *filp, char __user *buf,
ssize_t res;
loff_t pg_offp = *offp & ~PAGE_MASK;

mutex_lock(&pm_mutex);
lock_system_sleep();

data = filp->private_data;
if (!data->ready) {
@@ -179,7 +157,7 @@ static ssize_t snapshot_read(struct file *filp, char __user *buf,
*offp += res;

Unlock:
mutex_unlock(&pm_mutex);
unlock_system_sleep();

return res;
}
@@ -191,7 +169,7 @@ static ssize_t snapshot_write(struct file *filp, const char __user *buf,
ssize_t res;
loff_t pg_offp = *offp & ~PAGE_MASK;

mutex_lock(&pm_mutex);
lock_system_sleep();

data = filp->private_data;

@@ -208,20 +186,11 @@ static ssize_t snapshot_write(struct file *filp, const char __user *buf,
if (res > 0)
*offp += res;
unlock:
mutex_unlock(&pm_mutex);
unlock_system_sleep();

return res;
}

static void snapshot_deprecated_ioctl(unsigned int cmd)
{
if (printk_ratelimit())
printk(KERN_NOTICE "%pf: ioctl '%.8x' is deprecated and will "
"be removed soon, update your suspend-to-disk "
"utilities\n",
__builtin_return_address(0), cmd);
}

static long snapshot_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
@@ -257,11 +226,9 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
break;

error = freeze_processes();
if (error) {
thaw_processes();
if (error)
usermodehelper_enable();
}
if (!error)
else
data->frozen = 1;
break;

@@ -274,8 +241,6 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
data->frozen = 0;
break;

case SNAPSHOT_ATOMIC_SNAPSHOT:
snapshot_deprecated_ioctl(cmd);
case SNAPSHOT_CREATE_IMAGE:
if (data->mode != O_RDONLY || !data->frozen || data->ready) {
error = -EPERM;
@@ -283,10 +248,15 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
}
pm_restore_gfp_mask();
error = hibernation_snapshot(data->platform_support);
if (!error)
if (!error) {
error = put_user(in_suspend, (int __user *)arg);
if (!error)
data->ready = 1;
if (!error && !freezer_test_done)
data->ready = 1;
if (freezer_test_done) {
freezer_test_done = false;
thaw_processes();
}
}
break;

case SNAPSHOT_ATOMIC_RESTORE:
@@ -305,8 +275,6 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
data->ready = 0;
break;

case SNAPSHOT_SET_IMAGE_SIZE:
snapshot_deprecated_ioctl(cmd);
case SNAPSHOT_PREF_IMAGE_SIZE:
image_size = arg;
break;
@@ -321,16 +289,12 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
error = put_user(size, (loff_t __user *)arg);
break;

case SNAPSHOT_AVAIL_SWAP:
snapshot_deprecated_ioctl(cmd);
case SNAPSHOT_AVAIL_SWAP_SIZE:
size = count_swap_pages(data->swap, 1);
size <<= PAGE_SHIFT;
error = put_user(size, (loff_t __user *)arg);
break;

case SNAPSHOT_GET_SWAP_PAGE:
snapshot_deprecated_ioctl(cmd);
case SNAPSHOT_ALLOC_SWAP_PAGE:
if (data->swap < 0 || data->swap >= MAX_SWAPFILES) {
error = -ENODEV;
@@ -353,27 +317,6 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
free_all_swap_pages(data->swap);
break;

case SNAPSHOT_SET_SWAP_FILE: /* This ioctl is deprecated */
snapshot_deprecated_ioctl(cmd);
if (!swsusp_swap_in_use()) {
/*
* User space encodes device types as two-byte values,
* so we need to recode them
*/
if (old_decode_dev(arg)) {
data->swap = swap_type_of(old_decode_dev(arg),
0, NULL);
if (data->swap < 0)
error = -ENODEV;
} else {
data->swap = -1;
error = -EINVAL;
}
} else {
error = -EPERM;
}
break;

case SNAPSHOT_S2RAM:
if (!data->frozen) {
error = -EPERM;
@@ -396,33 +339,6 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
error = hibernation_platform_enter();
break;

case SNAPSHOT_PMOPS: /* This ioctl is deprecated */
snapshot_deprecated_ioctl(cmd);
error = -EINVAL;

switch (arg) {

case PMOPS_PREPARE:
data->platform_support = 1;
error = 0;
break;

case PMOPS_ENTER:
if (data->platform_support)
error = hibernation_platform_enter();
break;

case PMOPS_FINISH:
if (data->platform_support)
error = 0;
break;

default:
printk(KERN_ERR "SNAPSHOT_PMOPS: invalid argument %ld\n", arg);

}
break;

case SNAPSHOT_SET_SWAP_AREA:
if (swsusp_swap_in_use()) {
error = -EPERM;
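With the deprecated snapshot ioctls removed, suspend-to-disk utilities have to use the current names from <linux/suspend_ioctls.h>. The following userspace sketch is illustrative only: it assumes the conventional /dev/snapshot device node and omits the image read/write and swap handling that a real tool performs.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/suspend_ioctls.h>

int main(void)
{
	int dev = open("/dev/snapshot", O_RDONLY);
	int in_suspend = 0;

	if (dev < 0)
		return 1;
	if (ioctl(dev, SNAPSHOT_FREEZE, 0))			/* freeze user space */
		goto out;
	if (!ioctl(dev, SNAPSHOT_CREATE_IMAGE, &in_suspend) && in_suspend)
		printf("image created; read it from the device and write it to swap\n");
	ioctl(dev, SNAPSHOT_UNFREEZE, 0);			/* thaw before exiting */
out:
	close(dev);
	return 0;
}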
@@ -600,14 +600,10 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)

/*
* Finally, kill the kernel thread. We don't need to be RCU
* safe anymore, since the bdi is gone from visibility. Force
* unfreeze of the thread before calling kthread_stop(), otherwise
* it would never exet if it is currently stuck in the refrigerator.
* safe anymore, since the bdi is gone from visibility.
*/
if (bdi->wb.task) {
thaw_process(bdi->wb.task);
if (bdi->wb.task)
kthread_stop(bdi->wb.task);
}
}

/*
@@ -328,7 +328,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
*/
if (test_tsk_thread_flag(p, TIF_MEMDIE)) {
if (unlikely(frozen(p)))
thaw_process(p);
__thaw_task(p);
return ERR_PTR(-1UL);
}
if (!p->mm)
@@ -18,6 +18,7 @@
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/freezer.h>

#include <linux/sunrpc/clnt.h>

@@ -231,7 +232,7 @@ static int rpc_wait_bit_killable(void *word)
{
if (fatal_signal_pending(current))
return -ERESTARTSYS;
schedule();
freezable_schedule();
return 0;
}
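The sunrpc hunk replaces a plain schedule() in a TASK_KILLABLE wait with freezable_schedule(), so an NFS client blocked on the server no longer holds up the freezer. Roughly, and as a sketch of the helper introduced alongside this series rather than a verbatim quote of include/linux/freezer.h, it brackets the sleep with the freezer accounting calls:

/* Sketch: what freezable_schedule() is assumed to expand to. */
#define toy_freezable_schedule()					\
({									\
	freezer_do_not_count();		/* freezer may treat us as frozen */	\
	schedule();							\
	freezer_count();		/* re-enter accounting, freeze if asked */ \
})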
|
Loading…
Reference in New Issue
Block a user