Mirror of https://github.com/edk2-porting/linux-next.git, synced 2024-12-22 20:23:57 +08:00
2b44c4db2e
Calling freeze_processes sets a global flag that will cause any process that calls try_to_freeze to enter the refrigerator. It skips sending a signal to the current task, but if the current task ever hits try_to_freeze, all threads will be frozen and the system will deadlock.

Set a new flag, PF_SUSPEND_TASK, on the task that calls freeze_processes. The flag notifies the freezer that the thread is involved in suspend and should not be frozen. Also add a WARN_ON in thaw_processes if the caller does not have the PF_SUSPEND_TASK flag set, to catch if a different task calls thaw_processes than the one that called freeze_processes, leaving a task with PF_SUSPEND_TASK permanently set on it.

Threads that spawn off a task with PF_SUSPEND_TASK set (which swsusp does) will also have PF_SUSPEND_TASK set, preventing them from freezing while they are helping with suspend, but they need to be dead by the time suspend is triggered, otherwise they may run when userspace is expected to be frozen. Add a WARN_ON in thaw_processes if more than one thread has the PF_SUSPEND_TASK flag set.

Reported-and-tested-by: Michael Leun <lkml20130126@newton.leun.net>
Signed-off-by: Colin Cross <ccross@android.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
225 lines | 5.0 KiB | C
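The deadlock described in the commit message arises because any freezable task that reaches try_to_freeze() after the global freezing state is set parks itself in the refrigerator. The sketch below is an editor's illustration of both sides of that contract; it is not part of this file, assumes only the standard freezer helpers (set_freezable(), try_to_freeze(), kthread_should_stop()) and the PF_NOFREEZE/PF_SUSPEND_TASK task flags, and the actual PF_SUSPEND_TASK check lives in the freezer core (kernel/freezer.c), not here.

/* Editor's sketch -- illustrative only, not part of kernel/power/process.c. */

/* A typical freezable kernel thread: once freeze_processes() or
 * freeze_kernel_threads() has set the global freezing state, the
 * try_to_freeze() call parks the thread in the refrigerator until thaw. */
static int example_freezable_thread(void *data)
{
	set_freezable();
	while (!kthread_should_stop()) {
		try_to_freeze();		/* blocks here while the system is frozen */
		/* ... periodic work ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}

/* The exemption the commit message adds, in outline: a task flagged
 * PF_SUSPEND_TASK (like one flagged PF_NOFREEZE) is never treated as
 * freezable, so the task driving suspend cannot freeze itself and deadlock. */
static bool example_should_freeze(struct task_struct *p)
{
	if (p->flags & (PF_NOFREEZE | PF_SUSPEND_TASK))
		return false;		/* exempt from the freezer */
	return true;			/* otherwise subject to the global freeze */
}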
/*
 * drivers/power/process.c - Functions for starting/stopping processes on
 * suspend transitions.
 *
 * Originally from swsusp.
 */


#undef DEBUG

#include <linux/interrupt.h>
#include <linux/oom.h>
#include <linux/suspend.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/freezer.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/kmod.h>

/*
 * Timeout for stopping processes
 */
unsigned int __read_mostly freeze_timeout_msecs = 20 * MSEC_PER_SEC;

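/*
 * try_to_freeze_tasks - Request the freezer to freeze every eligible task
 * except the current one (plus workqueues when @user_only is false) and
 * poll with exponential backoff (1 ms up to 8 ms) until everything has
 * entered the refrigerator, freeze_timeout_msecs expires, or a wakeup
 * event is pending.  Returns 0 on success or -EBUSY otherwise.
 */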
static int try_to_freeze_tasks(bool user_only)
{
	struct task_struct *g, *p;
	unsigned long end_time;
	unsigned int todo;
	bool wq_busy = false;
	struct timeval start, end;
	u64 elapsed_msecs64;
	unsigned int elapsed_msecs;
	bool wakeup = false;
	int sleep_usecs = USEC_PER_MSEC;

	do_gettimeofday(&start);

	end_time = jiffies + msecs_to_jiffies(freeze_timeout_msecs);

	if (!user_only)
		freeze_workqueues_begin();

	while (true) {
		todo = 0;
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (p == current || !freeze_task(p))
				continue;

			if (!freezer_should_skip(p))
				todo++;
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);

		if (!user_only) {
			wq_busy = freeze_workqueues_busy();
			todo += wq_busy;
		}

		if (!todo || time_after(jiffies, end_time))
			break;

		if (pm_wakeup_pending()) {
			wakeup = true;
			break;
		}

		/*
		 * We need to retry, but first give the freezing tasks some
		 * time to enter the refrigerator.  Start with an initial
		 * 1 ms sleep followed by exponential backoff until 8 ms.
		 */
		usleep_range(sleep_usecs / 2, sleep_usecs);
		if (sleep_usecs < 8 * USEC_PER_MSEC)
			sleep_usecs *= 2;
	}

	do_gettimeofday(&end);
	elapsed_msecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
	do_div(elapsed_msecs64, NSEC_PER_MSEC);
	elapsed_msecs = elapsed_msecs64;

	if (todo) {
		printk("\n");
		printk(KERN_ERR "Freezing of tasks %s after %d.%03d seconds "
		       "(%d tasks refusing to freeze, wq_busy=%d):\n",
		       wakeup ? "aborted" : "failed",
		       elapsed_msecs / 1000, elapsed_msecs % 1000,
		       todo - wq_busy, wq_busy);

		if (!wakeup) {
			read_lock(&tasklist_lock);
			do_each_thread(g, p) {
				if (p != current && !freezer_should_skip(p)
				    && freezing(p) && !frozen(p))
					sched_show_task(p);
			} while_each_thread(g, p);
			read_unlock(&tasklist_lock);
		}
	} else {
		printk("(elapsed %d.%03d seconds) ", elapsed_msecs / 1000,
		       elapsed_msecs % 1000);
	}

	return todo ? -EBUSY : 0;
}

/**
 * freeze_processes - Signal user space processes to enter the refrigerator.
 * The current thread will not be frozen.  The same process that calls
 * freeze_processes must later call thaw_processes.
 *
 * On success, returns 0.  On failure, -errno and system is fully thawed.
 */
int freeze_processes(void)
{
	int error;

	error = __usermodehelper_disable(UMH_FREEZING);
	if (error)
		return error;

	/* Make sure this task doesn't get frozen */
	current->flags |= PF_SUSPEND_TASK;

	if (!pm_freezing)
		atomic_inc(&system_freezing_cnt);

	printk("Freezing user space processes ... ");
	pm_freezing = true;
	error = try_to_freeze_tasks(true);
	if (!error) {
		printk("done.");
		__usermodehelper_set_disable_depth(UMH_DISABLED);
		oom_killer_disable();
	}
	printk("\n");
	BUG_ON(in_atomic());

	if (error)
		thaw_processes();
	return error;
}

/**
 * freeze_kernel_threads - Make freezable kernel threads go to the refrigerator.
 *
 * On success, returns 0.  On failure, -errno and only the kernel threads are
 * thawed, so as to give a chance to the caller to do additional cleanups
 * (if any) before thawing the userspace tasks. So, it is the responsibility
 * of the caller to thaw the userspace tasks, when the time is right.
 */
int freeze_kernel_threads(void)
{
	int error;

	printk("Freezing remaining freezable tasks ... ");
	pm_nosig_freezing = true;
	error = try_to_freeze_tasks(false);
	if (!error)
		printk("done.");

	printk("\n");
	BUG_ON(in_atomic());

	if (error)
		thaw_kernel_threads();
	return error;
}

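/*
 * thaw_processes - Thaw everything frozen by freeze_processes() and/or
 * freeze_kernel_threads(): clear the global freezing state, re-enable the
 * OOM killer and usermode helpers, thaw workqueues and all tasks, and drop
 * PF_SUSPEND_TASK from the calling task.  The WARN_ONs catch a thaw issued
 * by a task other than the one that froze the system, and any stray task
 * still carrying PF_SUSPEND_TASK.
 */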
void thaw_processes(void)
{
	struct task_struct *g, *p;
	struct task_struct *curr = current;

	if (pm_freezing)
		atomic_dec(&system_freezing_cnt);
	pm_freezing = false;
	pm_nosig_freezing = false;

	oom_killer_enable();

	printk("Restarting tasks ... ");

	thaw_workqueues();

	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		/* No other threads should have PF_SUSPEND_TASK set */
		WARN_ON((p != curr) && (p->flags & PF_SUSPEND_TASK));
		__thaw_task(p);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);

	WARN_ON(!(curr->flags & PF_SUSPEND_TASK));
	curr->flags &= ~PF_SUSPEND_TASK;

	usermodehelper_enable();

	schedule();
	printk("done.\n");
}

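/*
 * thaw_kernel_threads - Thaw only kernel threads and workqueue workers
 * (tasks with PF_KTHREAD or PF_WQ_WORKER set), leaving user space frozen.
 * This undoes freeze_kernel_threads() in cases where user space must
 * remain frozen, e.g. so the caller can do additional cleanup first.
 */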
void thaw_kernel_threads(void)
{
	struct task_struct *g, *p;

	pm_nosig_freezing = false;
	printk("Restarting kernel threads ... ");

	thaw_workqueues();

	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		if (p->flags & (PF_KTHREAD | PF_WQ_WORKER))
			__thaw_task(p);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);

	schedule();
	printk("done.\n");
}
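For orientation, these entry points are normally used in matched pairs by the suspend core. The sketch below is an editor's illustration of that calling convention based only on the kernel-doc comments above; example_suspend_sequence() and the placement of the low-power transition are hypothetical, not taken from the kernel's actual suspend code.

/* Editor's sketch of how a suspend path might pair these calls. */
static int example_suspend_sequence(void)
{
	int error;

	error = freeze_processes();		/* user space into the refrigerator */
	if (error)
		return error;			/* already fully thawed on failure */

	error = freeze_kernel_threads();	/* then freezable kernel threads */
	if (error)
		goto thaw;			/* kernel threads already re-thawed;
						 * user space still needs thawing */

	/* ... enter the low-power state, then resume ... */

thaw:
	thaw_processes();			/* thaws everything, clears PF_SUSPEND_TASK */
	return error;
}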