Mirror of https://github.com/edk2-porting/linux-next.git, synced 2024-12-29 15:43:59 +08:00
f5d39b0208
Rewrite the core freezer to behave better wrt thawing and be simpler in general.

By replacing PF_FROZEN with TASK_FROZEN, a special block state, it is ensured that frozen tasks stay frozen until thawed and don't randomly wake up early, as is currently possible. As such, it does away with PF_FROZEN and PF_FREEZER_SKIP, freeing up two PF_flags (yay!).

Specifically, the current scheme works a little like:

	freezer_do_not_count();
	schedule();
	freezer_count();

And either the task is blocked, or it lands in try_to_freeze() through freezer_count(). Now, when it is blocked, the freezer considers it frozen and continues.

However, on thawing, once pm_freezing is cleared, freezer_count() stops working, and any random/spurious wakeup will let a task run before its time.

That is, thawing tries to thaw things in an explicit order: kernel threads and workqueues before bringing SMP back, before userspace, etc. However, due to the above-mentioned races it is entirely possible for userspace tasks to thaw (by accident) before SMP is back. This can be a fatal problem on asymmetric ISA architectures (e.g. ARMv9) where a userspace task may require a special CPU to run.

As said, replace this with a special task state TASK_FROZEN and add the following state transitions:

	TASK_FREEZABLE	-> TASK_FROZEN
	__TASK_STOPPED	-> TASK_FROZEN
	__TASK_TRACED	-> TASK_FROZEN

The new TASK_FREEZABLE can be set on any state that is part of TASK_NORMAL (IOW TASK_INTERRUPTIBLE and TASK_UNINTERRUPTIBLE) -- any such state is already required to deal with spurious wakeups, and the freezer causes one such wakeup when thawing the task (since the original state is lost).

The special __TASK_{STOPPED,TRACED} states *can* be restored, since their canonical state is kept in ->jobctl.

With this, frozen tasks need an explicit TASK_FROZEN wakeup and are free of undue (early / spurious) wakeups.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Link: https://lore.kernel.org/r/20220822114649.055452969@infradead.org
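For illustration, a minimal freezable sleep under the new scheme looks roughly like the following (a sketch; the wait site and 'condition' are hypothetical, but TASK_FREEZABLE is OR'd into the sleep state in exactly this way by the reworked wait_event_freezable() helpers):

	/* Sleep interruptibly; the freezer may turn this into TASK_FROZEN. */
	set_current_state(TASK_INTERRUPTIBLE | TASK_FREEZABLE);
	if (!condition)
		schedule();
	__set_current_state(TASK_RUNNING);

Because TASK_INTERRUPTIBLE sleepers must already tolerate spurious wakeups, the wakeup injected at thaw time needs no special handling at the wait site.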
236 lines
5.4 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/freezer.c - Function to freeze a process
 *
 * Originally from kernel/power/process.c
 */

#include <linux/interrupt.h>
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
/* total number of freezing conditions in effect */
DEFINE_STATIC_KEY_FALSE(freezer_active);
EXPORT_SYMBOL(freezer_active);

/*
 * indicate whether PM freezing is in effect, protected by
 * system_transition_mutex
 */
bool pm_freezing;
bool pm_nosig_freezing;

/* protects freezing and frozen transitions */
static DEFINE_SPINLOCK(freezer_lock);
/**
 * freezing_slow_path - slow path for testing whether a task needs to be frozen
 * @p: task to be tested
 *
 * This function is called by freezing() if freezer_active isn't zero
 * and tests whether @p needs to enter and stay in frozen state.  Can be
 * called under any context.  The freezers are responsible for ensuring the
 * target tasks see the updated state.
 */
bool freezing_slow_path(struct task_struct *p)
{
	if (p->flags & (PF_NOFREEZE | PF_SUSPEND_TASK))
		return false;

	if (test_tsk_thread_flag(p, TIF_MEMDIE))
		return false;

	if (pm_nosig_freezing || cgroup_freezing(p))
		return true;

	if (pm_freezing && !(p->flags & PF_KTHREAD))
		return true;

	return false;
}
EXPORT_SYMBOL(freezing_slow_path);
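/*
 * For reference, the freezing() fast path in include/linux/freezer.h
 * gates this slow path behind the freezer_active static key, roughly:
 *
 *	if (static_branch_unlikely(&freezer_active))
 *		return freezing_slow_path(p);
 *	return false;
 *
 * (a sketch, not a verbatim copy; see freezer.h for the real definition)
 */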
bool frozen(struct task_struct *p)
{
	return READ_ONCE(p->__state) & TASK_FROZEN;
}
/* The refrigerator is the place where frozen processes are stored :-). */
bool __refrigerator(bool check_kthr_stop)
{
	unsigned int state = get_current_state();
	bool was_frozen = false;

	pr_debug("%s entered refrigerator\n", current->comm);

	WARN_ON_ONCE(state && !(state & TASK_NORMAL));

	for (;;) {
		bool freeze;

		set_current_state(TASK_FROZEN);

		spin_lock_irq(&freezer_lock);
		freeze = freezing(current) && !(check_kthr_stop && kthread_should_stop());
		spin_unlock_irq(&freezer_lock);

		if (!freeze)
			break;

		was_frozen = true;
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	pr_debug("%s left refrigerator\n", current->comm);

	return was_frozen;
}
EXPORT_SYMBOL(__refrigerator);
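/*
 * Tasks normally end up in __refrigerator() via try_to_freeze() from
 * include/linux/freezer.h, which as of this change does, roughly:
 *
 *	might_sleep();
 *	if (likely(!freezing(current)))
 *		return false;
 *	return __refrigerator(false);
 *
 * (a sketch; see freezer.h for the authoritative helper)
 */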
static void fake_signal_wake_up(struct task_struct *p)
{
	unsigned long flags;

	if (lock_task_sighand(p, &flags)) {
		signal_wake_up(p, 0);
		unlock_task_sighand(p, &flags);
	}
}
static int __set_task_frozen(struct task_struct *p, void *arg)
{
	unsigned int state = READ_ONCE(p->__state);

	if (p->on_rq)
		return 0;

	if (p != current && task_curr(p))
		return 0;

	if (!(state & (TASK_FREEZABLE | __TASK_STOPPED | __TASK_TRACED)))
		return 0;

	/*
	 * Only TASK_NORMAL can be augmented with TASK_FREEZABLE, since they
	 * can suffer spurious wakeups.
	 */
	if (state & TASK_FREEZABLE)
		WARN_ON_ONCE(!(state & TASK_NORMAL));

#ifdef CONFIG_LOCKDEP
	/*
	 * It's dangerous to freeze with locks held; there be dragons there.
	 */
	if (!(state & __TASK_FREEZABLE_UNSAFE))
		WARN_ON_ONCE(debug_locks && p->lockdep_depth);
#endif

	WRITE_ONCE(p->__state, TASK_FROZEN);
	return TASK_FROZEN;
}
static bool __freeze_task(struct task_struct *p)
{
	/* TASK_FREEZABLE|TASK_STOPPED|TASK_TRACED -> TASK_FROZEN */
	return task_call_func(p, __set_task_frozen, NULL);
}
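/*
 * task_call_func() invokes __set_task_frozen() with the task's pi_lock
 * (and, when the task is runnable, its rq lock) held, so the
 * on_rq/task_curr/__state checks above are stable across the
 * TASK_FROZEN transition. A zero return means the task wasn't in a
 * freezable state yet and the caller must retry.
 */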
/**
 * freeze_task - send a freeze request to given task
 * @p: task to send the request to
 *
 * If @p is freezing, the freeze request is sent either by sending a fake
 * signal (if it's not a kernel thread) or waking it up (if it's a kernel
 * thread).
 *
 * RETURNS:
 * %false, if @p is not freezing or already frozen; %true, otherwise
 */
bool freeze_task(struct task_struct *p)
{
	unsigned long flags;

	spin_lock_irqsave(&freezer_lock, flags);
	if (!freezing(p) || frozen(p) || __freeze_task(p)) {
		spin_unlock_irqrestore(&freezer_lock, flags);
		return false;
	}

	if (!(p->flags & PF_KTHREAD))
		fake_signal_wake_up(p);
	else
		wake_up_state(p, TASK_NORMAL);

	spin_unlock_irqrestore(&freezer_lock, flags);
	return true;
}
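/*
 * The freezer core (try_to_freeze_tasks() in kernel/power/process.c)
 * drives freeze_task() in a retry loop until every task reports frozen,
 * along the lines of:
 *
 *	for_each_process_thread(g, p) {
 *		if (p == current || !freeze_task(p))
 *			continue;
 *		todo++;
 *	}
 *
 * (a sketch of the caller, not code from this file)
 */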
/*
 * The special task states (TASK_STOPPED, TASK_TRACED) keep their canonical
 * state in p->jobctl. If either of them got a wakeup that was missed because
 * of TASK_FROZEN, then their canonical state reflects that and the below will
 * refuse to restore the special state and instead issue the wakeup.
 */
static int __set_task_special(struct task_struct *p, void *arg)
{
	unsigned int state = 0;

	if (p->jobctl & JOBCTL_TRACED)
		state = TASK_TRACED;

	else if (p->jobctl & JOBCTL_STOPPED)
		state = TASK_STOPPED;

	if (state)
		WRITE_ONCE(p->__state, state);

	return state;
}
void __thaw_task(struct task_struct *p)
{
	unsigned long flags, flags2;

	spin_lock_irqsave(&freezer_lock, flags);
	if (WARN_ON_ONCE(freezing(p)))
		goto unlock;

	if (lock_task_sighand(p, &flags2)) {
		/* TASK_FROZEN -> TASK_{STOPPED,TRACED} */
		bool ret = task_call_func(p, __set_task_special, NULL);
		unlock_task_sighand(p, &flags2);
		if (ret)
			goto unlock;
	}

	wake_up_state(p, TASK_FROZEN);
unlock:
	spin_unlock_irqrestore(&freezer_lock, flags);
}
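/*
 * Note that wake_up_state(p, TASK_FROZEN) only matches tasks actually
 * sitting in TASK_FROZEN, which is what makes thawing race-free: a task
 * that was never frozen, or was restored to TASK_STOPPED/TASK_TRACED
 * above, is left alone. Thawing is driven by thaw_processes() in
 * kernel/power/process.c, which calls __thaw_task() on every task.
 */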
/**
 * set_freezable - make %current freezable
 *
 * Mark %current freezable and enter refrigerator if necessary.
 *
 * RETURNS:
 * %true if %current was frozen and has since been thawed, %false otherwise
 */
bool set_freezable(void)
{
	might_sleep();

	/*
	 * Modify flags while holding freezer_lock. This ensures that the
	 * freezer either notices that we aren't frozen yet, or that the
	 * freezing condition is visible to try_to_freeze() below.
	 */
	spin_lock_irq(&freezer_lock);
	current->flags &= ~PF_NOFREEZE;
	spin_unlock_irq(&freezer_lock);

	return try_to_freeze();
}
EXPORT_SYMBOL(set_freezable);
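/*
 * Typical usage in a freezable kernel thread (a sketch; my_kthread, wq,
 * should_run and do_work() are hypothetical names, not part of this file):
 *
 *	static int my_kthread(void *arg)
 *	{
 *		set_freezable();
 *		while (!kthread_should_stop()) {
 *			wait_event_freezable(wq, should_run ||
 *					     kthread_should_stop());
 *			if (should_run)
 *				do_work();
 *		}
 *		return 0;
 *	}
 */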