mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-12-04 01:24:12 +08:00
8f5e823f91
- Fix the handling of Performance and Energy Bias Hint (EPB) on Intel processors and expose it to user space via sysfs to avoid having to access it through the generic MSR I/F (Rafael Wysocki). - Improve the handling of global turbo changes made by the platform firmware in the intel_pstate driver (Rafael Wysocki). - Convert some slow-path static_cpu_has() callers to boot_cpu_has() in cpufreq (Borislav Petkov). - Fix the frequency calculation loop in the armada-37xx cpufreq driver (Gregory CLEMENT). - Fix possible object reference leaks in multuple cpufreq drivers (Wen Yang). - Fix kerneldoc comment in the centrino cpufreq driver (dongjian). - Clean up the ACPI and maple cpufreq drivers (Viresh Kumar, Mohan Kumar). - Add support for lx2160a and ls1028a to the qoriq cpufreq driver (Vabhav Sharma, Yuantian Tang). - Fix kobject memory leak in the cpufreq core (Viresh Kumar). - Simplify the IOwait boosting in the schedutil cpufreq governor and rework the TSC cpufreq notifier on x86 (Rafael Wysocki). - Clean up the cpufreq core and statistics code (Yue Hu, Kyle Lin). - Improve the cpufreq documentation, add SPDX license tags to some PM documentation files and unify copyright notices in them (Rafael Wysocki). - Add support for "CPU" domains to the generic power domains (genpd) framework and provide low-level PSCI firmware support for that feature (Ulf Hansson). - Rearrange the PSCI firmware support code and add support for SYSTEM_RESET2 to it (Ulf Hansson, Sudeep Holla). - Improve genpd support for devices in multiple power domains (Ulf Hansson). - Unify target residency for the AFTR and coupled AFTR states in the exynos cpuidle driver (Marek Szyprowski). - Introduce new helper routine in the operating performance points (OPP) framework (Andrew-sh.Cheng). - Add support for passing on-die termination (ODT) and auto power down parameters from the kernel to Trusted Firmware-A (TF-A) to the rk3399_dmc devfreq driver (Enric Balletbo i Serra). 
- Add tracing to devfreq (Lukasz Luba). - Make the exynos-bus devfreq driver suspend all devices on system shutdown (Marek Szyprowski). - Fix a few minor issues in the devfreq subsystem and clean it up somewhat (Enric Balletbo i Serra, MyungJoo Ham, Rob Herring, Saravana Kannan, Yangtao Li). - Improve system wakeup diagnostics (Stephen Boyd). - Rework filesystem sync messages emitted during system suspend and hibernation (Harry Pan). -----BEGIN PGP SIGNATURE----- iQJGBAABCAAwFiEE4fcc61cGeeHD/fCwgsRv/nhiVHEFAlzQEwUSHHJqd0Byand5 c29ja2kubmV0AAoJEILEb/54YlRxxXwP/jrxikIXdCOV3CJVioV0NetyebwlOqYp UsIA7lQBfZ/DY6dHw/oKuAT9LP01vcFg6XGe83Alkta9qczR5KZ/MYHFNSZXjXjL kEvIMBCS/oykaBuW+Xn9am8Ke3Yq/rBSTKWVom3vzSQY0qvZ9GBwPDrzw+k63Zhz P3afB4ThyY0e9ftgw4HvSSNm13Kn0ItUIQOdaLatXMMcPqP5aAdnUma5Ibinbtpp rpTHuHKYx7MSjaCg6wl3kKTJeWbQP4wYO2ISZqH9zEwQgdvSHeFAvfPKTegUkmw9 uUsQnPD1JvdglOKovr2muehD1Ur+zsjKDf2OKERkWsWXHPyWzA/AqaVv1mkkU++b KaWaJ9pE86kGlJ3EXwRbGfV0dM5rrl+dUUQW6nPI1XJnIOFlK61RzwAbqI26F0Mz AlKxY4jyPLcM3SpQz9iILqyzHQqB67rm29XvId/9scoGGgoqEI4S+v6LYZqI3Vx6 aeSRu+Yof7p5w4Kg5fODX+HzrtMnMrPmLUTXhbExfsYZMi7hXURcN6s+tMpH0ckM 4yiIpnNGCKUSV4vxHBm8XJdAuUnR4Vcz++yFslszgDVVvw5tkvF7SYeHZ6HqcQVm af9HdWzx3qajs/oyBwdRBedZYDnP1joC5donBI2ofLeF33NA7TEiPX8Zebw8XLkv fNikssA7PGdv =nY9p -----END PGP SIGNATURE----- Merge tag 'pm-5.2-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm Pull power management updates from Rafael Wysocki: "These fix the (Intel-specific) Performance and Energy Bias Hint (EPB) handling and expose it to user space via sysfs, fix and clean up several cpufreq drivers, add support for two new chips to the qoriq cpufreq driver, fix, simplify and clean up the cpufreq core and the schedutil governor, add support for "CPU" domains to the generic power domains (genpd) framework and provide low-level PSCI firmware support for that feature, fix the exynos cpuidle driver and fix a couple of issues in the devfreq subsystem and clean it up. 
Specifics: - Fix the handling of Performance and Energy Bias Hint (EPB) on Intel processors and expose it to user space via sysfs to avoid having to access it through the generic MSR I/F (Rafael Wysocki). - Improve the handling of global turbo changes made by the platform firmware in the intel_pstate driver (Rafael Wysocki). - Convert some slow-path static_cpu_has() callers to boot_cpu_has() in cpufreq (Borislav Petkov). - Fix the frequency calculation loop in the armada-37xx cpufreq driver (Gregory CLEMENT). - Fix possible object reference leaks in multuple cpufreq drivers (Wen Yang). - Fix kerneldoc comment in the centrino cpufreq driver (dongjian). - Clean up the ACPI and maple cpufreq drivers (Viresh Kumar, Mohan Kumar). - Add support for lx2160a and ls1028a to the qoriq cpufreq driver (Vabhav Sharma, Yuantian Tang). - Fix kobject memory leak in the cpufreq core (Viresh Kumar). - Simplify the IOwait boosting in the schedutil cpufreq governor and rework the TSC cpufreq notifier on x86 (Rafael Wysocki). - Clean up the cpufreq core and statistics code (Yue Hu, Kyle Lin). - Improve the cpufreq documentation, add SPDX license tags to some PM documentation files and unify copyright notices in them (Rafael Wysocki). - Add support for "CPU" domains to the generic power domains (genpd) framework and provide low-level PSCI firmware support for that feature (Ulf Hansson). - Rearrange the PSCI firmware support code and add support for SYSTEM_RESET2 to it (Ulf Hansson, Sudeep Holla). - Improve genpd support for devices in multiple power domains (Ulf Hansson). - Unify target residency for the AFTR and coupled AFTR states in the exynos cpuidle driver (Marek Szyprowski). - Introduce new helper routine in the operating performance points (OPP) framework (Andrew-sh.Cheng). - Add support for passing on-die termination (ODT) and auto power down parameters from the kernel to Trusted Firmware-A (TF-A) to the rk3399_dmc devfreq driver (Enric Balletbo i Serra). 
- Add tracing to devfreq (Lukasz Luba). - Make the exynos-bus devfreq driver suspend all devices on system shutdown (Marek Szyprowski). - Fix a few minor issues in the devfreq subsystem and clean it up somewhat (Enric Balletbo i Serra, MyungJoo Ham, Rob Herring, Saravana Kannan, Yangtao Li). - Improve system wakeup diagnostics (Stephen Boyd). - Rework filesystem sync messages emitted during system suspend and hibernation (Harry Pan)" * tag 'pm-5.2-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (72 commits) cpufreq: Fix kobject memleak cpufreq: armada-37xx: fix frequency calculation for opp cpufreq: centrino: Fix centrino_setpolicy() kerneldoc comment cpufreq: qoriq: add support for lx2160a x86: tsc: Rework time_cpufreq_notifier() PM / Domains: Allow to attach a CPU via genpd_dev_pm_attach_by_id|name() PM / Domains: Search for the CPU device outside the genpd lock PM / Domains: Drop unused in-parameter to some genpd functions PM / Domains: Use the base device for driver_deferred_probe_check_state() cpufreq: qoriq: Add ls1028a chip support PM / Domains: Enable genpd_dev_pm_attach_by_id|name() for single PM domain PM / Domains: Allow OF lookup for multi PM domain case from ->attach_dev() PM / Domains: Don't kfree() the virtual device in the error path cpufreq: Move ->get callback check outside of __cpufreq_get() PM / Domains: remove unnecessary unlikely() cpufreq: Remove needless bios_limit check in show_bios_limit() drivers/cpufreq/acpi-cpufreq.c: This fixes the following checkpatch warning firmware/psci: add support for SYSTEM_RESET2 PM / devfreq: add tracing for scheduling work trace: events: add devfreq trace event file ...
626 lines
15 KiB
C
626 lines
15 KiB
C
/*
|
|
* kernel/power/suspend.c - Suspend to RAM and standby functionality.
|
|
*
|
|
* Copyright (c) 2003 Patrick Mochel
|
|
* Copyright (c) 2003 Open Source Development Lab
|
|
* Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
|
|
*
|
|
* This file is released under the GPLv2.
|
|
*/
|
|
|
|
#define pr_fmt(fmt) "PM: " fmt
|
|
|
|
#include <linux/string.h>
|
|
#include <linux/delay.h>
|
|
#include <linux/errno.h>
|
|
#include <linux/init.h>
|
|
#include <linux/console.h>
|
|
#include <linux/cpu.h>
|
|
#include <linux/cpuidle.h>
|
|
#include <linux/gfp.h>
|
|
#include <linux/io.h>
|
|
#include <linux/kernel.h>
|
|
#include <linux/list.h>
|
|
#include <linux/mm.h>
|
|
#include <linux/slab.h>
|
|
#include <linux/export.h>
|
|
#include <linux/suspend.h>
|
|
#include <linux/syscore_ops.h>
|
|
#include <linux/swait.h>
|
|
#include <linux/ftrace.h>
|
|
#include <trace/events/power.h>
|
|
#include <linux/compiler.h>
|
|
#include <linux/moduleparam.h>
|
|
|
|
#include "power.h"
|
|
|
|
/* User-visible names for the sleep states in /sys/power/state. */
const char * const pm_labels[] = {
	[PM_SUSPEND_TO_IDLE] = "freeze",
	[PM_SUSPEND_STANDBY] = "standby",
	[PM_SUSPEND_MEM] = "mem",
};
/* Entries stay NULL until the corresponding state is known to be valid. */
const char *pm_states[PM_SUSPEND_MAX];

/* Names used for the same states in /sys/power/mem_sleep. */
static const char * const mem_sleep_labels[] = {
	[PM_SUSPEND_TO_IDLE] = "s2idle",
	[PM_SUSPEND_STANDBY] = "shallow",
	[PM_SUSPEND_MEM] = "deep",
};
const char *mem_sleep_states[PM_SUSPEND_MAX];

/* State actually entered when "mem" is written to /sys/power/state. */
suspend_state_t mem_sleep_current = PM_SUSPEND_TO_IDLE;
/*
 * PM_SUSPEND_MAX acts as "no preference"; overridden by the
 * mem_sleep_default= command line option (see mem_sleep_default_setup()).
 */
suspend_state_t mem_sleep_default = PM_SUSPEND_MAX;
/* Sleep state being entered, or PM_SUSPEND_ON when no transition is running. */
suspend_state_t pm_suspend_target_state;
EXPORT_SYMBOL_GPL(pm_suspend_target_state);

unsigned int pm_suspend_global_flags;
EXPORT_SYMBOL_GPL(pm_suspend_global_flags);

static const struct platform_suspend_ops *suspend_ops;
static const struct platform_s2idle_ops *s2idle_ops;
/* Waited on by s2idle_enter(); woken from s2idle_wake(). */
static DECLARE_SWAIT_QUEUE_HEAD(s2idle_wait_head);

enum s2idle_states __read_mostly s2idle_state;
/* Protects transitions of s2idle_state. */
static DEFINE_RAW_SPINLOCK(s2idle_lock);
|
|
|
|
bool pm_suspend_via_s2idle(void)
|
|
{
|
|
return mem_sleep_current == PM_SUSPEND_TO_IDLE;
|
|
}
|
|
EXPORT_SYMBOL_GPL(pm_suspend_via_s2idle);
|
|
|
|
/* Install the platform suspend-to-idle callbacks under the system sleep lock. */
void s2idle_set_ops(const struct platform_s2idle_ops *ops)
{
	lock_system_sleep();
	s2idle_ops = ops;
	unlock_system_sleep();
}
|
|
|
|
/* Reset the suspend-to-idle state machine before starting a transition. */
static void s2idle_begin(void)
{
	s2idle_state = S2IDLE_STATE_NONE;
}
|
|
|
|
/*
 * s2idle_enter - Park all CPUs in the idle loop for suspend-to-idle.
 *
 * Bail out early if a wakeup event is already pending.  Otherwise advertise
 * S2IDLE_STATE_ENTER, push every CPU into idle and wait until s2idle_wake()
 * flips the state to S2IDLE_STATE_WAKE.  s2idle_lock guards the state
 * transitions; it is dropped across the actual idle period.
 */
static void s2idle_enter(void)
{
	trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_TO_IDLE, true);

	raw_spin_lock_irq(&s2idle_lock);
	if (pm_wakeup_pending())
		goto out;

	s2idle_state = S2IDLE_STATE_ENTER;
	raw_spin_unlock_irq(&s2idle_lock);

	/* Hold off CPU hotplug while every CPU is expected to idle. */
	get_online_cpus();
	cpuidle_resume();

	/* Push all the CPUs into the idle loop. */
	wake_up_all_idle_cpus();
	/* Make the current CPU wait so it can enter the idle loop too. */
	swait_event_exclusive(s2idle_wait_head,
		    s2idle_state == S2IDLE_STATE_WAKE);

	cpuidle_pause();
	put_online_cpus();

	raw_spin_lock_irq(&s2idle_lock);

 out:
	s2idle_state = S2IDLE_STATE_NONE;
	raw_spin_unlock_irq(&s2idle_lock);

	trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_TO_IDLE, false);
}
|
|
|
|
/*
 * s2idle_loop - Re-enter suspend-to-idle until a genuine wakeup occurs.
 *
 * Each iteration suspends devices at the noirq level, idles all CPUs via
 * s2idle_enter() and resumes the devices.  The loop ends on a device
 * suspend error or when a wakeup event is still pending after the
 * platform has had a chance to process it (i.e. it was not spurious).
 */
static void s2idle_loop(void)
{
	pm_pr_dbg("suspend-to-idle\n");

	for (;;) {
		int error;

		dpm_noirq_begin();

		/*
		 * Suspend-to-idle equals
		 * frozen processes + suspended devices + idle processors.
		 * Thus s2idle_enter() should be called right after
		 * all devices have been suspended.
		 *
		 * Wakeups during the noirq suspend of devices may be spurious,
		 * so prevent them from terminating the loop right away.
		 */
		error = dpm_noirq_suspend_devices(PMSG_SUSPEND);
		if (!error)
			s2idle_enter();
		else if (error == -EBUSY && pm_wakeup_pending())
			error = 0;

		/* Let the platform process the wakeup via its ->wake() hook. */
		if (!error && s2idle_ops && s2idle_ops->wake)
			s2idle_ops->wake();

		dpm_noirq_resume_devices(PMSG_RESUME);

		dpm_noirq_end();

		if (error)
			break;

		if (s2idle_ops && s2idle_ops->sync)
			s2idle_ops->sync();

		/* A wakeup still pending here terminates the loop. */
		if (pm_wakeup_pending())
			break;

		/* The wakeup was spurious: clear it and go back to sleep. */
		pm_wakeup_clear(false);
	}

	pm_pr_dbg("resume from suspend-to-idle\n");
}
|
|
|
|
/*
 * s2idle_wake - Break the suspend-to-idle wait in s2idle_enter().
 *
 * Only acts while suspend-to-idle is in progress (s2idle_state above
 * S2IDLE_STATE_NONE).  Uses irqsave locking, so it is safe to call from
 * interrupt context.
 */
void s2idle_wake(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&s2idle_lock, flags);
	if (s2idle_state > S2IDLE_STATE_NONE) {
		s2idle_state = S2IDLE_STATE_WAKE;
		swake_up_one(&s2idle_wait_head);
	}
	raw_spin_unlock_irqrestore(&s2idle_lock, flags);
}
EXPORT_SYMBOL_GPL(s2idle_wake);
|
|
|
|
/*
 * valid_state - Check whether the platform supports the given sleep state.
 *
 * PM_SUSPEND_STANDBY and PM_SUSPEND_MEM require low-level support and must
 * be validated by the platform; with no ->valid() callback, no state is
 * considered valid.
 */
static bool valid_state(suspend_state_t state)
{
	if (!suspend_ops || !suspend_ops->valid)
		return false;

	return suspend_ops->valid(state);
}
|
|
|
|
/* Populate the state tables for states that need no platform suspend_ops. */
void __init pm_states_init(void)
{
	/* "mem" and "freeze" are always present in /sys/power/state. */
	pm_states[PM_SUSPEND_MEM] = pm_labels[PM_SUSPEND_MEM];
	pm_states[PM_SUSPEND_TO_IDLE] = pm_labels[PM_SUSPEND_TO_IDLE];
	/*
	 * Suspend-to-idle should be supported even without any suspend_ops,
	 * initialize mem_sleep_states[] accordingly here.
	 */
	mem_sleep_states[PM_SUSPEND_TO_IDLE] = mem_sleep_labels[PM_SUSPEND_TO_IDLE];
}
|
|
|
|
static int __init mem_sleep_default_setup(char *str)
|
|
{
|
|
suspend_state_t state;
|
|
|
|
for (state = PM_SUSPEND_TO_IDLE; state <= PM_SUSPEND_MEM; state++)
|
|
if (mem_sleep_labels[state] &&
|
|
!strcmp(str, mem_sleep_labels[state])) {
|
|
mem_sleep_default = state;
|
|
break;
|
|
}
|
|
|
|
return 1;
|
|
}
|
|
__setup("mem_sleep_default=", mem_sleep_default_setup);
|
|
|
|
/**
 * suspend_set_ops - Set the global suspend method table.
 * @ops: Suspend operations to use.
 *
 * Also fills in mem_sleep_states[] and pm_states[] for the standby and mem
 * states that the new operations declare valid, and switches
 * mem_sleep_current over when the configured default matches one of them.
 */
void suspend_set_ops(const struct platform_suspend_ops *ops)
{
	lock_system_sleep();

	suspend_ops = ops;

	if (valid_state(PM_SUSPEND_STANDBY)) {
		mem_sleep_states[PM_SUSPEND_STANDBY] = mem_sleep_labels[PM_SUSPEND_STANDBY];
		pm_states[PM_SUSPEND_STANDBY] = pm_labels[PM_SUSPEND_STANDBY];
		if (mem_sleep_default == PM_SUSPEND_STANDBY)
			mem_sleep_current = PM_SUSPEND_STANDBY;
	}
	if (valid_state(PM_SUSPEND_MEM)) {
		mem_sleep_states[PM_SUSPEND_MEM] = mem_sleep_labels[PM_SUSPEND_MEM];
		/* mem_sleep_default starts at PM_SUSPEND_MAX, i.e. >= MEM. */
		if (mem_sleep_default >= PM_SUSPEND_MEM)
			mem_sleep_current = PM_SUSPEND_MEM;
	}

	unlock_system_sleep();
}
EXPORT_SYMBOL_GPL(suspend_set_ops);
|
|
|
|
/**
|
|
* suspend_valid_only_mem - Generic memory-only valid callback.
|
|
*
|
|
* Platform drivers that implement mem suspend only and only need to check for
|
|
* that in their .valid() callback can use this instead of rolling their own
|
|
* .valid() callback.
|
|
*/
|
|
int suspend_valid_only_mem(suspend_state_t state)
|
|
{
|
|
return state == PM_SUSPEND_MEM;
|
|
}
|
|
EXPORT_SYMBOL_GPL(suspend_valid_only_mem);
|
|
|
|
/* Suspend-to-idle always works; other states need a platform ->enter(). */
static bool sleep_state_supported(suspend_state_t state)
{
	if (state == PM_SUSPEND_TO_IDLE)
		return true;

	return suspend_ops && suspend_ops->enter;
}
|
|
|
|
/* Run the platform ->prepare() callback, except for suspend-to-idle. */
static int platform_suspend_prepare(suspend_state_t state)
{
	if (state != PM_SUSPEND_TO_IDLE && suspend_ops->prepare)
		return suspend_ops->prepare();

	return 0;
}
|
|
|
|
/* Run the s2idle ->prepare() callback, for suspend-to-idle only. */
static int platform_suspend_prepare_late(suspend_state_t state)
{
	if (state == PM_SUSPEND_TO_IDLE && s2idle_ops && s2idle_ops->prepare)
		return s2idle_ops->prepare();

	return 0;
}
|
|
|
|
/* Run the platform ->prepare_late() callback, except for suspend-to-idle. */
static int platform_suspend_prepare_noirq(suspend_state_t state)
{
	if (state != PM_SUSPEND_TO_IDLE && suspend_ops->prepare_late)
		return suspend_ops->prepare_late();

	return 0;
}
|
|
|
|
/* Run the platform ->wake() callback, except for suspend-to-idle. */
static void platform_resume_noirq(suspend_state_t state)
{
	if (state == PM_SUSPEND_TO_IDLE)
		return;

	if (suspend_ops->wake)
		suspend_ops->wake();
}
|
|
|
|
/* Run the s2idle ->restore() callback, for suspend-to-idle only. */
static void platform_resume_early(suspend_state_t state)
{
	if (state != PM_SUSPEND_TO_IDLE)
		return;

	if (s2idle_ops && s2idle_ops->restore)
		s2idle_ops->restore();
}
|
|
|
|
/* Run the platform ->finish() callback, except for suspend-to-idle. */
static void platform_resume_finish(suspend_state_t state)
{
	if (state == PM_SUSPEND_TO_IDLE)
		return;

	if (suspend_ops->finish)
		suspend_ops->finish();
}
|
|
|
|
/*
 * platform_suspend_begin - Invoke the platform's begin() callback.
 *
 * Suspend-to-idle prefers the s2idle ->begin() callback, but falls back to
 * the regular suspend_ops ->begin() if the former is not provided.
 */
static int platform_suspend_begin(suspend_state_t state)
{
	int error = 0;

	if (state == PM_SUSPEND_TO_IDLE && s2idle_ops && s2idle_ops->begin)
		error = s2idle_ops->begin();
	else if (suspend_ops && suspend_ops->begin)
		error = suspend_ops->begin(state);

	return error;
}
|
|
|
|
/*
 * platform_resume_end - Invoke the platform's end() callback.
 *
 * Mirrors platform_suspend_begin(): suspend-to-idle prefers the s2idle
 * ->end() callback and falls back to the suspend_ops one otherwise.
 */
static void platform_resume_end(suspend_state_t state)
{
	if (state == PM_SUSPEND_TO_IDLE && s2idle_ops && s2idle_ops->end) {
		s2idle_ops->end();
		return;
	}

	if (suspend_ops && suspend_ops->end)
		suspend_ops->end();
}
|
|
|
|
/* Run the platform ->recover() callback, except for suspend-to-idle. */
static void platform_recover(suspend_state_t state)
{
	if (state == PM_SUSPEND_TO_IDLE)
		return;

	if (suspend_ops->recover)
		suspend_ops->recover();
}
|
|
|
|
/* Ask the platform whether to re-enter suspend (never for suspend-to-idle). */
static bool platform_suspend_again(suspend_state_t state)
{
	if (state != PM_SUSPEND_TO_IDLE && suspend_ops->suspend_again)
		return suspend_ops->suspend_again();

	return false;
}
|
|
|
|
#ifdef CONFIG_PM_DEBUG
|
|
static unsigned int pm_test_delay = 5;
|
|
module_param(pm_test_delay, uint, 0644);
|
|
MODULE_PARM_DESC(pm_test_delay,
|
|
"Number of seconds to wait before resuming from suspend test");
|
|
#endif
|
|
|
|
/*
 * suspend_test - Stop the transition at the selected pm_test level.
 *
 * When the global pm_test_level equals @level, wait pm_test_delay seconds
 * and return 1 so the caller unwinds instead of suspending further.
 * Returns 0 (continue) otherwise, and always when CONFIG_PM_DEBUG is off.
 */
static int suspend_test(int level)
{
#ifdef CONFIG_PM_DEBUG
	if (pm_test_level == level) {
		pr_info("suspend debug: Waiting for %d second(s).\n",
				pm_test_delay);
		mdelay(pm_test_delay * 1000);
		return 1;
	}
#endif /* !CONFIG_PM_DEBUG */
	return 0;
}
|
|
|
|
/**
 * suspend_prepare - Prepare for entering system sleep state.
 *
 * Common code run for every system sleep state that can be entered (except for
 * hibernation).  Run suspend notifiers, allocate the "suspend" console and
 * freeze processes.
 */
static int suspend_prepare(suspend_state_t state)
{
	int error, nr_calls = 0;

	if (!sleep_state_supported(state))
		return -EPERM;

	pm_prepare_console();

	error = __pm_notifier_call_chain(PM_SUSPEND_PREPARE, -1, &nr_calls);
	if (error) {
		/*
		 * Presumably nr_calls includes the notifier that failed;
		 * decrement it so only the successful ones get
		 * PM_POST_SUSPEND below — TODO confirm against
		 * __pm_notifier_call_chain().
		 */
		nr_calls--;
		goto Finish;
	}

	trace_suspend_resume(TPS("freeze_processes"), 0, true);
	error = suspend_freeze_processes();
	trace_suspend_resume(TPS("freeze_processes"), 0, false);
	if (!error)
		return 0;

	/* Freezing failed: record the failure and unwind the notifiers. */
	suspend_stats.failed_freeze++;
	dpm_save_failed_step(SUSPEND_FREEZE);
 Finish:
	__pm_notifier_call_chain(PM_POST_SUSPEND, nr_calls, NULL);
	pm_restore_console();
	return error;
}
|
|
|
|
/* default implementation; __weak so architectures can override it */
void __weak arch_suspend_disable_irqs(void)
{
	local_irq_disable();
}
|
|
|
|
/* default implementation; __weak so architectures can override it */
void __weak arch_suspend_enable_irqs(void)
{
	local_irq_enable();
}
|
|
|
|
/**
 * suspend_enter - Make the system enter the given sleep state.
 * @state: System sleep state to enter.
 * @wakeup: Returns information that the sleep state should not be re-entered.
 *
 * This function should be called after devices have been suspended.
 * The error-handling labels form an unwind ladder: each one undoes the
 * steps completed before the failure that jumps to it.
 */
static int suspend_enter(suspend_state_t state, bool *wakeup)
{
	int error;

	error = platform_suspend_prepare(state);
	if (error)
		goto Platform_finish;

	error = dpm_suspend_late(PMSG_SUSPEND);
	if (error) {
		pr_err("late suspend of devices failed\n");
		goto Platform_finish;
	}
	error = platform_suspend_prepare_late(state);
	if (error)
		goto Devices_early_resume;

	/*
	 * Suspend-to-idle skips the noirq/CPU/syscore path below, unless
	 * the TEST_PLATFORM test mode was requested.
	 */
	if (state == PM_SUSPEND_TO_IDLE && pm_test_level != TEST_PLATFORM) {
		s2idle_loop();
		goto Platform_early_resume;
	}

	error = dpm_suspend_noirq(PMSG_SUSPEND);
	if (error) {
		pr_err("noirq suspend of devices failed\n");
		goto Platform_early_resume;
	}
	error = platform_suspend_prepare_noirq(state);
	if (error)
		goto Platform_wake;

	if (suspend_test(TEST_PLATFORM))
		goto Platform_wake;

	error = suspend_disable_secondary_cpus();
	if (error || suspend_test(TEST_CPUS))
		goto Enable_cpus;

	arch_suspend_disable_irqs();
	BUG_ON(!irqs_disabled());

	system_state = SYSTEM_SUSPEND;

	error = syscore_suspend();
	if (!error) {
		/* A last-minute wakeup event aborts the final plunge. */
		*wakeup = pm_wakeup_pending();
		if (!(suspend_test(TEST_CORE) || *wakeup)) {
			trace_suspend_resume(TPS("machine_suspend"),
				state, true);
			/* The platform actually enters the sleep state here. */
			error = suspend_ops->enter(state);
			trace_suspend_resume(TPS("machine_suspend"),
				state, false);
		} else if (*wakeup) {
			error = -EBUSY;
		}
		syscore_resume();
	}

	system_state = SYSTEM_RUNNING;

	arch_suspend_enable_irqs();
	BUG_ON(irqs_disabled());

 Enable_cpus:
	suspend_enable_secondary_cpus();

 Platform_wake:
	platform_resume_noirq(state);
	dpm_resume_noirq(PMSG_RESUME);

 Platform_early_resume:
	platform_resume_early(state);

 Devices_early_resume:
	dpm_resume_early(PMSG_RESUME);

 Platform_finish:
	platform_resume_finish(state);
	return error;
}
|
|
|
|
/**
 * suspend_devices_and_enter - Suspend devices and enter system sleep state.
 * @state: System sleep state to enter.
 *
 * Returns 0 on success or a negative error code; devices and the console
 * are resumed on all paths before returning.
 */
int suspend_devices_and_enter(suspend_state_t state)
{
	int error;
	bool wakeup = false;

	if (!sleep_state_supported(state))
		return -ENOSYS;

	pm_suspend_target_state = state;

	error = platform_suspend_begin(state);
	if (error)
		goto Close;

	suspend_console();
	suspend_test_start();
	error = dpm_suspend_start(PMSG_SUSPEND);
	if (error) {
		pr_err("Some devices failed to suspend, or early wake event detected\n");
		goto Recover_platform;
	}
	suspend_test_finish("suspend devices");
	if (suspend_test(TEST_DEVICES))
		goto Recover_platform;

	/*
	 * Re-enter the sleep state for as long as the platform requests it
	 * via ->suspend_again() and no error or wakeup has occurred.
	 */
	do {
		error = suspend_enter(state, &wakeup);
	} while (!error && !wakeup && platform_suspend_again(state));

 Resume_devices:
	suspend_test_start();
	dpm_resume_end(PMSG_RESUME);
	suspend_test_finish("resume devices");
	trace_suspend_resume(TPS("resume_console"), state, true);
	resume_console();
	trace_suspend_resume(TPS("resume_console"), state, false);

 Close:
	platform_resume_end(state);
	pm_suspend_target_state = PM_SUSPEND_ON;
	return error;

 Recover_platform:
	/* Give the platform a chance to clean up, then resume devices. */
	platform_recover(state);
	goto Resume_devices;
}
|
|
|
|
/**
 * suspend_finish - Clean up before finishing the suspend sequence.
 *
 * Call platform code to clean up, restart processes, and free the console that
 * we've allocated. This routine is not called for hibernation.
 */
static void suspend_finish(void)
{
	suspend_thaw_processes();
	/* Counterpart of the PM_SUSPEND_PREPARE chain run by suspend_prepare(). */
	pm_notifier_call_chain(PM_POST_SUSPEND);
	pm_restore_console();
}
|
|
|
|
/**
 * enter_state - Do common work needed to enter system sleep state.
 * @state: System sleep state to enter.
 *
 * Make sure that no one else is trying to put the system into a sleep state.
 * Fail if that's not the case.  Otherwise, prepare for system suspend, make the
 * system enter the given sleep state and clean up after wakeup.
 */
static int enter_state(suspend_state_t state)
{
	int error;

	trace_suspend_resume(TPS("suspend_enter"), state, true);
	if (state == PM_SUSPEND_TO_IDLE) {
#ifdef CONFIG_PM_DEBUG
		/* These pm_test levels are not applicable to s2idle (see msg). */
		if (pm_test_level != TEST_NONE && pm_test_level <= TEST_CPUS) {
			pr_warn("Unsupported test mode for suspend to idle, please choose none/freezer/devices/platform.\n");
			return -EAGAIN;
		}
#endif
	} else if (!valid_state(state)) {
		return -EINVAL;
	}
	/* Only one system-wide transition may run at a time. */
	if (!mutex_trylock(&system_transition_mutex))
		return -EBUSY;

	if (state == PM_SUSPEND_TO_IDLE)
		s2idle_begin();

	if (!IS_ENABLED(CONFIG_SUSPEND_SKIP_SYNC)) {
		trace_suspend_resume(TPS("sync_filesystems"), 0, true);
		ksys_sync_helper();
		trace_suspend_resume(TPS("sync_filesystems"), 0, false);
	}

	pm_pr_dbg("Preparing system for sleep (%s)\n", mem_sleep_labels[state]);
	pm_suspend_clear_flags();
	error = suspend_prepare(state);
	if (error)
		goto Unlock;

	if (suspend_test(TEST_FREEZER))
		goto Finish;

	trace_suspend_resume(TPS("suspend_enter"), state, false);
	pm_pr_dbg("Suspending system (%s)\n", mem_sleep_labels[state]);
	/* Restrict allocations across the transition; restored on the way out. */
	pm_restrict_gfp_mask();
	error = suspend_devices_and_enter(state);
	pm_restore_gfp_mask();

 Finish:
	events_check_enabled = false;
	pm_pr_dbg("Finishing wakeup.\n");
	suspend_finish();
 Unlock:
	mutex_unlock(&system_transition_mutex);
	return error;
}
|
|
|
|
/**
|
|
* pm_suspend - Externally visible function for suspending the system.
|
|
* @state: System sleep state to enter.
|
|
*
|
|
* Check if the value of @state represents one of the supported states,
|
|
* execute enter_state() and update system suspend statistics.
|
|
*/
|
|
int pm_suspend(suspend_state_t state)
|
|
{
|
|
int error;
|
|
|
|
if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX)
|
|
return -EINVAL;
|
|
|
|
pr_info("suspend entry (%s)\n", mem_sleep_labels[state]);
|
|
error = enter_state(state);
|
|
if (error) {
|
|
suspend_stats.fail++;
|
|
dpm_save_failed_errno(error);
|
|
} else {
|
|
suspend_stats.success++;
|
|
}
|
|
pr_info("suspend exit\n");
|
|
return error;
|
|
}
|
|
EXPORT_SYMBOL(pm_suspend);
|