// SPDX-License-Identifier: GPL-2.0-only
/*
 * sleep.c - ACPI sleep support.
 *
 * Copyright (c) 2005 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
 * Copyright (c) 2004 David Shaohua Li <shaohua.li@intel.com>
 * Copyright (c) 2000-2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 */

#define pr_fmt(fmt) "ACPI: PM: " fmt

#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/dmi.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/suspend.h>
#include <linux/reboot.h>
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/syscore_ops.h>
#include <asm/io.h>
#include <trace/events/power.h>

#include "internal.h"
#include "sleep.h"

/*
 * Some HW-full platforms do not have _S5, so they may need
 * to leverage efi power off for a shutdown.
 */
bool acpi_no_s5;
static u8 sleep_states[ACPI_S_STATE_COUNT];

static void acpi_sleep_tts_switch(u32 acpi_state)
{
	acpi_status status;

	status = acpi_execute_simple_method(NULL, "\\_TTS", acpi_state);
	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
		/*
		 * The OS can't evaluate the _TTS object correctly. A warning
		 * message will be printed, but it won't break anything.
		 */
		pr_notice("Failure in evaluating _TTS object\n");
	}
}

static int tts_notify_reboot(struct notifier_block *this,
			     unsigned long code, void *x)
{
	acpi_sleep_tts_switch(ACPI_STATE_S5);
	return NOTIFY_DONE;
}

static struct notifier_block tts_notifier = {
	.notifier_call	= tts_notify_reboot,
	.next		= NULL,
	.priority	= 0,
};

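/*
 * The tts_notifier above is presumably hooked onto the reboot notifier chain
 * elsewhere in this file (via register_reboot_notifier(), not shown here),
 * so that _TTS is evaluated with ACPI_STATE_S5 before the system reboots or
 * powers off, mirroring the _TTS calls made around the sleep transitions
 * below.
 */
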
static int acpi_sleep_prepare(u32 acpi_state)
{
#ifdef CONFIG_ACPI_SLEEP
	unsigned long acpi_wakeup_address;

	/* do we have a wakeup address for S2 and S3? */
	if (acpi_state == ACPI_STATE_S3) {
		acpi_wakeup_address = acpi_get_wakeup_address();
		if (!acpi_wakeup_address)
			return -EFAULT;
		acpi_set_waking_vector(acpi_wakeup_address);
	}
#endif
	pr_info("Preparing to enter system sleep state S%d\n", acpi_state);
	acpi_enable_wakeup_devices(acpi_state);
	acpi_enter_sleep_state_prep(acpi_state);
	return 0;
}

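/*
 * acpi_sleep_state_supported - check whether the platform advertises @sleep_state.
 *
 * A state counts as supported when its _Sx sleep-type package can be
 * evaluated and, on hardware-reduced ACPI platforms, only when the FADT
 * also provides sleep control and sleep status registers to write the
 * sleep type to.
 */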
bool acpi_sleep_state_supported(u8 sleep_state)
{
	acpi_status status;
	u8 type_a, type_b;

	status = acpi_get_sleep_type_data(sleep_state, &type_a, &type_b);
	return ACPI_SUCCESS(status) && (!acpi_gbl_reduced_hardware
		|| (acpi_gbl_FADT.sleep_control.address
			&& acpi_gbl_FADT.sleep_status.address));
}

#ifdef CONFIG_ACPI_SLEEP
static u32 acpi_target_sleep_state = ACPI_STATE_S0;

u32 acpi_target_system_state(void)
{
	return acpi_target_sleep_state;
}
EXPORT_SYMBOL_GPL(acpi_target_system_state);

static bool pwr_btn_event_pending;

/*
 * The ACPI specification wants us to save NVS memory regions during hibernation
 * and to restore them during the subsequent resume. Windows does that also for
 * suspend to RAM. However, it is known that this mechanism does not work on
 * all machines, so we allow the user to disable it with the help of the
 * 'acpi_sleep=nonvs' kernel command line option.
 */
static bool nvs_nosave;

void __init acpi_nvs_nosave(void)
{
	nvs_nosave = true;
}

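/*
 * For illustration only: acpi_nvs_nosave() above is assumed to be called
 * from the architecture's 'acpi_sleep=' early-parameter handler when the
 * user boots with 'acpi_sleep=nonvs'; nothing in this file parses the
 * option itself.
 */
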
/*
 * The ACPI specification wants us to save NVS memory regions during hibernation
 * but says nothing about saving NVS during S3. Not all versions of Windows
 * save NVS on S3 suspend either, and it is clear that not all systems need
 * NVS to be saved at S3 time. To improve suspend/resume time, allow the
 * user to disable saving NVS on S3 if their system does not require it, but
 * continue to save/restore NVS for S4 as specified.
 */
static bool nvs_nosave_s3;

void __init acpi_nvs_nosave_s3(void)
{
	nvs_nosave_s3 = true;
}

static int __init init_nvs_save_s3(const struct dmi_system_id *d)
{
	nvs_nosave_s3 = false;
	return 0;
}

/*
 * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
 * user to request that behavior by using the 'acpi_old_suspend_ordering'
 * kernel command line option that causes the following variable to be set.
 */
static bool old_suspend_ordering;

void __init acpi_old_suspend_ordering(void)
{
	old_suspend_ordering = true;
}

static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
{
	acpi_old_suspend_ordering();
	return 0;
}

static int __init init_nvs_nosave(const struct dmi_system_id *d)
{
	acpi_nvs_nosave();
	return 0;
}

bool acpi_sleep_default_s3;

static int __init init_default_s3(const struct dmi_system_id *d)
{
	acpi_sleep_default_s3 = true;
	return 0;
}

static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
	{
	.callback = init_old_suspend_ordering,
	.ident = "Abit KN9 (nForce4 variant)",
	.matches = {
		DMI_MATCH(DMI_BOARD_VENDOR, "http://www.abit.com.tw/"),
		DMI_MATCH(DMI_BOARD_NAME, "KN9 Series(NF-CK804)"),
		},
	},
	{
	.callback = init_old_suspend_ordering,
	.ident = "HP xw4600 Workstation",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
		DMI_MATCH(DMI_PRODUCT_NAME, "HP xw4600 Workstation"),
		},
	},
	{
	.callback = init_old_suspend_ordering,
	.ident = "Asus Pundit P1-AH2 (M2N8L motherboard)",
	.matches = {
		DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."),
		DMI_MATCH(DMI_BOARD_NAME, "M2N8L"),
		},
	},
	{
	.callback = init_old_suspend_ordering,
	.ident = "Panasonic CF51-2L",
	.matches = {
		DMI_MATCH(DMI_BOARD_VENDOR,
				"Matsushita Electric Industrial Co.,Ltd."),
		DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Sony Vaio VGN-FW41E_H",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW41E_H"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Sony Vaio VGN-FW21E",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21E"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Sony Vaio VGN-FW21M",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21M"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Sony Vaio VPCEB17FX",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
		DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB17FX"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Sony Vaio VGN-SR11M",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Everex StepNote Series",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."),
		DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Sony Vaio VPCEB1Z1E",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
		DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1Z1E"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Sony Vaio VGN-NW130D",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Sony Vaio VPCCW29FX",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
		DMI_MATCH(DMI_PRODUCT_NAME, "VPCCW29FX"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Averatec AV1020-ED2",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"),
		DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"),
		},
	},
	{
	.callback = init_old_suspend_ordering,
	.ident = "Asus A8N-SLI DELUXE",
	.matches = {
		DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
		DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI DELUXE"),
		},
	},
	{
	.callback = init_old_suspend_ordering,
	.ident = "Asus A8N-SLI Premium",
	.matches = {
		DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
		DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Sony Vaio VGN-SR26GN_P",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR26GN_P"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Sony Vaio VPCEB1S1E",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
		DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1S1E"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Sony Vaio VGN-FW520F",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
		DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Asus K54C",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
		DMI_MATCH(DMI_PRODUCT_NAME, "K54C"),
		},
	},
	{
	.callback = init_nvs_nosave,
	.ident = "Asus K54HR",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
		DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"),
		},
	},
	{
	.callback = init_nvs_save_s3,
	.ident = "Asus 1025C",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
		DMI_MATCH(DMI_PRODUCT_NAME, "1025C"),
		},
	},
	/*
	 * https://bugzilla.kernel.org/show_bug.cgi?id=189431
	 * Lenovo G50-45 is a platform later than 2012, but needs nvs memory
	 * saving during S3.
	 */
	{
	.callback = init_nvs_save_s3,
	.ident = "Lenovo G50-45",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
		DMI_MATCH(DMI_PRODUCT_NAME, "80E3"),
		},
	},
	/*
	 * ThinkPad X1 Tablet(2016) cannot do suspend-to-idle using
	 * the Low Power S0 Idle firmware interface (see
	 * https://bugzilla.kernel.org/show_bug.cgi?id=199057).
	 */
	{
	.callback = init_default_s3,
	.ident = "ThinkPad X1 Tablet(2016)",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
		DMI_MATCH(DMI_PRODUCT_NAME, "20GGA00L00"),
		},
	},
	/*
	 * ASUS B1400CEAE hangs on resume from suspend (see
	 * https://bugzilla.kernel.org/show_bug.cgi?id=215742).
	 */
	{
	.callback = init_default_s3,
	.ident = "ASUS B1400CEAE",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
		DMI_MATCH(DMI_PRODUCT_NAME, "ASUS EXPERTBOOK B1400CEAE"),
		},
	},
	{},
};

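/*
 * Extending the blacklist amounts to appending one more entry before the
 * terminating {} of acpisleep_dmi_table above. A purely hypothetical example
 * (vendor and product strings invented for illustration):
 *
 *	{
 *	.callback = init_default_s3,
 *	.ident = "Example Laptop 15",
 *	.matches = {
 *		DMI_MATCH(DMI_SYS_VENDOR, "EXAMPLE"),
 *		DMI_MATCH(DMI_PRODUCT_NAME, "Laptop 15"),
 *		},
 *	},
 *
 * The callback selects which workaround (NVS handling, old suspend ordering
 * or defaulting to S3) is applied when all of the DMI strings match.
 */
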
static bool ignore_blacklist;

void __init acpi_sleep_no_blacklist(void)
{
	ignore_blacklist = true;
}

static void __init acpi_sleep_dmi_check(void)
{
	if (ignore_blacklist)
		return;

	if (dmi_get_bios_year() >= 2012)
		acpi_nvs_nosave_s3();

	dmi_check_system(acpisleep_dmi_table);
}

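/*
 * The dmi_get_bios_year() >= 2012 check above means that machines with a
 * 2012-or-newer BIOS skip saving NVS on S3 by default; entries in
 * acpisleep_dmi_table that use init_nvs_save_s3 (such as the Lenovo G50-45)
 * opt individual machines back into NVS saving.
 */
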
/**
 * acpi_pm_freeze - Disable the GPEs and suspend EC transactions.
 */
static int acpi_pm_freeze(void)
{
	acpi_disable_all_gpes();
	acpi_os_wait_events_complete();
	acpi_ec_block_transactions();
	return 0;
}

/**
 * acpi_pm_pre_suspend - Enable wakeup devices, "freeze" EC and save NVS.
 */
static int acpi_pm_pre_suspend(void)
{
	acpi_pm_freeze();
	return suspend_nvs_save();
}

/**
 * __acpi_pm_prepare - Prepare the platform to enter the target state.
 *
 * If necessary, set the firmware waking vector and do arch-specific
 * nastiness to get the wakeup code to the waking vector.
 */
static int __acpi_pm_prepare(void)
{
	int error = acpi_sleep_prepare(acpi_target_sleep_state);

	if (error)
		acpi_target_sleep_state = ACPI_STATE_S0;

	return error;
}

/**
 * acpi_pm_prepare - Prepare the platform to enter the target sleep
 *	state and disable the GPEs.
 */
static int acpi_pm_prepare(void)
{
	int error = __acpi_pm_prepare();

	if (!error)
		error = acpi_pm_pre_suspend();

	return error;
}

/**
 * acpi_pm_finish - Instruct the platform to leave a sleep state.
 *
 * This is called after we wake back up (or if entering the sleep state
 * failed).
 */
static void acpi_pm_finish(void)
{
	struct acpi_device *pwr_btn_adev;
	u32 acpi_state = acpi_target_sleep_state;

	acpi_ec_unblock_transactions();
	suspend_nvs_free();

	if (acpi_state == ACPI_STATE_S0)
		return;

	pr_info("Waking up from system sleep state S%d\n", acpi_state);
	acpi_disable_wakeup_devices(acpi_state);
	acpi_leave_sleep_state(acpi_state);

	/* reset firmware waking vector */
	acpi_set_waking_vector(0);

	acpi_target_sleep_state = ACPI_STATE_S0;

	acpi_resume_power_resources();

	/*
	 * If we were woken with the fixed power button, provide a small
	 * hint to userspace in the form of a wakeup event on the fixed power
	 * button device (if it can be found).
	 *
	 * We delay the event generation til now, as the PM layer requires
	 * timekeeping to be running before we generate events.
	 */
	if (!pwr_btn_event_pending)
		return;

	pwr_btn_event_pending = false;
	pwr_btn_adev = acpi_dev_get_first_match_dev(ACPI_BUTTON_HID_POWERF,
						    NULL, -1);
	if (pwr_btn_adev) {
		pm_wakeup_event(&pwr_btn_adev->dev, 0);
		acpi_dev_put(pwr_btn_adev);
	}
}

/**
 * acpi_pm_start - Start system PM transition.
 */
static void acpi_pm_start(u32 acpi_state)
{
	acpi_target_sleep_state = acpi_state;
	acpi_sleep_tts_switch(acpi_target_sleep_state);
	acpi_scan_lock_acquire();
}

/**
 * acpi_pm_end - Finish up system PM transition.
 */
static void acpi_pm_end(void)
{
	acpi_turn_off_unused_power_resources();
	acpi_scan_lock_release();

	/*
	 * This is necessary in case acpi_pm_finish() is not called during a
	 * failing transition to a sleep state.
	 */
	acpi_target_sleep_state = ACPI_STATE_S0;
	acpi_sleep_tts_switch(acpi_target_sleep_state);
}
#else /* !CONFIG_ACPI_SLEEP */
#define sleep_no_lps0	(1)
#define acpi_target_sleep_state	ACPI_STATE_S0
#define acpi_sleep_default_s3	(1)
static inline void acpi_sleep_dmi_check(void) {}
#endif /* CONFIG_ACPI_SLEEP */

#ifdef CONFIG_SUSPEND
static u32 acpi_suspend_states[] = {
	[PM_SUSPEND_ON] = ACPI_STATE_S0,
	[PM_SUSPEND_STANDBY] = ACPI_STATE_S1,
	[PM_SUSPEND_MEM] = ACPI_STATE_S3,
	[PM_SUSPEND_MAX] = ACPI_STATE_S5
};

/**
 * acpi_suspend_begin - Set the target system sleep state to the state
 *	associated with given @pm_state, if supported.
 */
static int acpi_suspend_begin(suspend_state_t pm_state)
{
	u32 acpi_state = acpi_suspend_states[pm_state];
	int error;

	error = (nvs_nosave || nvs_nosave_s3) ? 0 : suspend_nvs_alloc();
	if (error)
		return error;

	if (!sleep_states[acpi_state]) {
		pr_err("ACPI does not support sleep state S%u\n", acpi_state);
		return -ENOSYS;
	}
	if (acpi_state > ACPI_STATE_S1)
		pm_set_suspend_via_firmware();

	acpi_pm_start(acpi_state);
	return 0;
}

/**
 * acpi_suspend_enter - Actually enter a sleep state.
 * @pm_state: ignored
 *
 * Flush caches and go to sleep. For STR we have to call arch-specific
 * assembly, which in turn calls acpi_enter_sleep_state().
 * It's unfortunate, but it works. Please fix if you're feeling frisky.
 */
static int acpi_suspend_enter(suspend_state_t pm_state)
{
	acpi_status status = AE_OK;
	u32 acpi_state = acpi_target_sleep_state;
	int error;

	trace_suspend_resume(TPS("acpi_suspend"), acpi_state, true);
	switch (acpi_state) {
	case ACPI_STATE_S1:
		barrier();
		status = acpi_enter_sleep_state(acpi_state);
		break;

	case ACPI_STATE_S3:
		if (!acpi_suspend_lowlevel)
			return -ENOSYS;
		error = acpi_suspend_lowlevel();
		if (error)
			return error;
		pr_info("Low-level resume complete\n");
		pm_set_resume_via_firmware();
		break;
	}
	trace_suspend_resume(TPS("acpi_suspend"), acpi_state, false);

	/* This violates the spec but is required for bug compatibility. */
	acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);

	/* Reprogram control registers */
	acpi_leave_sleep_state_prep(acpi_state);

	/*
	 * The ACPI 3.0 spec (P62) says that it's the responsibility
	 * of the OSPM to clear the status bit [ implying that the
	 * POWER_BUTTON event should not reach userspace ].
	 *
	 * However, we do generate a small hint for userspace in the form of
	 * a wakeup event. We flag this condition for now and generate the
	 * event later, as we're currently too early in resume to be able to
	 * generate wakeup events.
	 */
	if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3)) {
		acpi_event_status pwr_btn_status = ACPI_EVENT_FLAG_DISABLED;

		acpi_get_event_status(ACPI_EVENT_POWER_BUTTON, &pwr_btn_status);

		if (pwr_btn_status & ACPI_EVENT_FLAG_STATUS_SET) {
			acpi_clear_event(ACPI_EVENT_POWER_BUTTON);
			/* Flag for later */
			pwr_btn_event_pending = true;
		}
	}

	/*
	 * Disable and clear GPE status before interrupts are enabled. Some
	 * GPEs (like wakeup GPEs) have no handler, so this avoids spurious
	 * GPE firing; acpi_leave_sleep_state() will re-enable specific GPEs
	 * later.
	 */
	acpi_disable_all_gpes();
	/* Allow EC transactions to happen. */
	acpi_ec_unblock_transactions();

	suspend_nvs_restore();

	return ACPI_SUCCESS(status) ? 0 : -EFAULT;
}

static int acpi_suspend_state_valid(suspend_state_t pm_state)
{
	u32 acpi_state;

	switch (pm_state) {
	case PM_SUSPEND_ON:
	case PM_SUSPEND_STANDBY:
	case PM_SUSPEND_MEM:
		acpi_state = acpi_suspend_states[pm_state];

		return sleep_states[acpi_state];
	default:
		return 0;
	}
}

static const struct platform_suspend_ops acpi_suspend_ops = {
	.valid = acpi_suspend_state_valid,
	.begin = acpi_suspend_begin,
	.prepare_late = acpi_pm_prepare,
	.enter = acpi_suspend_enter,
	.wake = acpi_pm_finish,
	.end = acpi_pm_end,
};

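/*
 * For a regular suspend-to-RAM cycle the suspend core invokes the
 * acpi_suspend_ops callbacks above roughly as begin, prepare_late, enter,
 * wake and end: the target state is latched and _TTS run in acpi_pm_start(),
 * the platform is prepared and GPEs are disabled right before entering the
 * state, and acpi_pm_finish()/acpi_pm_end() undo that on the way out.
 */
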
/**
 * acpi_suspend_begin_old - Set the target system sleep state to the
 *	state associated with given @pm_state, if supported, and
 *	execute the _PTS control method. This function is used if the
 *	pre-ACPI 2.0 suspend ordering has been requested.
 */
static int acpi_suspend_begin_old(suspend_state_t pm_state)
{
	int error = acpi_suspend_begin(pm_state);

	if (!error)
		error = __acpi_pm_prepare();

	return error;
}

/*
 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
 * been requested.
 */
static const struct platform_suspend_ops acpi_suspend_ops_old = {
	.valid = acpi_suspend_state_valid,
	.begin = acpi_suspend_begin_old,
	.prepare_late = acpi_pm_pre_suspend,
	.enter = acpi_suspend_enter,
	.wake = acpi_pm_finish,
	.end = acpi_pm_end,
	.recover = acpi_pm_finish,
};

static bool s2idle_wakeup;

int acpi_s2idle_begin(void)
{
	acpi_scan_lock_acquire();
	return 0;
}

int acpi_s2idle_prepare(void)
{
	if (acpi_sci_irq_valid()) {
		enable_irq_wake(acpi_sci_irq);
		acpi_ec_set_gpe_wake_mask(ACPI_GPE_ENABLE);
	}

	acpi_enable_wakeup_devices(ACPI_STATE_S0);

	/* Change the configuration of GPEs to avoid spurious wakeup. */
	acpi_enable_all_wakeup_gpes();
	acpi_os_wait_events_complete();

	s2idle_wakeup = true;
	return 0;
}

bool acpi_s2idle_wake(void)
{
	if (!acpi_sci_irq_valid())
		return pm_wakeup_pending();

	while (pm_wakeup_pending()) {
		/*
		 * If IRQD_WAKEUP_ARMED is set for the SCI at this point, the
		 * SCI has not triggered while suspended, so bail out (the
		 * wakeup is pending anyway and the SCI is not the source of
		 * it).
		 */
		if (irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq))) {
			pm_pr_dbg("Wakeup unrelated to ACPI SCI\n");
			return true;
		}

		/*
		 * If the status bit of any enabled fixed event is set, the
		 * wakeup is regarded as valid.
		 */
		if (acpi_any_fixed_event_status_set()) {
			pm_pr_dbg("ACPI fixed event wakeup\n");
			return true;
		}

		/* Check wakeups from drivers sharing the SCI. */
		if (acpi_check_wakeup_handlers()) {
			pm_pr_dbg("ACPI custom handler wakeup\n");
			return true;
		}

		/*
		 * Check non-EC GPE wakeups and if there are none, cancel the
		 * SCI-related wakeup and dispatch the EC GPE.
		 */
		if (acpi_ec_dispatch_gpe()) {
			pm_pr_dbg("ACPI non-EC GPE wakeup\n");
			return true;
		}

		acpi_os_wait_events_complete();

		/*
		 * The SCI is in the "suspended" state now and it cannot produce
		 * new wakeup events till the rearming below, so if any of them
		 * are pending here, they must be resulting from the processing
		 * of EC events above or coming from somewhere else.
		 */
		if (pm_wakeup_pending()) {
			pm_pr_dbg("Wakeup after ACPI Notify sync\n");
			return true;
		}

		pm_pr_dbg("Rearming ACPI SCI for wakeup\n");

		pm_wakeup_clear(acpi_sci_irq);
		rearm_wake_irq(acpi_sci_irq);
	}

	return false;
}

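/*
 * In short, acpi_s2idle_wake() treats a wakeup as genuine if it did not
 * arrive through the SCI, or if a fixed event, a registered wakeup handler
 * or a non-EC GPE is pending. Pure EC activity is processed in place and the
 * SCI is rearmed, so the s2idle loop can go back to sleep without a full
 * resume.
 */
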
void acpi_s2idle_restore(void)
{
	/*
	 * Drain pending events before restoring the working-state configuration
	 * of GPEs.
	 */
	acpi_os_wait_events_complete(); /* synchronize GPE processing */
	acpi_ec_flush_work(); /* flush the EC driver's workqueues */
	acpi_os_wait_events_complete(); /* synchronize Notify handling */

	s2idle_wakeup = false;

	acpi_enable_all_runtime_gpes();

	acpi_disable_wakeup_devices(ACPI_STATE_S0);

	if (acpi_sci_irq_valid()) {
		acpi_ec_set_gpe_wake_mask(ACPI_GPE_DISABLE);
		disable_irq_wake(acpi_sci_irq);
	}
}

void acpi_s2idle_end(void)
{
	acpi_scan_lock_release();
}

static const struct platform_s2idle_ops acpi_s2idle_ops = {
	.begin = acpi_s2idle_begin,
	.prepare = acpi_s2idle_prepare,
	.wake = acpi_s2idle_wake,
	.restore = acpi_s2idle_restore,
	.end = acpi_s2idle_end,
};

void __weak acpi_s2idle_setup(void)
{
	s2idle_set_ops(&acpi_s2idle_ops);
}

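/*
 * The __weak default above simply registers the generic s2idle callbacks;
 * an architecture (presumably the x86 Low Power S0 Idle code) can provide
 * its own acpi_s2idle_setup() to install extended callbacks instead.
 */
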
static void acpi_sleep_suspend_setup(void)
{
	bool suspend_ops_needed = false;
	int i;

	for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++)
		if (acpi_sleep_state_supported(i)) {
			sleep_states[i] = 1;
			suspend_ops_needed = true;
		}

	if (suspend_ops_needed)
		suspend_set_ops(old_suspend_ordering ?
				&acpi_suspend_ops_old : &acpi_suspend_ops);

	acpi_s2idle_setup();
}

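/*
 * acpi_sleep_suspend_setup() above only probes S1-S3 (the loop stops at
 * ACPI_STATE_S4) and registers the suspend ops only if at least one of
 * those states is actually supported, while the s2idle ops are registered
 * unconditionally; S4/hibernation is presumably handled by separate setup
 * code elsewhere in this file.
 */
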
#else /* !CONFIG_SUSPEND */
|
ACPI / sleep: EC-based wakeup from suspend-to-idle on recent systems
Some recent Dell laptops, including the XPS13 model numbers 9360 and
9365, cannot be woken up from suspend-to-idle by pressing the power
button which is unexpected and makes that feature less usable on
those systems. Moreover, on the 9365 ACPI S3 (suspend-to-RAM) is
not expected to be used at all (the OS these systems ship with never
exercises the ACPI S3 path in the firmware) and suspend-to-idle is
the only viable system suspend mechanism there.
The reason why the power button wakeup from suspend-to-idle doesn't
work on those systems is because their power button events are
signaled by the EC (Embedded Controller), whose GPE (General Purpose
Event) line is disabled during suspend-to-idle transitions in Linux.
That is done on purpose, because in general the EC tends to be noisy
for various reasons (battery and thermal updates and similar, for
example) and all events signaled by it would kick the CPUs out of
deep idle states while in suspend-to-idle, which effectively might
defeat its purpose.
Of course, on the Dell systems in question the EC GPE must be enabled
during suspend-to-idle transitions for the button press events to
be signaled while suspended at all, but fortunately there is a way
out of this puzzle.
First of all, those systems have the ACPI_FADT_LOW_POWER_S0 flag set
in their ACPI tables, which means that the OS is expected to prefer
the "low power S0 idle" system state over ACPI S3 on them. That
causes the most recent versions of other OSes to simply ignore ACPI
S3 on those systems, so it is reasonable to expect that it should not
be necessary to block GPEs during suspend-to-idle on them.
Second, the systems in question provide a special
firmware interface that can be used to indicate to the platform that
the OS is transitioning into a system-wide low-power state in which
certain types of activity are not desirable or that it is leaving
such a state and that (in principle) should allow the platform to
adjust its operation mode accordingly.
That interface is a special _DSM object under a System Power
Management Controller device (PNP0D80). The expected way to use it
is to invoke function 0 from it on system initialization, functions
3 and 5 during suspend transitions and functions 4 and 6 during
resume transitions (to reverse the actions carried out by the
former). In particular, function 5 from the "Low-Power S0" device
_DSM is expected to cause the platform to put itself into a low-power
operation mode which should include making the EC less verbose (so to
speak). Next, on resume, function 6 switches the platform back to
the "working-state" operation mode.
In accordance with the above, modify the ACPI suspend-to-idle code
to look for the "Low-Power S0" _DSM interface on platforms with the
ACPI_FADT_LOW_POWER_S0 flag set in the ACPI tables. If it's there,
use it during suspend-to-idle transitions as prescribed and avoid
changing the GPE configuration in that case. [That should reflect
what the most recent versions of other OSes do.]
Also modify the ACPI EC driver to make it handle events during
suspend-to-idle in the usual way if the "Low-Power S0" _DSM interface
is going to be used to make the power button events work while
suspended on the Dell machines mentioned above.
Link: http://www.uefi.org/sites/default/files/resources/Intel_ACPI_Low_Power_S0_Idle.pdf
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2017-06-23 21:24:32 +08:00
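As a rough illustration of the _DSM usage described above, here is a minimal sketch, not the in-tree implementation: the GUID is the one associated with the Low-Power S0 Idle interface in its public documentation (treat it as an assumption here), and the helper names are made up for this example.

#include <linux/acpi.h>
#include <linux/uuid.h>

/*
 * Illustrative sketch only: evaluate one function of the "Low-Power S0"
 * _DSM under the PNP0D80 device.  The GUID below is assumed from the
 * interface's public documentation; the helpers are hypothetical.
 */
static const guid_t lps0_dsm_guid =
	GUID_INIT(0xc4eb40a0, 0x6cd2, 0x11e2,
		  0xbc, 0xfd, 0x08, 0x00, 0x20, 0x0c, 0x9a, 0x66);

static void lps0_dsm_call(acpi_handle handle, int func)
{
	union acpi_object *out;

	/* Revision 0, no package argument; a NULL result means failure. */
	out = acpi_evaluate_dsm(handle, &lps0_dsm_guid, 0, func, NULL);
	if (!out)
		acpi_handle_warn(handle, "_DSM function %d failed\n", func);
	else
		ACPI_FREE(out);
}

/* Suspend side: function 3, then function 5 (low-power entry). */
static void lps0_sketch_suspend(acpi_handle handle)
{
	lps0_dsm_call(handle, 3);
	lps0_dsm_call(handle, 5);
}

/* Resume side: function 6 (low-power exit), then function 4. */
static void lps0_sketch_resume(acpi_handle handle)
{
	lps0_dsm_call(handle, 6);
	lps0_dsm_call(handle, 4);
}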
|
|
|
#define s2idle_wakeup (false)
|
2013-01-17 21:11:09 +08:00
|
|
|
static inline void acpi_sleep_suspend_setup(void) {}
|
|
|
|
#endif /* !CONFIG_SUSPEND */
|
2007-07-30 05:27:18 +08:00
|
|
|
|
ACPI / PM: Ignore spurious SCI wakeups from suspend-to-idle
The ACPI SCI (System Control Interrupt) is set up as a wakeup IRQ
during suspend-to-idle transitions and, consequently, any events
signaled through it wake up the system from that state. However,
on some systems some of the events signaled via the ACPI SCI while
suspended to idle should not cause the system to wake up. In fact,
quite often they should just be discarded.
Arguably, systems should not resume entirely on such events, but in
order to decide which events really should cause the system to resume
and which are spurious, it is necessary to resume up to the point
when ACPI SCIs are actually handled and processed, which is after
executing dpm_resume_noirq() in the system resume path.
For this reason, add a loop around freeze_enter() in which the
platforms can process events signaled via multiplexed IRQ lines
like the ACPI SCI and add suspend-to-idle hooks that can be
used for this purpose to struct platform_freeze_ops.
In the ACPI case, the ->wake hook is used for checking if the SCI
has triggered while suspended and deferring the interrupt-induced
system wakeup until the events signaled through it are actually
processed sufficiently to decide whether or not the system should
resume. In turn, the ->sync hook allows all of the relevant event
queues to be flushed so as to prevent events from being missed due
to race conditions.
In addition to that, some ACPI code processing wakeup events needs
to be modified to use the "hard" version of wakeup triggers, so that
it will cause a system resume to happen on device-induced wakeup
events even if the "soft" mechanism to prevent the system from
suspending is not enabled. However, to preserve the existing
behavior with respect to suspend-to-RAM, this only is done in
the suspend-to-idle case and only if an SCI has occurred while
suspended.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2017-06-13 04:56:34 +08:00
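The loop described above can be pictured with the following conceptual sketch; it is not the actual kernel/power/suspend.c code, the function name is hypothetical, and only the ->wake/->sync hooks of struct platform_freeze_ops named in the changelog are taken from the source.

/*
 * Conceptual sketch: keep re-entering suspend-to-idle until a wakeup
 * event is confirmed as genuine.  The ->wake() hook lets the platform
 * (ACPI here) decide whether the SCI that fired warrants a resume, and
 * ->sync() flushes the relevant event queues before re-checking.
 */
static void s2idle_loop_sketch(const struct platform_freeze_ops *ops)
{
	do {
		freeze_enter();			/* idle until an IRQ arrives */

		if (ops && ops->wake)
			ops->wake();		/* e.g. ACPI: did the SCI trigger? */

		if (ops && ops->sync)
			ops->sync();		/* flush wakeup event queues */
	} while (!pm_wakeup_pending());		/* spurious: go back to idle */
}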
|
|
|
bool acpi_s2idle_wakeup(void)
|
|
|
|
{
|
|
|
|
return s2idle_wakeup;
|
|
|
|
}
|
|
|
|
|
2016-02-17 20:03:23 +08:00
|
|
|
#ifdef CONFIG_PM_SLEEP
|
|
|
|
static u32 saved_bm_rld;
|
|
|
|
|
|
|
|
static int acpi_save_bm_rld(void)
|
|
|
|
{
|
|
|
|
acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void acpi_restore_bm_rld(void)
|
|
|
|
{
|
|
|
|
u32 resumed_bm_rld = 0;
|
|
|
|
|
|
|
|
acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);
|
|
|
|
if (resumed_bm_rld == saved_bm_rld)
|
|
|
|
return;
|
|
|
|
|
|
|
|
acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct syscore_ops acpi_sleep_syscore_ops = {
|
|
|
|
.suspend = acpi_save_bm_rld,
|
|
|
|
.resume = acpi_restore_bm_rld,
|
|
|
|
};
|
|
|
|
|
2017-07-31 17:40:13 +08:00
|
|
|
static void acpi_sleep_syscore_init(void)
|
2016-02-17 20:03:23 +08:00
|
|
|
{
|
|
|
|
register_syscore_ops(&acpi_sleep_syscore_ops);
|
|
|
|
}
|
|
|
|
#else
|
|
|
|
static inline void acpi_sleep_syscore_init(void) {}
|
|
|
|
#endif /* CONFIG_PM_SLEEP */
|
|
|
|
|
2007-07-30 05:24:36 +08:00
|
|
|
#ifdef CONFIG_HIBERNATION
|
2008-07-24 12:28:41 +08:00
|
|
|
static unsigned long s4_hardware_signature;
|
|
|
|
static struct acpi_table_facs *facs;
|
2022-03-12 03:20:17 +08:00
|
|
|
int acpi_check_s4_hw_signature = -1; /* Default behaviour is just to warn */
|
2008-07-24 12:28:41 +08:00
|
|
|
|
2019-05-16 18:43:19 +08:00
|
|
|
static int acpi_hibernation_begin(pm_message_t stage)
|
2007-10-18 18:04:42 +08:00
|
|
|
{
|
2019-05-16 18:43:19 +08:00
|
|
|
if (!nvs_nosave) {
|
|
|
|
int error = suspend_nvs_alloc();
|
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
}
|
2008-10-27 03:52:15 +08:00
|
|
|
|
2019-05-16 18:43:19 +08:00
|
|
|
if (stage.event == PM_EVENT_HIBERNATE)
|
|
|
|
pm_set_suspend_via_firmware();
|
2008-10-27 03:52:15 +08:00
|
|
|
|
2019-05-16 18:43:19 +08:00
|
|
|
acpi_pm_start(ACPI_STATE_S4);
|
|
|
|
return 0;
|
2008-10-27 03:52:15 +08:00
|
|
|
}
|
|
|
|
|
2007-05-09 17:33:18 +08:00
|
|
|
static int acpi_hibernation_enter(void)
|
|
|
|
{
|
|
|
|
acpi_status status = AE_OK;
|
|
|
|
|
|
|
|
/* This shouldn't return. If it returns, we have a problem */
|
2012-07-27 08:08:54 +08:00
|
|
|
status = acpi_enter_sleep_state(ACPI_STATE_S4);
|
|
|
|
/* Reprogram control registers */
|
|
|
|
acpi_leave_sleep_state_prep(ACPI_STATE_S4);
|
2007-05-09 17:33:18 +08:00
|
|
|
|
|
|
|
return ACPI_SUCCESS(status) ? 0 : -EFAULT;
|
|
|
|
}
|
|
|
|
|
2007-10-18 18:04:55 +08:00
|
|
|
static void acpi_hibernation_leave(void)
|
|
|
|
{
|
2016-03-23 07:11:20 +08:00
|
|
|
pm_set_resume_via_firmware();
|
2007-10-18 18:04:55 +08:00
|
|
|
/*
|
|
|
|
* If ACPI is not enabled by the BIOS and the boot kernel, we need to
|
|
|
|
* enable it here.
|
|
|
|
*/
|
|
|
|
acpi_enable();
|
2012-07-27 08:08:54 +08:00
|
|
|
/* Reprogram control registers */
|
|
|
|
acpi_leave_sleep_state_prep(ACPI_STATE_S4);
|
2008-07-24 12:28:41 +08:00
|
|
|
/* Check the hardware signature */
|
2014-01-10 17:51:53 +08:00
|
|
|
if (facs && s4_hardware_signature != facs->hardware_signature)
|
2021-06-02 16:54:39 +08:00
|
|
|
pr_crit("Hardware changed while hibernated, success doubtful!\n");
|
2008-10-27 03:52:15 +08:00
|
|
|
/* Restore the NVS memory area */
|
2010-05-29 04:32:14 +08:00
|
|
|
suspend_nvs_restore();
|
2010-04-09 07:39:40 +08:00
|
|
|
/* Allow EC transactions to happen. */
|
ACPI / EC: Add PM operations to improve event handling for resume process
This patch makes 2 changes:
1. Restore old behavior
Originally, the EC driver stops handling both events and transactions in
acpi_ec_block_transactions(), restarts handling transactions in
acpi_ec_unblock_transactions_early(), and restarts handling both events
and transactions in acpi_ec_unblock_transactions().
Currently, however, the EC driver still stops handling both events and
transactions in acpi_ec_block_transactions(), but restarts handling both
events and transactions in acpi_ec_unblock_transactions_early().
This patch restores the old behavior by dropping
__acpi_ec_enable_event() from acpi_ec_unblock_transactions_early().
2. Improve old behavior
However, this still cannot fix the real issue, as both of the
acpi_ec_unblock_xxx() functions are invoked in the noirq stage. Since the
EC driver actually doesn't implement the event handling in the polling
mode, re-enabling the event handling too early in the noirq stage could
result in the problem that if there is no triggering source causing
advance_transaction() to be invoked, pending SCI_EVT cannot be detected by
the EC driver and _Qxx cannot be triggered.
It actually makes sense to restart the event handling at any point during
resume after the noirq stage. Just like the boot stage, where the event
handling is enabled in .add(), this patch further moves
acpi_ec_enable_event() to .resume(). After doing that, the following 2
functions can be combined:
acpi_ec_unblock_transactions_early()/acpi_ec_unblock_transactions().
The differences of the event handling availability between the old behavior
(this patch isn't applied) and the new behavior (this patch is applied) are
as follows:
                    !Applied          Applied
before suspend      Y                 Y
suspend before EC   Y                 Y
suspend after EC    Y                 Y
suspend_late        Y                 Y
suspend_noirq       Y (actually N)    Y (actually N)
resume_noirq        Y (actually N)    Y (actually N)
resume_late         Y (actually N)    Y (actually N)
resume before EC    Y (actually N)    Y (actually N)
resume after EC     Y (actually N)    Y
after resume        Y (actually N)    Y
Where "actually N" means if there is no triggering source, the EC driver
is actually not able to notice the pending SCI_EVT occurred in the noirq
stage. So we can clearly see that this patch has improved the situation.
Signed-off-by: Lv Zheng <lv.zheng@intel.com>
Tested-by: Todd E Brandt <todd.e.brandt@linux.intel.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2016-08-03 16:01:36 +08:00
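On the EC-driver side, the shape of the change can be sketched roughly as follows. This is a simplified illustration only: acpi_ec_enable_event() is the helper named in the changelog, struct acpi_ec comes from the driver's internal header, and the callback and ops names below are hypothetical.

#include <linux/acpi.h>
#include <linux/pm.h>
#include "internal.h"		/* struct acpi_ec, for illustration */

/*
 * Simplified sketch: re-enable EC event handling from the driver's
 * .resume() callback, i.e. after the noirq stage, so that a pending
 * SCI_EVT can actually be noticed and _Qxx handlers can run.
 */
static int acpi_ec_resume_sketch(struct device *dev)
{
	struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));

	acpi_ec_enable_event(ec);
	return 0;
}

static const struct dev_pm_ops acpi_ec_pm_sketch = {
	SET_SYSTEM_SLEEP_PM_OPS(NULL, acpi_ec_resume_sketch)
};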
|
|
|
acpi_ec_unblock_transactions();
|
2007-10-18 18:04:55 +08:00
|
|
|
}
|
|
|
|
|
2010-04-09 07:39:40 +08:00
|
|
|
static void acpi_pm_thaw(void)
|
2010-03-04 08:52:58 +08:00
|
|
|
{
|
2010-04-09 07:40:38 +08:00
|
|
|
acpi_ec_unblock_transactions();
|
2008-12-16 16:57:46 +08:00
|
|
|
acpi_enable_all_runtime_gpes();
|
2007-05-09 17:33:18 +08:00
|
|
|
}
|
|
|
|
|
2010-11-10 04:48:49 +08:00
|
|
|
static const struct platform_hibernation_ops acpi_hibernation_ops = {
|
2008-06-13 05:24:06 +08:00
|
|
|
.begin = acpi_hibernation_begin,
|
|
|
|
.end = acpi_pm_end,
|
2010-07-02 06:14:09 +08:00
|
|
|
.pre_snapshot = acpi_pm_prepare,
|
2010-05-29 04:32:15 +08:00
|
|
|
.finish = acpi_pm_finish,
|
2008-06-13 05:24:06 +08:00
|
|
|
.prepare = acpi_pm_prepare,
|
|
|
|
.enter = acpi_hibernation_enter,
|
|
|
|
.leave = acpi_hibernation_leave,
|
2010-04-09 07:39:40 +08:00
|
|
|
.pre_restore = acpi_pm_freeze,
|
|
|
|
.restore_cleanup = acpi_pm_thaw,
|
2008-06-13 05:24:06 +08:00
|
|
|
};
|
2008-01-08 07:08:44 +08:00
|
|
|
|
2008-06-13 05:24:06 +08:00
|
|
|
/**
|
|
|
|
* acpi_hibernation_begin_old - Set the target system sleep state to
|
|
|
|
* ACPI_STATE_S4 and execute the _PTS control method. This
|
|
|
|
* function is used if the pre-ACPI 2.0 suspend ordering has been
|
|
|
|
* requested.
|
|
|
|
*/
|
2019-05-16 18:43:19 +08:00
|
|
|
static int acpi_hibernation_begin_old(pm_message_t stage)
|
swsusp: introduce restore platform operations
At least on some machines it is necessary to prepare the ACPI firmware for the
restoration of the system memory state from the hibernation image if the
"platform" mode of hibernation has been used. Namely, in that cases we need
to disable the GPEs before replacing the "boot" kernel with the "frozen"
kernel (cf. http://bugzilla.kernel.org/show_bug.cgi?id=7887). After the
restore they will be re-enabled by hibernation_ops->finish(), but if the
restore fails, they have to be re-enabled by the restore code explicitly.
For this purpose we can introduce two additional hibernation operations,
called pre_restore() and restore_cleanup() and call them from the restore code
path. Still, they should be called if the "platform" mode of hibernation has
been used, so we need to pass the information about the hibernation mode from
the "frozen" kernel to the "boot" kernel in the image header.
Apparently, we can't drop the disabling of GPEs before the restore because of
Bug #7887. We also can't do it unconditionally, because the GPEs wouldn't
have been enabled after a successful restore if the suspend had been done in
the 'shutdown' or 'reboot' mode.
In principle we could (and probably should) unconditionally disable the GPEs
before each snapshot creation *and* before the restore, but then we'd have to
unconditionally enable them after the snapshot creation as well as after the
restore (or restore failure). Still, for this purpose we'd need to modify
acpi_enter_sleep_state_prep() and acpi_leave_sleep_state() and we'd have to
introduce some mechanism synchronizing the disabling/enabling of the GPEs with
the device drivers' .suspend()/.resume() routines and with
disable_/enable_nonboot_cpus(). However, this would have affected the
suspend (i.e. s2ram) code as well as the hibernation, which I'd like to avoid
in this patch series.
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Cc: Nigel Cunningham <nigel@nigel.suspend2.net>
Cc: Pavel Machek <pavel@ucw.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-07-19 16:47:30 +08:00
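In outline, the restore code path described above could use the two new hooks like this. This is a conceptual sketch only: hibernation_ops, the platform_mode flag and the do_restore() placeholder are assumptions standing in for the real logic in kernel/power/hibernate.c.

/*
 * Conceptual sketch: when the image was created in "platform" mode,
 * let the firmware prepare for the restore (e.g. disable GPEs) and,
 * if the restore fails, undo that preparation so the GPEs come back.
 */
static int restore_with_platform_hooks(bool platform_mode)
{
	int error = 0;

	if (platform_mode && hibernation_ops && hibernation_ops->pre_restore)
		error = hibernation_ops->pre_restore();

	if (!error)
		error = do_restore();			/* placeholder */

	if (error && platform_mode && hibernation_ops &&
	    hibernation_ops->restore_cleanup)
		hibernation_ops->restore_cleanup();	/* re-enable GPEs */

	return error;
}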
|
|
|
{
|
2008-08-12 10:20:22 +08:00
|
|
|
int error;
|
|
|
|
/*
|
|
|
|
* The _TTS object should always be evaluated before the _PTS object.
|
|
|
|
* When old_suspend_ordering is true, the _PTS object is
|
|
|
|
* evaluated in acpi_sleep_prepare().
|
|
|
|
*/
|
|
|
|
acpi_sleep_tts_switch(ACPI_STATE_S4);
|
|
|
|
|
|
|
|
error = acpi_sleep_prepare(ACPI_STATE_S4);
|
2019-05-16 18:43:19 +08:00
|
|
|
if (error)
|
|
|
|
return error;
|
2007-07-19 16:47:30 +08:00
|
|
|
|
2019-05-16 18:43:19 +08:00
|
|
|
if (!nvs_nosave) {
|
|
|
|
error = suspend_nvs_alloc();
|
|
|
|
if (error)
|
|
|
|
return error;
|
2008-10-27 03:52:15 +08:00
|
|
|
}
|
2019-05-16 18:43:19 +08:00
|
|
|
|
|
|
|
if (stage.event == PM_EVENT_HIBERNATE)
|
|
|
|
pm_set_suspend_via_firmware();
|
|
|
|
|
|
|
|
acpi_target_sleep_state = ACPI_STATE_S4;
|
|
|
|
acpi_scan_lock_acquire();
|
|
|
|
return 0;
|
2008-10-27 03:52:15 +08:00
|
|
|
}
|
|
|
|
|
2008-06-13 05:24:06 +08:00
|
|
|
/*
|
|
|
|
* The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
|
|
|
|
* been requested.
|
|
|
|
*/
|
2010-11-10 04:48:49 +08:00
|
|
|
static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
|
2008-06-13 05:24:06 +08:00
|
|
|
.begin = acpi_hibernation_begin_old,
|
|
|
|
.end = acpi_pm_end,
|
2010-07-02 06:14:09 +08:00
|
|
|
.pre_snapshot = acpi_pm_pre_suspend,
|
2010-04-09 07:39:40 +08:00
|
|
|
.prepare = acpi_pm_freeze,
|
2010-05-29 04:32:15 +08:00
|
|
|
.finish = acpi_pm_finish,
|
2007-05-09 17:33:18 +08:00
|
|
|
.enter = acpi_hibernation_enter,
|
2007-10-18 18:04:55 +08:00
|
|
|
.leave = acpi_hibernation_leave,
|
2010-04-09 07:39:40 +08:00
|
|
|
.pre_restore = acpi_pm_freeze,
|
|
|
|
.restore_cleanup = acpi_pm_thaw,
|
2008-06-13 05:24:06 +08:00
|
|
|
.recover = acpi_pm_finish,
|
2007-05-09 17:33:18 +08:00
|
|
|
};
|
2013-01-17 21:11:09 +08:00
|
|
|
|
|
|
|
static void acpi_sleep_hibernate_setup(void)
|
|
|
|
{
|
2014-03-14 05:11:39 +08:00
|
|
|
if (!acpi_sleep_state_supported(ACPI_STATE_S4))
|
2013-01-17 21:11:09 +08:00
|
|
|
return;
|
|
|
|
|
|
|
|
hibernation_set_ops(old_suspend_ordering ?
|
|
|
|
&acpi_hibernation_ops_old : &acpi_hibernation_ops);
|
|
|
|
sleep_states[ACPI_STATE_S4] = 1;
|
2022-03-12 03:20:17 +08:00
|
|
|
if (!acpi_check_s4_hw_signature)
|
2013-01-17 21:11:09 +08:00
|
|
|
return;
|
|
|
|
|
|
|
|
acpi_get_table(ACPI_SIG_FACS, 1, (struct acpi_table_header **)&facs);
|
2021-11-09 00:09:41 +08:00
|
|
|
if (facs) {
|
|
|
|
/*
|
|
|
|
* s4_hardware_signature is the local variable which is just
|
|
|
|
* used to warn about mismatch after we're attempting to
|
|
|
|
* resume (in violation of the ACPI specification).
|
|
|
|
*/
|
2013-01-17 21:11:09 +08:00
|
|
|
s4_hardware_signature = facs->hardware_signature;
|
2021-11-09 00:09:41 +08:00
|
|
|
|
2022-03-12 03:20:17 +08:00
|
|
|
if (acpi_check_s4_hw_signature > 0) {
|
2021-11-09 00:09:41 +08:00
|
|
|
/*
|
|
|
|
* If we're actually obeying the ACPI specification
|
|
|
|
* then the signature is written out as part of the
|
|
|
|
* swsusp header, in order to allow the boot kernel
|
|
|
|
* to gracefully decline to resume.
|
|
|
|
*/
|
|
|
|
swsusp_hardware_signature = facs->hardware_signature;
|
|
|
|
}
|
|
|
|
}
|
2013-01-17 21:11:09 +08:00
|
|
|
}
|
|
|
|
#else /* !CONFIG_HIBERNATION */
|
|
|
|
static inline void acpi_sleep_hibernate_setup(void) {}
|
|
|
|
#endif /* !CONFIG_HIBERNATION */
|
2007-05-09 17:33:18 +08:00
|
|
|
|
2022-05-10 07:32:30 +08:00
|
|
|
static int acpi_power_off_prepare(struct sys_off_data *data)
|
2007-09-21 01:32:35 +08:00
|
|
|
{
|
|
|
|
/* Prepare to power off the system */
|
|
|
|
acpi_sleep_prepare(ACPI_STATE_S5);
|
2008-12-16 16:57:46 +08:00
|
|
|
acpi_disable_all_gpes();
|
2014-12-02 06:51:13 +08:00
|
|
|
acpi_os_wait_events_complete();
|
2022-05-10 07:32:30 +08:00
|
|
|
return NOTIFY_DONE;
|
2007-09-21 01:32:35 +08:00
|
|
|
}
|
|
|
|
|
2022-05-10 07:32:30 +08:00
|
|
|
static int acpi_power_off(struct sys_off_data *data)
|
2007-09-21 01:32:35 +08:00
|
|
|
{
|
|
|
|
/* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
|
2021-06-02 16:54:39 +08:00
|
|
|
pr_debug("%s called\n", __func__);
|
2007-09-21 01:32:35 +08:00
|
|
|
local_irq_disable();
|
2012-07-27 08:08:54 +08:00
|
|
|
acpi_enter_sleep_state(ACPI_STATE_S5);
|
2022-05-10 07:32:30 +08:00
|
|
|
return NOTIFY_DONE;
|
2009-04-18 11:32:20 +08:00
|
|
|
}
|
|
|
|
|
2007-02-10 14:32:16 +08:00
|
|
|
int __init acpi_sleep_init(void)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2013-02-22 15:37:36 +08:00
|
|
|
char supported[ACPI_S_STATE_COUNT * 3 + 1];
|
|
|
|
char *pos = supported;
|
|
|
|
int i;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2012-11-30 19:57:03 +08:00
|
|
|
acpi_sleep_dmi_check();
|
|
|
|
|
2007-09-21 04:27:44 +08:00
|
|
|
sleep_states[ACPI_STATE_S0] = 1;
|
|
|
|
|
2016-02-17 20:03:23 +08:00
|
|
|
acpi_sleep_syscore_init();
|
2013-01-17 21:11:09 +08:00
|
|
|
acpi_sleep_suspend_setup();
|
|
|
|
acpi_sleep_hibernate_setup();
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2014-03-14 05:11:39 +08:00
|
|
|
if (acpi_sleep_state_supported(ACPI_STATE_S5)) {
|
2007-09-21 01:32:35 +08:00
|
|
|
sleep_states[ACPI_STATE_S5] = 1;
|
2022-05-10 07:32:30 +08:00
|
|
|
|
|
|
|
register_sys_off_handler(SYS_OFF_MODE_POWER_OFF_PREPARE,
|
|
|
|
SYS_OFF_PRIO_FIRMWARE,
|
|
|
|
acpi_power_off_prepare, NULL);
|
|
|
|
|
|
|
|
register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
|
|
|
|
SYS_OFF_PRIO_FIRMWARE,
|
|
|
|
acpi_power_off, NULL);
|
2016-03-22 08:51:10 +08:00
|
|
|
} else {
|
|
|
|
acpi_no_s5 = true;
|
2007-09-21 01:32:35 +08:00
|
|
|
}
|
2013-02-22 15:37:36 +08:00
|
|
|
|
|
|
|
supported[0] = 0;
|
|
|
|
for (i = 0; i < ACPI_S_STATE_COUNT; i++) {
|
|
|
|
if (sleep_states[i])
|
|
|
|
pos += sprintf(pos, " S%d", i);
|
|
|
|
}
|
2021-06-02 16:54:39 +08:00
|
|
|
pr_info("(supports%s)\n", supported);
|
2013-02-22 15:37:36 +08:00
|
|
|
|
2008-08-12 10:20:22 +08:00
|
|
|
/*
|
2016-11-21 21:25:49 +08:00
|
|
|
* Register the tts_notifier to the reboot notifier list so that the _TTS
|
|
|
|
* object can also be evaluated when the system enters S5.
|
2008-08-12 10:20:22 +08:00
|
|
|
*/
|
2016-11-21 21:25:49 +08:00
|
|
|
register_reboot_notifier(&tts_notifier);
|
2005-04-17 06:20:36 +08:00
|
|
|
return 0;
|
|
|
|
}
|