84e70971e6
During the ACPI S3 suspend, we store the GDT in the wakeup_header (see
wakeup_asm.s) field called 'pmode_gdt', which is then used during the
resume path. It has the exact same value as what store/load_gdt do with
the saved_context (which is saved/restored via
save/restore_processor_state()).

The flow during resume from ACPI S3 is simpler than the 64-bit
counterpart. We only use the early bootstrap once (wakeup_gdt) and do
various checks in real mode.

After the checks are completed, we load the saved GDT ('pmode_gdt') and
continue on with the resume (by heading to startup_32 in
trampoline_32.S) - which quickly jumps to what was saved in
'pmode_entry', aka 'wakeup_pmode_return'.

The 'wakeup_pmode_return' restores the GDT (saved_gdt) again (which was
saved in do_suspend_lowlevel initially). After that it ends up calling
the 'ret_point' which calls 'restore_processor_state()'.

We have two opportunities to remove code where we restore the same GDT
twice. Here is the call chain:

 wakeup_start
       |- lgdtl wakeup_gdt [the work-around for broken BIOSes]
       |
       |- lgdtl pmode_gdt [the real one]
       |
       \-- startup_32 (in trampoline_32.S)
              \-- wakeup_pmode_return (in wakeup_32.S)
                       |- lgdtl saved_gdt [the real one]
                       \-- ret_point
                             |..
                             |- call restore_processor_state

The hibernate path is much simpler. During the saving of the hibernation
image we call save_processor_state() and save its contents along with
the rest of the kernel in the hibernation image destination. We save the
EIP of 'restore_registers' (restore_jump_address) and cr3 (restore_cr3).

During hibernate resume, 'restore_registers' (via the
'restore_jump_address') in hibernate_asm_32.S is invoked, which restores
the contents of most registers. Naturally the resume path benefits from
already being in 32-bit mode, so it does not have to reload the GDT. It
only reloads cr3 (from restore_cr3) and continues on. Note that the
restoration of the restore-image page tables is done prior to this.

After 'restore_registers' returns we end up calling
restore_processor_state() - where we reload the GDT. The reload of the
GDT is not needed, as the boot kernel has already loaded a GDT which is
at the same physical location as the restored kernel's.

Note that the hibernation path assumes the GDT is correct during its
'restore_registers'. The assumption in the code is that the restored
image is the same as the saved one - meaning we are not trying to
restore a different kernel in the virtual address space of a new kernel.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Link: http://lkml.kernel.org/r/1365194544-14648-3-git-send-email-konrad.wilk@oracle.com
Cc: Rafael J. Wysocki <rjw@sisk.pl>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
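For readers unfamiliar with what "store/load_gdt" amounts to, below is a
minimal, stand-alone C sketch (not part of this file or of the patch; the
sketch_* names and the local descriptor struct are illustrative only) of
saving and reloading a GDT descriptor - the operation the message above
argues is performed redundantly on resume:

/* Illustrative only: a stand-alone GDT descriptor, mirroring the layout
 * the SGDT/LGDT instructions expect (16-bit limit, then base address). */
struct sketch_desc_ptr {
	unsigned short size;
	unsigned long address;
} __attribute__((packed));

static struct sketch_desc_ptr sketch_saved_gdt;

/* "store_gdt": SGDT writes the current GDT limit and base into memory. */
static inline void sketch_store_gdt(void)
{
	asm volatile("sgdt %0" : "=m" (sketch_saved_gdt));
}

/* "load_gdt": LGDT reloads the descriptor saved above. The commit argues
 * that doing this on resume is redundant, because the wakeup/boot code
 * has already loaded an identical GDT (pmode_gdt, or the boot kernel's). */
static inline void sketch_load_gdt(void)
{
	asm volatile("lgdt %0" : : "m" (sketch_saved_gdt));
}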
318 lines
8.0 KiB
C
/*
 * Suspend support specific for i386/x86-64.
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/smp.h>

#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/mce.h>
#include <asm/xcr.h>
#include <asm/suspend.h>
#include <asm/debugreg.h>
#include <asm/fpu-internal.h> /* pcntxt_mask */
#include <asm/cpu.h>

#ifdef CONFIG_X86_32
static struct saved_context saved_context;

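/*
 * The 32-bit suspend/resume assembly (see wakeup_32.S and
 * hibernate_asm_32.S, referenced in the commit description above) saves
 * and restores these registers by name, separately from the
 * saved_context structure.
 */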
unsigned long saved_context_ebx;
unsigned long saved_context_esp, saved_context_ebp;
unsigned long saved_context_esi, saved_context_edi;
unsigned long saved_context_eflags;
#else
/* CONFIG_X86_64 */
struct saved_context saved_context;
#endif

/**
 *	__save_processor_state - save CPU registers before creating a
 *		hibernation image and before restoring the memory state from it
 *	@ctxt - structure to store the registers contents in
 *
 *	NOTE: If there is a CPU register the modification of which by the
 *	boot kernel (ie. the kernel used for loading the hibernation image)
 *	might affect the operations of the restored target kernel (ie. the one
 *	saved in the hibernation image), then its contents must be saved by this
 *	function.  In other words, if kernel A is hibernated and a different
 *	kernel B is used for loading the hibernation image into memory, the
 *	kernel A's __save_processor_state() function must save all registers
 *	needed by kernel A, so that it can operate correctly after the resume
 *	regardless of what kernel B does in the meantime.
 */
static void __save_processor_state(struct saved_context *ctxt)
{
#ifdef CONFIG_X86_32
	mtrr_save_fixed_ranges(NULL);
#endif
	kernel_fpu_begin();

	/*
	 * descriptor tables
	 */
#ifdef CONFIG_X86_32
	store_idt(&ctxt->idt);
#else
/* CONFIG_X86_64 */
	store_idt((struct desc_ptr *)&ctxt->idt_limit);
#endif
	store_tr(ctxt->tr);

	/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
	/*
	 * segment registers
	 */
#ifdef CONFIG_X86_32
	savesegment(es, ctxt->es);
	savesegment(fs, ctxt->fs);
	savesegment(gs, ctxt->gs);
	savesegment(ss, ctxt->ss);
#else
/* CONFIG_X86_64 */
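	/*
	 * Only the segment selectors are saved here; the FS/GS base
	 * addresses are captured separately via the MSRs below.
	 */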
asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
|
|
asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
|
|
asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
|
|
asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
|
|
asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));
|
|
|
|
rdmsrl(MSR_FS_BASE, ctxt->fs_base);
|
|
rdmsrl(MSR_GS_BASE, ctxt->gs_base);
|
|
rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
|
|
mtrr_save_fixed_ranges(NULL);
|
|
|
|
rdmsrl(MSR_EFER, ctxt->efer);
|
|
#endif
|
|
|
|
	/*
	 * control registers
	 */
	ctxt->cr0 = read_cr0();
	ctxt->cr2 = read_cr2();
	ctxt->cr3 = read_cr3();
#ifdef CONFIG_X86_32
	ctxt->cr4 = read_cr4_safe();
#else
/* CONFIG_X86_64 */
	ctxt->cr4 = read_cr4();
	ctxt->cr8 = read_cr8();
#endif
	ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
					       &ctxt->misc_enable);
}

/* Needed by apm.c */
void save_processor_state(void)
{
	__save_processor_state(&saved_context);
	x86_platform.save_sched_clock_state();
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(save_processor_state);
#endif

static void do_fpu_end(void)
{
	/*
	 * Restore FPU regs if necessary.
	 */
	kernel_fpu_end();
}

static void fix_processor_context(void)
{
	int cpu = smp_processor_id();
	struct tss_struct *t = &per_cpu(init_tss, cpu);

	set_tss_desc(cpu, t);	/*
				 * This just modifies memory; should not be
				 * necessary. But... This is necessary, because
				 * 386 hardware has the concept of a busy TSS
				 * or some similar stupidity.
				 */

#ifdef CONFIG_X86_64
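	/*
	 * Type 9 is the "available" (non-busy) 64-bit TSS descriptor type;
	 * clearing the busy state here keeps the ltr in load_TR_desc()
	 * below from faulting on a descriptor still marked busy.
	 */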
	get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;

	syscall_init();				/* This sets MSR_*STAR and related */
#endif
	load_TR_desc();				/* This does ltr */
	load_LDT(&current->active_mm->context);	/* This does lldt */
}

/**
 *	__restore_processor_state - restore the contents of CPU registers saved
 *		by __save_processor_state()
 *	@ctxt - structure to load the registers contents from
 */
static void __restore_processor_state(struct saved_context *ctxt)
{
	if (ctxt->misc_enable_saved)
		wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
	/*
	 * control registers
	 */
	/* cr4 was introduced in the Pentium CPU */
#ifdef CONFIG_X86_32
	if (ctxt->cr4)
		write_cr4(ctxt->cr4);
#else
/* CONFIG_X86_64 */
	wrmsrl(MSR_EFER, ctxt->efer);
	write_cr8(ctxt->cr8);
	write_cr4(ctxt->cr4);
#endif
	write_cr3(ctxt->cr3);
	write_cr2(ctxt->cr2);
	write_cr0(ctxt->cr0);

	/*
	 * now restore the descriptor tables to their proper values
	 * ltr is done in fix_processor_context().
	 */
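	/*
	 * Note that the GDT is not reloaded here: as the commit description
	 * above explains, the kernel executing at this point has already
	 * loaded a GDT at the same physical location as the one the
	 * suspended kernel saved, so reloading it would be redundant.
	 */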
#ifdef CONFIG_X86_32
	load_idt(&ctxt->idt);
#else
/* CONFIG_X86_64 */
	load_idt((const struct desc_ptr *)&ctxt->idt_limit);
#endif

	/*
	 * segment registers
	 */
#ifdef CONFIG_X86_32
	loadsegment(es, ctxt->es);
	loadsegment(fs, ctxt->fs);
	loadsegment(gs, ctxt->gs);
	loadsegment(ss, ctxt->ss);

	/*
	 * sysenter MSRs
	 */
	if (boot_cpu_has(X86_FEATURE_SEP))
		enable_sep_cpu();
#else
/* CONFIG_X86_64 */
	asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
	asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
	asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
	load_gs_index(ctxt->gs);
	asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));

	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
	wrmsrl(MSR_GS_BASE, ctxt->gs_base);
	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
#endif

	/*
	 * restore XCR0 for xsave-capable CPUs.
	 */
	if (cpu_has_xsave)
		xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);

	fix_processor_context();

	do_fpu_end();
	x86_platform.restore_sched_clock_state();
	mtrr_bp_restore();
}

/* Needed by apm.c */
void restore_processor_state(void)
{
	__restore_processor_state(&saved_context);
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(restore_processor_state);
#endif

/*
 * When bsp_check() is called in hibernate and suspend, cpu hotplug
 * is disabled already. So it's unnecessary to handle the race condition
 * between cpumask query and cpu hotplug.
 */
static int bsp_check(void)
{
	if (cpumask_first(cpu_online_mask) != 0) {
		pr_warn("CPU0 is offline.\n");
		return -ENODEV;
	}

	return 0;
}

static int bsp_pm_callback(struct notifier_block *nb, unsigned long action,
			   void *ptr)
{
	int ret = 0;

	switch (action) {
	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		ret = bsp_check();
		break;
#ifdef CONFIG_DEBUG_HOTPLUG_CPU0
	case PM_RESTORE_PREPARE:
		/*
		 * When the system resumes from hibernation, online CPU0 because
		 * 1. it's required for resume and
		 * 2. the CPU was online before hibernation
		 */
		if (!cpu_online(0))
			_debug_hotplug_cpu(0, 1);
		break;
	case PM_POST_RESTORE:
		/*
		 * When a resume really happens, this code won't be called.
		 *
		 * This code is called only when user space hibernation software
		 * prepares the snapshot device during boot time. So we just
		 * call _debug_hotplug_cpu() to restore CPU0 to the state it was
		 * in before the snapshot device was prepared.
		 *
		 * This works for the normal boot case in our CPU0 hotplug debug
		 * mode, i.e. CPU0 is offline and user mode hibernation
		 * software initializes during boot time.
		 *
		 * If CPU0 is online and a user application accesses the snapshot
		 * device after boot time, this will offline CPU0 and the user may
		 * see a different CPU0 state before and after accessing
		 * the snapshot device. But hopefully that is not the case when
		 * the user is debugging CPU0 hotplug. Even if users hit this
		 * case, they can easily bring CPU0 back online.
		 *
		 * To simplify this debug code, we only consider the normal boot
		 * case. Otherwise we would need to remember CPU0's state, restore
		 * to that state and resolve racy conditions etc.
		 */
		_debug_hotplug_cpu(0, 0);
		break;
#endif
	default:
		break;
	}
	return notifier_from_errno(ret);
}

static int __init bsp_pm_check_init(void)
{
	/*
	 * Register bsp_pm_callback with lower priority than
	 * cpu_hotplug_pm_callback, so that cpu_hotplug_pm_callback is called
	 * first and disables cpu hotplug before the BSP online check.
	 */
	pm_notifier(bsp_pm_callback, -INT_MAX);
	return 0;
}

core_initcall(bsp_pm_check_init);