linux-next/include/asm-i386/suspend.h
commit ecd02dddd1 ("[PATCH] i386: Use loaddebug macro consistently")
Author: Roland McGrath
This moves the macro loaddebug from asm-i386/suspend.h to
asm-i386/processor.h, which is the place that makes sense for it to be
defined, removes the extra copy of the same macro in
arch/i386/kernel/process.c, and makes arch/i386/kernel/signal.c use the
macro in place of its expansion.

This is a purely cosmetic cleanup for the normal i386 kernel.  However, it
is handy for Xen to be able to just redefine the loaddebug macro once
instead of also changing the signal.c code.
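
For context, loaddebug is a thin wrapper around the mov-to-debug-register
instruction. A sketch of the definition that lands in asm-i386/processor.h,
reconstructed from the 2.6-era tree (treat the exact spelling as approximate):

	/* Load a thread's saved value into hardware debug register
	   "register"; #register stringizes the number into the mnemonic. */
	#define loaddebug(thread, register) \
		__asm__("movl %0,%%db" #register \
			: /* no output */ \
			: "r" ((thread)->debugreg[register]))

With that single definition in processor.h, the open-coded expansion in
signal.c, roughly __asm__("movl %0,%%db7" : : "r" (current->thread.debugreg[7])),
becomes loaddebug(&current->thread, 7), and a Xen port only has to override
the one macro.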

Signed-off-by: Roland McGrath <roland@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-04-16 15:24:46 -07:00


/*
 * Copyright 2001-2002 Pavel Machek <pavel@suse.cz>
 * Based on code
 * Copyright 2001 Patrick Mochel <mochel@osdl.org>
 */
#include <asm/desc.h>
#include <asm/i387.h>
static inline int
arch_prepare_suspend(void)
{
	/* If you want to make non-PSE machines work, turn off paging
	   in swsusp_arch_suspend. swsusp_pg_dir should have an identity
	   mapping, so it could work... */
	if (!cpu_has_pse) {
		printk(KERN_ERR "PSE is required for swsusp.\n");
		return -EPERM;
	}
	return 0;
}
/* image of the saved processor state */
struct saved_context {
	u16 es, fs, gs, ss;
	unsigned long cr0, cr2, cr3, cr4;
	u16 gdt_pad;
	u16 gdt_limit;
	unsigned long gdt_base;
	u16 idt_pad;
	u16 idt_limit;
	unsigned long idt_base;
	u16 ldt;
	u16 tss;
	unsigned long tr;
	unsigned long safety;
	unsigned long return_address;
} __attribute__((packed));
#ifdef CONFIG_ACPI_SLEEP
extern unsigned long saved_eip;
extern unsigned long saved_esp;
extern unsigned long saved_ebp;
extern unsigned long saved_ebx;
extern unsigned long saved_esi;
extern unsigned long saved_edi;

static inline void acpi_save_register_state(unsigned long return_point)
{
	saved_eip = return_point;
	asm volatile ("movl %%esp,%0" : "=m" (saved_esp));
	asm volatile ("movl %%ebp,%0" : "=m" (saved_ebp));
	asm volatile ("movl %%ebx,%0" : "=m" (saved_ebx));
	asm volatile ("movl %%edi,%0" : "=m" (saved_edi));
	asm volatile ("movl %%esi,%0" : "=m" (saved_esi));
}

#define acpi_restore_register_state()  do { } while (0)

/* routines for saving/restoring kernel state */
extern int acpi_save_state_mem(void);
#endif
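
The save side that fills struct saved_context lives in arch/i386/power/cpu.c
in this era. The abridged sketch below is a reconstruction, not the tree's
exact code (sketch_save_processor_state is a made-up name); it shows why the
pad/limit/base triples and __attribute__((packed)) matter: sgdt and sidt
store a 16-bit limit immediately followed by a 32-bit base at the operand
address, so gdt_base must directly follow gdt_limit with no compiler-inserted
padding.

	/* Abridged, hypothetical sketch of the saver
	   (cf. arch/i386/power/cpu.c in this era). */
	static void sketch_save_processor_state(struct saved_context *ctxt)
	{
		/* Descriptor tables: sgdt/sidt write limit:16 then base:32
		   starting at the operand address; "packed" keeps gdt_base
		   flush against gdt_limit so the base lands in the right
		   field. gdt_pad/idt_pad just keep the u16 limits from
		   sitting at an odd offset. */
		asm volatile ("sgdt %0" : "=m" (ctxt->gdt_limit));
		asm volatile ("sidt %0" : "=m" (ctxt->idt_limit));
		asm volatile ("sldt %0" : "=m" (ctxt->ldt));
		asm volatile ("str %0"  : "=m" (ctxt->tr));

		/* segment registers */
		asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
		asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
		asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
		asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));

		/* control registers; mov from %crN needs a register
		   destination, hence "=r" rather than "=m" */
		asm volatile ("movl %%cr0, %0" : "=r" (ctxt->cr0));
		asm volatile ("movl %%cr2, %0" : "=r" (ctxt->cr2));
		asm volatile ("movl %%cr3, %0" : "=r" (ctxt->cr3));
		asm volatile ("movl %%cr4, %0" : "=r" (ctxt->cr4));
	}

On the ACPI side, acpi_save_register_state(return_point) records the resume
address in saved_eip along with the stack and callee-saved registers; the
wakeup path presumably restores those globals and jumps to saved_eip, which
is why acpi_restore_register_state() can be an empty macro here.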