mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-17 17:24:17 +08:00
24f1e32c60
This patch rebase the implementation of the breakpoints API on top of perf events instances. Each breakpoints are now perf events that handle the register scheduling, thread/cpu attachment, etc.. The new layering is now made as follows: ptrace kgdb ftrace perf syscall \ | / / \ | / / / Core breakpoint API / / | / | / Breakpoints perf events | | Breakpoints PMU ---- Debug Register constraints handling (Part of core breakpoint API) | | Hardware debug registers Reasons of this rewrite: - Use the centralized/optimized pmu registers scheduling, implying an easier arch integration - More powerful register handling: perf attributes (pinned/flexible events, exclusive/non-exclusive, tunable period, etc...) Impact: - New perf ABI: the hardware breakpoints counters - Ptrace breakpoints setting remains tricky and still needs some per thread breakpoints references. Todo (in the order): - Support breakpoints perf counter events for perf tools (ie: implement perf_bpcounter_event()) - Support from perf tools Changes in v2: - Follow the perf "event " rename - The ptrace regression have been fixed (ptrace breakpoint perf events weren't released when a task ended) - Drop the struct hw_breakpoint and store generic fields in perf_event_attr. - Separate core and arch specific headers, drop asm-generic/hw_breakpoint.h and create linux/hw_breakpoint.h - Use new generic len/type for breakpoint - Handle off case: when breakpoints api is not supported by an arch Changes in v3: - Fix broken CONFIG_KVM, we need to propagate the breakpoint api changes to kvm when we exit the guest and restore the bp registers to the host. 
Changes in v4: - Drop the hw_breakpoint_restore() stub as it is only used by KVM - EXPORT_SYMBOL_GPL hw_breakpoint_restore() as KVM can be built as a module - Restore the breakpoints unconditionally on kvm guest exit: TIF_DEBUG_THREAD doesn't anymore cover every cases of running breakpoints and vcpu->arch.switch_db_regs might not always be set when the guest used debug registers. (Waiting for a reliable optimization) Changes in v5: - Split-up the asm-generic/hw-breakpoint.h moving to linux/hw_breakpoint.h into a separate patch - Optimize the breakpoints restoring while switching from kvm guest to host. We only want to restore the state if we have active breakpoints to the host, otherwise we don't care about messed-up address registers. - Add asm/hw_breakpoint.h to Kbuild - Fix bad breakpoint type in trace_selftest.c Changes in v6: - Fix wrong header inclusion in trace.h (triggered a build error with CONFIG_FTRACE_SELFTEST Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com> Cc: Prasad <prasad@linux.vnet.ibm.com> Cc: Alan Stern <stern@rowland.harvard.edu> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Arnaldo Carvalho de Melo <acme@redhat.com> Cc: Steven Rostedt <rostedt@goodmis.org> Cc: Ingo Molnar <mingo@elte.hu> Cc: Jan Kiszka <jan.kiszka@web.de> Cc: Jiri Slaby <jirislaby@gmail.com> Cc: Li Zefan <lizf@cn.fujitsu.com> Cc: Avi Kivity <avi@redhat.com> Cc: Paul Mackerras <paulus@samba.org> Cc: Mike Galbraith <efault@gmx.de> Cc: Masami Hiramatsu <mhiramat@redhat.com> Cc: Paul Mundt <lethal@linux-sh.org>
232 lines
5.5 KiB
C
232 lines
5.5 KiB
C
/*
|
|
* Suspend support specific for i386/x86-64.
|
|
*
|
|
* Distribute under GPLv2
|
|
*
|
|
* Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
|
|
* Copyright (c) 2002 Pavel Machek <pavel@suse.cz>
|
|
* Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
|
|
*/
|
|
|
|
#include <linux/suspend.h>
|
|
#include <linux/smp.h>
|
|
|
|
#include <asm/pgtable.h>
|
|
#include <asm/proto.h>
|
|
#include <asm/mtrr.h>
|
|
#include <asm/page.h>
|
|
#include <asm/mce.h>
|
|
#include <asm/xcr.h>
|
|
#include <asm/suspend.h>
|
|
#include <asm/debugreg.h>
|
|
|
|
#ifdef CONFIG_X86_32
/* Saved CPU state for the boot CPU; filled by __save_processor_state(). */
static struct saved_context saved_context;

/*
 * On 32-bit the general-purpose registers and eflags are kept in separate
 * globals rather than in saved_context; presumably they are referenced by
 * name from the low-level suspend/resume assembly — confirm against the
 * arch assembly files before touching them.
 */
unsigned long saved_context_ebx;
unsigned long saved_context_esp, saved_context_ebp;
unsigned long saved_context_esi, saved_context_edi;
unsigned long saved_context_eflags;
#else
/* CONFIG_X86_64 */
/* Not static: presumably referenced from assembly by name — verify. */
struct saved_context saved_context;
#endif
|
|
|
|
/**
 * __save_processor_state - save CPU registers before creating a
 * hibernation image and before restoring the memory state from it
 * @ctxt - structure to store the registers contents in
 *
 * NOTE: If there is a CPU register the modification of which by the
 * boot kernel (ie. the kernel used for loading the hibernation image)
 * might affect the operations of the restored target kernel (ie. the one
 * saved in the hibernation image), then its contents must be saved by this
 * function. In other words, if kernel A is hibernated and different
 * kernel B is used for loading the hibernation image into memory, the
 * kernel A's __save_processor_state() function must save all registers
 * needed by kernel A, so that it can operate correctly after the resume
 * regardless of what kernel B does in the meantime.
 *
 * Saves, in order: fixed MTRR ranges (32-bit), FPU/SSE state (via
 * kernel_fpu_begin()), descriptor tables, task register, segment
 * registers (plus the FS/GS base and EFER MSRs on 64-bit), and finally
 * the control registers.
 */
static void __save_processor_state(struct saved_context *ctxt)
{
#ifdef CONFIG_X86_32
	mtrr_save_fixed_ranges(NULL);
#endif
	/* Save FPU state and take ownership of the FPU for this section. */
	kernel_fpu_begin();

	/*
	 * descriptor tables
	 */
#ifdef CONFIG_X86_32
	store_gdt(&ctxt->gdt);
	store_idt(&ctxt->idt);
#else
/* CONFIG_X86_64 */
	/*
	 * On 64-bit the GDT/IDT descriptors are stored via their limit
	 * fields; the limit and base members are adjacent in
	 * struct saved_context so the cast covers both — TODO confirm
	 * layout against the struct definition.
	 */
	store_gdt((struct desc_ptr *)&ctxt->gdt_limit);
	store_idt((struct desc_ptr *)&ctxt->idt_limit);
#endif
	store_tr(ctxt->tr);

	/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
	/*
	 * segment registers
	 */
#ifdef CONFIG_X86_32
	savesegment(es, ctxt->es);
	savesegment(fs, ctxt->fs);
	savesegment(gs, ctxt->gs);
	savesegment(ss, ctxt->ss);
#else
/* CONFIG_X86_64 */
	asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
	asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
	asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
	asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
	asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));

	/* Segment bases live in MSRs on 64-bit; save them separately. */
	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
	rdmsrl(MSR_GS_BASE, ctxt->gs_base);
	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
	mtrr_save_fixed_ranges(NULL);

	rdmsrl(MSR_EFER, ctxt->efer);
#endif

	/*
	 * control registers
	 */
	ctxt->cr0 = read_cr0();
	ctxt->cr2 = read_cr2();
	ctxt->cr3 = read_cr3();
#ifdef CONFIG_X86_32
	/*
	 * The "safe" variant is used because some 32-bit CPUs lack CR4;
	 * __restore_processor_state() only writes CR4 back when this
	 * value is non-zero.
	 */
	ctxt->cr4 = read_cr4_safe();
#else
/* CONFIG_X86_64 */
	ctxt->cr4 = read_cr4();
	ctxt->cr8 = read_cr8();
#endif
}
|
|
|
|
/* Needed by apm.c */
/*
 * save_processor_state - public entry point: snapshot this CPU's state
 * into the file-scope saved_context for later restore_processor_state().
 */
void save_processor_state(void)
{
	__save_processor_state(&saved_context);
}
#ifdef CONFIG_X86_32
/* Exported on 32-bit only, for the (modular) APM driver. */
EXPORT_SYMBOL(save_processor_state);
#endif
|
|
|
|
/*
 * do_fpu_end - counterpart of the kernel_fpu_begin() call made in
 * __save_processor_state(): release the FPU, restoring user FPU
 * regs if necessary.
 */
static void do_fpu_end(void)
{
	/*
	 * Restore FPU regs if necessary.
	 */
	kernel_fpu_end();
}
|
|
|
|
static void fix_processor_context(void)
|
|
{
|
|
int cpu = smp_processor_id();
|
|
struct tss_struct *t = &per_cpu(init_tss, cpu);
|
|
|
|
set_tss_desc(cpu, t); /*
|
|
* This just modifies memory; should not be
|
|
* necessary. But... This is necessary, because
|
|
* 386 hardware has concept of busy TSS or some
|
|
* similar stupidity.
|
|
*/
|
|
|
|
#ifdef CONFIG_X86_64
|
|
get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
|
|
|
|
syscall_init(); /* This sets MSR_*STAR and related */
|
|
#endif
|
|
load_TR_desc(); /* This does ltr */
|
|
load_LDT(¤t->active_mm->context); /* This does lldt */
|
|
}
|
|
|
|
/**
 * __restore_processor_state - restore the contents of CPU registers saved
 * by __save_processor_state()
 * @ctxt - structure to load the registers contents from
 *
 * Restore order matters: control registers first (so paging/feature
 * state is sane), then descriptor tables, then segment registers and
 * their base MSRs, then XCR0, and finally the derived per-CPU state
 * (TSS/LDT via fix_processor_context()), FPU and MTRRs.
 */
static void __restore_processor_state(struct saved_context *ctxt)
{
	/*
	 * control registers
	 */
	/* cr4 was introduced in the Pentium CPU */
#ifdef CONFIG_X86_32
	/* Zero means the CPU had no CR4 at save time — skip the write. */
	if (ctxt->cr4)
		write_cr4(ctxt->cr4);
#else
/* CONFIG X86_64 */
	wrmsrl(MSR_EFER, ctxt->efer);
	write_cr8(ctxt->cr8);
	write_cr4(ctxt->cr4);
#endif
	write_cr3(ctxt->cr3);
	write_cr2(ctxt->cr2);
	write_cr0(ctxt->cr0);

	/*
	 * now restore the descriptor tables to their proper values
	 * ltr is done in fix_processor_context().
	 */
#ifdef CONFIG_X86_32
	load_gdt(&ctxt->gdt);
	load_idt(&ctxt->idt);
#else
/* CONFIG_X86_64 */
	/* Same limit-field cast convention as in __save_processor_state(). */
	load_gdt((const struct desc_ptr *)&ctxt->gdt_limit);
	load_idt((const struct desc_ptr *)&ctxt->idt_limit);
#endif

	/*
	 * segment registers
	 */
#ifdef CONFIG_X86_32
	loadsegment(es, ctxt->es);
	loadsegment(fs, ctxt->fs);
	loadsegment(gs, ctxt->gs);
	loadsegment(ss, ctxt->ss);

	/*
	 * sysenter MSRs
	 */
	if (boot_cpu_has(X86_FEATURE_SEP))
		enable_sep_cpu();
#else
/* CONFIG_X86_64 */
	asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
	asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
	asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
	/* GS must go through load_gs_index() so the GS base is handled. */
	load_gs_index(ctxt->gs);
	asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));

	/* Restore the MSR-held segment bases after the selectors. */
	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
	wrmsrl(MSR_GS_BASE, ctxt->gs_base);
	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
#endif

	/*
	 * restore XCR0 for xsave capable cpu's.
	 */
	if (cpu_has_xsave)
		xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);

	fix_processor_context();

	do_fpu_end();
	mtrr_bp_restore();
}
|
|
|
|
/* Needed by apm.c */
/*
 * restore_processor_state - public entry point: restore this CPU's state
 * from the file-scope saved_context filled by save_processor_state().
 */
void restore_processor_state(void)
{
	__restore_processor_state(&saved_context);
}
#ifdef CONFIG_X86_32
/* Exported on 32-bit only, for the (modular) APM driver. */
EXPORT_SYMBOL(restore_processor_state);
#endif
|